[
  {
    "path": ".gitattributes",
    "content": "# Auto detect text files and perform LF normalization\n* text=auto\n* text eol=lf\n\n*.pdf binary\n"
  },
  {
    "path": ".github/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2022-2023 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": ".github/actions/bitcoin/action.yml",
    "content": "name: bitcoin-regtest\ndescription: Spawns a regtest Bitcoin daemon\n\ninputs:\n  version:\n    description: \"Version to download and run\"\n    required: false\n    default: \"27.0\"\n\nruns:\n  using: \"composite\"\n  steps:\n    - name: Bitcoin Daemon Cache\n      id: cache-bitcoind\n      uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809\n      with:\n        path: bitcoin.tar.gz\n        key: bitcoind-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}\n\n    - name: Download the Bitcoin Daemon\n      if: steps.cache-bitcoind.outputs.cache-hit != 'true'\n      shell: bash\n      run: |\n        RUNNER_OS=linux\n        RUNNER_ARCH=x86_64\n        FILE=bitcoin-${{ inputs.version }}-$RUNNER_ARCH-$RUNNER_OS-gnu.tar.gz\n\n        wget https://bitcoincore.org/bin/bitcoin-core-${{ inputs.version }}/$FILE\n        mv $FILE bitcoin.tar.gz\n\n    - name: Extract the Bitcoin Daemon\n      shell: bash\n      run: |\n        tar xzvf bitcoin.tar.gz\n        cd bitcoin-${{ inputs.version }}\n        sudo mv bin/* /bin && sudo mv lib/* /lib\n\n    - name: Bitcoin Regtest Daemon\n      shell: bash\n      run: PATH=$PATH:/usr/bin ./orchestration/dev/networks/bitcoin/run.sh -daemon\n"
  },
  {
    "path": ".github/actions/build-dependencies/action.yml",
    "content": "name: build-dependencies\ndescription: Installs build dependencies for Serai\n\nruns:\n  using: \"composite\"\n  steps:\n    - name: Remove unused packages\n      shell: bash\n      run: |\n        # Ensure the repositories are synced\n        sudo apt update -y\n\n        # Actually perform the removals\n        sudo apt remove -y \"*powershell*\" \"*nuget*\" \"*bazel*\" \"*ansible*\" \"*terraform*\" \"*heroku*\" \"*aws*\" azure-cli\n        sudo apt remove -y \"*nodejs*\" \"*npm*\" \"*yarn*\" \"*java*\" \"*kotlin*\" \"*golang*\" \"*swift*\" \"*julia*\" \"*fortran*\" \"*android*\"\n        sudo apt remove -y \"*apache2*\" \"*nginx*\" \"*firefox*\" \"*chromium*\" \"*chrome*\" \"*edge*\"\n\n        sudo apt remove -y --allow-remove-essential -f shim-signed *python3*\n        # This removal command requires the prior removals due to unmet dependencies otherwise\n        sudo apt remove -y \"*qemu*\" \"*sql*\" \"*texinfo*\" \"*imagemagick*\"\n\n        # Reinstall python3 as a general dependency of a functional operating system\n        sudo apt install -y python3 --fix-missing\n      if: runner.os == 'Linux'\n\n    - name: Remove unused packages\n      shell: bash\n      run: |\n        (gem uninstall -aIx) || (exit 0)\n        brew uninstall --force \"*msbuild*\" \"*powershell*\" \"*nuget*\" \"*bazel*\" \"*ansible*\" \"*terraform*\" \"*heroku*\" \"*aws*\" azure-cli\n        brew uninstall --force \"*nodejs*\" \"*npm*\" \"*yarn*\" \"*java*\" \"*kotlin*\" \"*golang*\" \"*swift*\" \"*julia*\" \"*fortran*\" \"*android*\"\n        brew uninstall --force \"*apache2*\" \"*nginx*\" \"*firefox*\" \"*chromium*\" \"*chrome*\" \"*edge*\"\n        brew uninstall --force \"*qemu*\" \"*sql*\" \"*texinfo*\" \"*imagemagick*\"\n        brew cleanup\n      if: runner.os == 'macOS'\n\n    - name: Install dependencies\n      shell: bash\n      run: |\n        if [ \"$RUNNER_OS\" == \"Linux\" ]; then\n          sudo apt install -y ca-certificates protobuf-compiler 
libclang-dev\n        elif [ \"$RUNNER_OS\" == \"Windows\" ]; then\n          choco install protoc\n        elif [ \"$RUNNER_OS\" == \"macOS\" ]; then\n          brew install protobuf llvm\n          HOMEBREW_ROOT_PATH=/opt/homebrew # Apple Silicon\n          if [ $(uname -m) = \"x86_64\" ]; then HOMEBREW_ROOT_PATH=/usr/local; fi # Intel\n          ls $HOMEBREW_ROOT_PATH/opt/llvm/lib | grep \"libclang.dylib\" # Make sure this installed `libclang`\n          echo \"DYLD_LIBRARY_PATH=$HOMEBREW_ROOT_PATH/opt/llvm/lib:$DYLD_LIBRARY_PATH\" >> \"$GITHUB_ENV\"\n        fi\n\n    - name: Install solc\n      shell: bash\n      run: |\n        cargo +1.89 install svm-rs --version =0.5.18\n        svm install 0.8.26\n        svm use 0.8.26\n\n    - name: Remove preinstalled Docker\n      shell: bash\n      run: |\n        docker system prune -a --volumes\n        sudo apt remove -y *docker*\n        # Install uidmap which will be required for the explicitly installed Docker\n        sudo apt install uidmap\n      if: runner.os == 'Linux'\n\n    - name: Update system dependencies\n      shell: bash\n      run: |\n        sudo apt update -y\n        sudo apt upgrade -y\n        sudo apt autoremove -y\n        sudo apt clean\n      if: runner.os == 'Linux'\n\n    - name: Install rootless Docker\n      uses: docker/setup-docker-action@b60f85385d03ac8acfca6d9996982511d8620a19\n      with:\n        rootless: true\n        set-host: true\n      if: runner.os == 'Linux'\n\n    # - name: Cache Rust\n    #   uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43\n"
  },
  {
    "path": ".github/actions/monero/action.yml",
    "content": "name: monero-regtest\ndescription: Spawns a regtest Monero daemon\n\ninputs:\n  version:\n    description: \"Version to download and run\"\n    required: false\n    default: v0.18.3.4\n\nruns:\n  using: \"composite\"\n  steps:\n    - name: Monero Daemon Cache\n      id: cache-monerod\n      uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809\n      with:\n        path: /usr/bin/monerod\n        key: monerod-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}\n\n    - name: Download the Monero Daemon\n      if: steps.cache-monerod.outputs.cache-hit != 'true'\n      # Calculates OS/ARCH to demonstrate it, yet then locks to linux-x64 due\n      # to the contained folder not following the same naming scheme and\n      # requiring further expansion not worth doing right now\n      shell: bash\n      run: |\n        RUNNER_OS=${{ runner.os }}\n        RUNNER_ARCH=${{ runner.arch }}\n\n        RUNNER_OS=${RUNNER_OS,,}\n        RUNNER_ARCH=${RUNNER_ARCH,,}\n\n        RUNNER_OS=linux\n        RUNNER_ARCH=x64\n\n        FILE=monero-$RUNNER_OS-$RUNNER_ARCH-${{ inputs.version }}.tar.bz2\n        wget https://downloads.getmonero.org/cli/$FILE\n        tar -xvf $FILE\n\n        sudo mv monero-x86_64-linux-gnu-${{ inputs.version }}/monerod /usr/bin/monerod\n        sudo chmod 777 /usr/bin/monerod\n        sudo chmod +x /usr/bin/monerod\n\n    - name: Monero Regtest Daemon\n      shell: bash\n      run: PATH=$PATH:/usr/bin ./orchestration/dev/networks/monero/run.sh --detach\n"
  },
  {
    "path": ".github/actions/monero-wallet-rpc/action.yml",
    "content": "name: monero-wallet-rpc\ndescription: Spawns a Monero Wallet-RPC.\n\ninputs:\n  version:\n    description: \"Version to download and run\"\n    required: false\n    default: v0.18.3.4\n\nruns:\n  using: \"composite\"\n  steps:\n    - name: Monero Wallet RPC Cache\n      id: cache-monero-wallet-rpc\n      uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809\n      with:\n        path: monero-wallet-rpc\n        key: monero-wallet-rpc-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}\n\n    - name: Download the Monero Wallet RPC\n      if: steps.cache-monero-wallet-rpc.outputs.cache-hit != 'true'\n      # Calculates OS/ARCH to demonstrate it, yet then locks to linux-x64 due\n      # to the contained folder not following the same naming scheme and\n      # requiring further expansion not worth doing right now\n      shell: bash\n      run: |\n        RUNNER_OS=${{ runner.os }}\n        RUNNER_ARCH=${{ runner.arch }}\n\n        RUNNER_OS=${RUNNER_OS,,}\n        RUNNER_ARCH=${RUNNER_ARCH,,}\n\n        RUNNER_OS=linux\n        RUNNER_ARCH=x64\n\n        FILE=monero-$RUNNER_OS-$RUNNER_ARCH-${{ inputs.version }}.tar.bz2\n        wget https://downloads.getmonero.org/cli/$FILE\n        tar -xvf $FILE\n\n        mv monero-x86_64-linux-gnu-${{ inputs.version }}/monero-wallet-rpc monero-wallet-rpc\n\n    - name: Monero Wallet RPC\n      shell: bash\n      run: |\n        ./monero-wallet-rpc --allow-mismatched-daemon-version \\\n          --daemon-address 0.0.0.0:18081 --daemon-login serai:seraidex \\\n          --disable-rpc-login --rpc-bind-port 18082 \\\n          --wallet-dir ./ \\\n          --detach\n"
  },
  {
    "path": ".github/actions/test-dependencies/action.yml",
    "content": "name: test-dependencies\ndescription: Installs test dependencies for Serai\n\ninputs:\n  monero-version:\n    description: \"Monero version to download and run as a regtest node\"\n    required: false\n    default: v0.18.3.4\n\n  bitcoin-version:\n    description: \"Bitcoin version to download and run as a regtest node\"\n    required: false\n    default: \"27.1\"\n\nruns:\n  using: \"composite\"\n  steps:\n    - name: Install Build Dependencies\n      uses: ./.github/actions/build-dependencies\n\n    - name: Install Foundry\n      uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773\n      with:\n        version: nightly-f625d0fa7c51e65b4bf1e8f7931cd1c6e2e285e9\n        cache: false\n\n    - name: Run a Monero Regtest Node\n      uses: ./.github/actions/monero\n      with:\n        version: ${{ inputs.monero-version }}\n\n    - name: Run a Bitcoin Regtest Node\n      uses: ./.github/actions/bitcoin\n      with:\n        version: ${{ inputs.bitcoin-version }}\n\n    - name: Run a Monero Wallet-RPC\n      uses: ./.github/actions/monero-wallet-rpc\n"
  },
  {
    "path": ".github/nightly-version",
    "content": "nightly-2025-11-01\n"
  },
  {
    "path": ".github/workflows/common-tests.yml",
    "content": "name: common/ Tests\n\non:\n  push:\n    branches:\n      - develop\n    paths:\n      - \"common/**\"\n\n  pull_request:\n    paths:\n      - \"common/**\"\n\n  workflow_dispatch:\n\njobs:\n  test-common:\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac\n\n      - name: Build Dependencies\n        uses: ./.github/actions/build-dependencies\n\n      - name: Run Tests\n        run: |\n          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \\\n            -p std-shims \\\n            -p zalloc \\\n            -p patchable-async-sleep \\\n            -p serai-db \\\n            -p serai-env \\\n            -p simple-request\n"
  },
  {
    "path": ".github/workflows/coordinator-tests.yml",
    "content": "name: Coordinator Tests\n\non:\n  push:\n    branches:\n      - develop\n    paths:\n      - \"common/**\"\n      - \"crypto/**\"\n      - \"networks/**\"\n      - \"message-queue/**\"\n      - \"coordinator/**\"\n      - \"orchestration/**\"\n      - \"tests/docker/**\"\n      - \"tests/coordinator/**\"\n\n  pull_request:\n    paths:\n      - \"common/**\"\n      - \"crypto/**\"\n      - \"networks/**\"\n      - \"message-queue/**\"\n      - \"coordinator/**\"\n      - \"orchestration/**\"\n      - \"tests/docker/**\"\n      - \"tests/coordinator/**\"\n\n  workflow_dispatch:\n\njobs:\n  build:\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac\n\n      - name: Install Build Dependencies\n        uses: ./.github/actions/build-dependencies\n\n      - name: Run coordinator Docker tests\n        run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-coordinator-tests\n"
  },
  {
    "path": ".github/workflows/crypto-tests.yml",
    "content": "name: crypto/ Tests\n\non:\n  push:\n    branches:\n      - develop\n    paths:\n      - \"common/**\"\n      - \"crypto/**\"\n\n  pull_request:\n    paths:\n      - \"common/**\"\n      - \"crypto/**\"\n\n  workflow_dispatch:\n\njobs:\n  test-crypto:\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac\n\n      - name: Build Dependencies\n        uses: ./.github/actions/build-dependencies\n\n      - name: Run Tests\n        run: |\n          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \\\n            -p flexible-transcript \\\n            -p ff-group-tests \\\n            -p dalek-ff-group \\\n            -p minimal-ed448 \\\n            -p ciphersuite \\\n            -p ciphersuite-kp256 \\\n            -p multiexp \\\n            -p schnorr-signatures \\\n            -p dleq \\\n            -p dkg \\\n            -p dkg-recovery \\\n            -p dkg-dealer \\\n            -p dkg-promote \\\n            -p dkg-musig \\\n            -p dkg-pedpop \\\n            -p modular-frost \\\n            -p frost-schnorrkel\n"
  },
  {
    "path": ".github/workflows/daily-deny.yml",
    "content": "name: Daily Deny Check\n\non:\n  schedule:\n    - cron: \"0 0 * * *\"\n\njobs:\n  deny:\n    name: Run cargo deny\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac\n\n      - name: Advisory Cache\n        uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809\n        with:\n          path: ~/.cargo/advisory-db\n          key: rust-advisory-db\n\n      - name: Install cargo deny\n        run: cargo +1.89 install cargo-deny --version =0.18.3\n\n      - name: Run cargo deny\n        run: cargo deny -L error --all-features check --hide-inclusion-graph\n"
  },
  {
    "path": ".github/workflows/full-stack-tests.yml",
    "content": "name: Full Stack Tests\n\non:\n  push:\n    branches:\n      - develop\n\n  pull_request:\n\n  workflow_dispatch:\n\njobs:\n  build:\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac\n\n      - name: Install Build Dependencies\n        uses: ./.github/actions/build-dependencies\n\n      - name: Run Full Stack Docker tests\n        run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-full-stack-tests\n"
  },
  {
    "path": ".github/workflows/lint.yml",
    "content": "name: Lint\n\non:\n  push:\n    branches:\n      - develop\n  pull_request:\n  workflow_dispatch:\n\njobs:\n  clippy:\n    strategy:\n      matrix:\n        os: [ubuntu-latest, macos-15-intel, macos-latest, windows-latest]\n    runs-on: ${{ matrix.os }}\n\n    steps:\n      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac\n\n      - name: Get nightly version to use\n        id: nightly\n        shell: bash\n        run: echo \"version=$(cat .github/nightly-version)\" >> $GITHUB_OUTPUT\n\n      - name: Build Dependencies\n        uses: ./.github/actions/build-dependencies\n\n      - name: Install nightly rust\n        run: rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32v1-none -c rust-src -c clippy\n\n      - name: Run Clippy\n        run: cargo +${{ steps.nightly.outputs.version }} clippy --all-features --all-targets -- -D warnings -A clippy::items_after_test_module\n\n      # Also verify the lockfile isn't dirty\n      # This happens when someone edits a Cargo.toml yet doesn't do anything\n      # which causes the lockfile to be updated\n      # The above clippy run will cause it to be updated, so checking there's\n      # no differences present now performs the desired check\n      - name: Verify lockfile\n        shell: bash\n        run: git diff | wc -l | LC_ALL=\"en_US.utf8\" grep -x -e \"^[ ]*0\"\n\n  deny:\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac\n\n      - name: Advisory Cache\n        uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809\n        with:\n          path: ~/.cargo/advisory-db\n          key: rust-advisory-db\n\n      - name: Install cargo deny\n        run: cargo +1.89 install cargo-deny --version =0.18.4\n\n      - name: Run cargo deny\n        run: cargo deny -L error --all-features check --hide-inclusion-graph\n\n  fmt:\n    runs-on: ubuntu-latest\n    steps:\n      - uses: 
actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac\n\n      - name: Get nightly version to use\n        id: nightly\n        shell: bash\n        run: echo \"version=$(cat .github/nightly-version)\" >> $GITHUB_OUTPUT\n\n      - name: Install nightly rust\n        run: rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -c rustfmt\n\n      - name: Run rustfmt\n        run: cargo +${{ steps.nightly.outputs.version }} fmt -- --check\n\n  machete:\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac\n      - name: Verify all dependencies are in use\n        run: |\n          cargo +1.89 install cargo-machete --version =0.8.0\n          cargo +1.89 machete\n"
  },
  {
    "path": ".github/workflows/message-queue-tests.yml",
    "content": "name: Message Queue Tests\n\non:\n  push:\n    branches:\n      - develop\n    paths:\n      - \"common/**\"\n      - \"crypto/**\"\n      - \"message-queue/**\"\n      - \"orchestration/**\"\n      - \"tests/docker/**\"\n      - \"tests/message-queue/**\"\n\n  pull_request:\n    paths:\n      - \"common/**\"\n      - \"crypto/**\"\n      - \"message-queue/**\"\n      - \"orchestration/**\"\n      - \"tests/docker/**\"\n      - \"tests/message-queue/**\"\n\n  workflow_dispatch:\n\njobs:\n  build:\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac\n\n      - name: Install Build Dependencies\n        uses: ./.github/actions/build-dependencies\n\n      - name: Run message-queue Docker tests\n        run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-message-queue-tests\n"
  },
  {
    "path": ".github/workflows/mini-tests.yml",
    "content": "name: mini/ Tests\n\non:\n  push:\n    branches:\n      - develop\n    paths:\n      - \"mini/**\"\n\n  pull_request:\n    paths:\n      - \"mini/**\"\n\n  workflow_dispatch:\n\njobs:\n  test-common:\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac\n\n      - name: Build Dependencies\n        uses: ./.github/actions/build-dependencies\n\n      - name: Run Tests\n        run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p mini-serai\n"
  },
  {
    "path": ".github/workflows/monthly-nightly-update.yml",
    "content": "name: Monthly Nightly Update\n\non:\n  schedule:\n    - cron: \"0 0 1 * *\"\n\njobs:\n  update:\n    name: Update nightly\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac\n        with:\n          submodules: \"recursive\"\n\n      - name: Write nightly version\n        run: echo $(date +\"nightly-%Y-%m\"-01) > .github/nightly-version\n\n      - name: Create the commit\n        run: |\n          git config user.name \"GitHub Actions\"\n          git config user.email \"<>\"\n\n          git checkout -b $(date +\"nightly-%Y-%m\")\n\n          git add .github/nightly-version\n          git commit -m \"Update nightly\"\n          git push -u origin $(date +\"nightly-%Y-%m\")\n\n      - name: Pull Request\n        uses: actions/github-script@d7906e4ad0b1822421a7e6a35d5ca353c962f410\n        with:\n          script: |\n            const { repo, owner } = context.repo;\n\n            const result = await github.rest.pulls.create({\n              title: (new Date()).toLocaleString(\n                false,\n                { month: \"long\", year: \"numeric\" }\n              ) + \" - Rust Nightly Update\",\n              owner,\n              repo,\n              head: \"nightly-\" + (new Date()).toISOString().split(\"-\").splice(0, 2).join(\"-\"),\n              base: \"develop\",\n              body: \"PR auto-generated by a GitHub workflow.\"\n            });\n\n            github.rest.issues.addLabels({\n              owner,\n              repo,\n              issue_number: result.data.number,\n              labels: [\"improvement\"]\n            });\n"
  },
  {
    "path": ".github/workflows/networks-tests.yml",
    "content": "name: networks/ Tests\n\non:\n  push:\n    branches:\n      - develop\n    paths:\n      - \"common/**\"\n      - \"crypto/**\"\n      - \"networks/**\"\n\n  pull_request:\n    paths:\n      - \"common/**\"\n      - \"crypto/**\"\n      - \"networks/**\"\n\n  workflow_dispatch:\n\njobs:\n  test-networks:\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac\n\n      - name: Test Dependencies\n        uses: ./.github/actions/test-dependencies\n\n      - name: Run Tests\n        run: |\n          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \\\n            -p bitcoin-serai \\\n            -p alloy-simple-request-transport \\\n            -p ethereum-serai \\\n            -p serai-ethereum-relayer \\\n"
  },
  {
    "path": ".github/workflows/no-std.yml",
    "content": "name: no-std build\n\non:\n  push:\n    branches:\n      - develop\n    paths:\n      - \"common/**\"\n      - \"crypto/**\"\n      - \"networks/**\"\n      - \"tests/no-std/**\"\n\n  pull_request:\n    paths:\n      - \"common/**\"\n      - \"crypto/**\"\n      - \"networks/**\"\n      - \"tests/no-std/**\"\n\n  workflow_dispatch:\n\njobs:\n  build:\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac\n\n      - name: Install Build Dependencies\n        uses: ./.github/actions/build-dependencies\n\n      - name: Install RISC-V Toolchain\n        run: sudo apt update && sudo apt install -y gcc-riscv64-unknown-elf gcc-multilib && rustup target add riscv32imac-unknown-none-elf\n\n      - name: Verify no-std builds\n        run: CFLAGS=-I/usr/include cargo build --target riscv32imac-unknown-none-elf -p serai-no-std-tests\n"
  },
  {
    "path": ".github/workflows/pages.yml",
    "content": "# MIT License\n#\n# Copyright (c) 2022 just-the-docs\n# Copyright (c) 2022-2024 Luke Parker\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nname: Deploy Rust docs and Jekyll site to Pages\n\non:\n  push:\n    branches:\n      - \"develop\"\n\n  workflow_dispatch:\n\npermissions:\n  contents: read\n  pages: write\n  id-token: write\n\n# Only allow one concurrent deployment\nconcurrency:\n  group: \"pages\"\n  cancel-in-progress: true\n\njobs:\n  # Build job\n  build:\n    runs-on: ubuntu-latest\n    steps:\n      - name: Checkout\n        uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac\n      - name: Setup Ruby\n        uses: ruby/setup-ruby@44511735964dcb71245e7e55f72539531f7bc0eb\n        with:\n          bundler-cache: true\n          cache-version: 0\n          working-directory: \"${{ github.workspace }}/docs\"\n      - name: Setup Pages\n        id: pages\n        uses: 
actions/configure-pages@983d7736d9b0ae728b81ab479565c72886d7745b\n      - name: Build with Jekyll\n        run: cd ${{ github.workspace }}/docs && bundle exec jekyll build --baseurl \"${{ steps.pages.outputs.base_path }}\"\n        env:\n          JEKYLL_ENV: production\n\n      - name: Get nightly version to use\n        id: nightly\n        shell: bash\n        run: echo \"version=$(cat .github/nightly-version)\" >> $GITHUB_OUTPUT\n      - name: Build Dependencies\n        uses: ./.github/actions/build-dependencies\n      - name: Build Rust docs\n        run: |\n          rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32v1-none -c rust-docs\n          RUSTDOCFLAGS=\"--cfg docsrs\" cargo +${{ steps.nightly.outputs.version }} doc --workspace --no-deps --all-features\n          mv target/doc docs/_site/rust\n\n      - name: Upload artifact\n        uses: actions/upload-pages-artifact@7b1f4a764d45c48632c6b24a0339c27f5614fb0b\n        with:\n          path: \"docs/_site/\"\n\n  # Deployment job\n  deploy:\n    environment:\n      name: github-pages\n      url: ${{ steps.deployment.outputs.page_url }}\n    runs-on: ubuntu-latest\n    needs: build\n    steps:\n      - name: Deploy to GitHub Pages\n        id: deployment\n        uses: actions/deploy-pages@d6db90164ac5ed86f2b6aed7e0febac5b3c0c03e\n"
  },
  {
    "path": ".github/workflows/processor-tests.yml",
    "content": "name: Processor Tests\n\non:\n  push:\n    branches:\n      - develop\n    paths:\n      - \"common/**\"\n      - \"crypto/**\"\n      - \"networks/**\"\n      - \"message-queue/**\"\n      - \"processor/**\"\n      - \"orchestration/**\"\n      - \"tests/docker/**\"\n      - \"tests/processor/**\"\n\n  pull_request:\n    paths:\n      - \"common/**\"\n      - \"crypto/**\"\n      - \"networks/**\"\n      - \"message-queue/**\"\n      - \"processor/**\"\n      - \"orchestration/**\"\n      - \"tests/docker/**\"\n      - \"tests/processor/**\"\n\n  workflow_dispatch:\n\njobs:\n  build:\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac\n\n      - name: Install Build Dependencies\n        uses: ./.github/actions/build-dependencies\n\n      - name: Run processor Docker tests\n        run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-processor-tests\n"
  },
  {
    "path": ".github/workflows/reproducible-runtime.yml",
    "content": "name: Reproducible Runtime\n\non:\n  push:\n    branches:\n      - develop\n    paths:\n      - \"Cargo.lock\"\n      - \"common/**\"\n      - \"crypto/**\"\n      - \"substrate/**\"\n      - \"orchestration/runtime/**\"\n      - \"tests/reproducible-runtime/**\"\n\n  pull_request:\n    paths:\n      - \"Cargo.lock\"\n      - \"common/**\"\n      - \"crypto/**\"\n      - \"substrate/**\"\n      - \"orchestration/runtime/**\"\n      - \"tests/reproducible-runtime/**\"\n\n  workflow_dispatch:\n\njobs:\n  build:\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac\n\n      - name: Install Build Dependencies\n        uses: ./.github/actions/build-dependencies\n\n      - name: Run Reproducible Runtime tests\n        run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-reproducible-runtime-tests\n"
  },
  {
    "path": ".github/workflows/tests.yml",
    "content": "name: Tests\n\non:\n  push:\n    branches:\n      - develop\n    paths:\n      - \"common/**\"\n      - \"crypto/**\"\n      - \"networks/**\"\n      - \"message-queue/**\"\n      - \"processor/**\"\n      - \"coordinator/**\"\n      - \"substrate/**\"\n\n  pull_request:\n    paths:\n      - \"common/**\"\n      - \"crypto/**\"\n      - \"networks/**\"\n      - \"message-queue/**\"\n      - \"processor/**\"\n      - \"coordinator/**\"\n      - \"substrate/**\"\n\n  workflow_dispatch:\n\njobs:\n  test-infra:\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac\n\n      - name: Build Dependencies\n        uses: ./.github/actions/build-dependencies\n\n      - name: Run Tests\n        run: |\n          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \\\n            -p serai-message-queue \\\n            -p serai-processor-messages \\\n            -p serai-processor \\\n            -p tendermint-machine \\\n            -p tributary-chain \\\n            -p serai-coordinator \\\n            -p serai-orchestrator \\\n            -p serai-docker-tests\n\n  test-substrate:\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac\n\n      - name: Build Dependencies\n        uses: ./.github/actions/build-dependencies\n\n      - name: Run Tests\n        run: |\n          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \\\n            -p serai-primitives \\\n            -p serai-coins-primitives \\\n            -p serai-coins-pallet \\\n            -p serai-dex-pallet \\\n            -p serai-validator-sets-primitives \\\n            -p serai-validator-sets-pallet \\\n            -p serai-genesis-liquidity-primitives \\\n            -p serai-genesis-liquidity-pallet \\\n            -p serai-emissions-primitives \\\n            -p serai-emissions-pallet \\\n            -p serai-economic-security-pallet \\\n            -p 
serai-in-instructions-primitives \\\n            -p serai-in-instructions-pallet \\\n            -p serai-signals-primitives \\\n            -p serai-signals-pallet \\\n            -p serai-abi \\\n            -p serai-runtime \\\n            -p serai-node\n\n  test-serai-client:\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac\n\n      - name: Build Dependencies\n        uses: ./.github/actions/build-dependencies\n\n      - name: Run Tests\n        run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client\n"
  },
  {
    "path": ".gitignore",
    "content": "target\n\n# Don't commit any `Cargo.lock` which aren't the workspace's\nCargo.lock\n!./Cargo.lock\n\n# Don't commit any `Dockerfile`, as they're auto-generated, except the only one which isn't\nDockerfile\nDockerfile.fast-epoch\n!orchestration/runtime/Dockerfile\n\n.test-logs\n\n.vscode\n"
  },
  {
    "path": ".rustfmt.toml",
    "content": "edition = \"2021\"\ntab_spaces = 2\n\nmax_width = 100\n# Let the developer decide based on the 100 char line limit\nuse_small_heuristics = \"Max\"\n\nerror_on_line_overflow = true\nerror_on_unformatted = true\n\nimports_granularity = \"Crate\"\nreorder_imports = false\nreorder_modules = false\n\nunstable_features = true\nspaces_around_ranges = true\nbinop_separator = \"Back\"\n"
  },
  {
    "path": "AGPL-3.0",
    "content": "                    GNU AFFERO GENERAL PUBLIC LICENSE\n                       Version 3, 19 November 2007\n\n Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>\n Everyone is permitted to copy and distribute verbatim copies\n of this license document, but changing it is not allowed.\n\n                            Preamble\n\n  The GNU Affero General Public License is a free, copyleft license for\nsoftware and other kinds of works, specifically designed to ensure\ncooperation with the community in the case of network server software.\n\n  The licenses for most software and other practical works are designed\nto take away your freedom to share and change the works.  By contrast,\nour General Public Licenses are intended to guarantee your freedom to\nshare and change all versions of a program--to make sure it remains free\nsoftware for all its users.\n\n  When we speak of free software, we are referring to freedom, not\nprice.  Our General Public Licenses are designed to make sure that you\nhave the freedom to distribute copies of free software (and charge for\nthem if you wish), that you receive source code or can get it if you\nwant it, that you can change the software or use pieces of it in new\nfree programs, and that you know you can do these things.\n\n  Developers that use our General Public Licenses protect your rights\nwith two steps: (1) assert copyright on the software, and (2) offer\nyou this License which gives you legal permission to copy, distribute\nand/or modify the software.\n\n  A secondary benefit of defending all users' freedom is that\nimprovements made in alternate versions of the program, if they\nreceive widespread use, become available for other developers to\nincorporate.  Many developers of free software are heartened and\nencouraged by the resulting cooperation.  
However, in the case of\nsoftware used on network servers, this result may fail to come about.\nThe GNU General Public License permits making a modified version and\nletting the public access it on a server without ever releasing its\nsource code to the public.\n\n  The GNU Affero General Public License is designed specifically to\nensure that, in such cases, the modified source code becomes available\nto the community.  It requires the operator of a network server to\nprovide the source code of the modified version running there to the\nusers of that server.  Therefore, public use of a modified version, on\na publicly accessible server, gives the public access to the source\ncode of the modified version.\n\n  An older license, called the Affero General Public License and\npublished by Affero, was designed to accomplish similar goals.  This is\na different license, not a version of the Affero GPL, but Affero has\nreleased a new version of the Affero GPL which permits relicensing under\nthis license.\n\n  The precise terms and conditions for copying, distribution and\nmodification follow.\n\n                       TERMS AND CONDITIONS\n\n  0. Definitions.\n\n  \"This License\" refers to version 3 of the GNU Affero General Public License.\n\n  \"Copyright\" also means copyright-like laws that apply to other kinds of\nworks, such as semiconductor masks.\n\n  \"The Program\" refers to any copyrightable work licensed under this\nLicense.  Each licensee is addressed as \"you\".  \"Licensees\" and\n\"recipients\" may be individuals or organizations.\n\n  To \"modify\" a work means to copy from or adapt all or part of the work\nin a fashion requiring copyright permission, other than the making of an\nexact copy.  
The resulting work is called a \"modified version\" of the\nearlier work or a work \"based on\" the earlier work.\n\n  A \"covered work\" means either the unmodified Program or a work based\non the Program.\n\n  To \"propagate\" a work means to do anything with it that, without\npermission, would make you directly or secondarily liable for\ninfringement under applicable copyright law, except executing it on a\ncomputer or modifying a private copy.  Propagation includes copying,\ndistribution (with or without modification), making available to the\npublic, and in some countries other activities as well.\n\n  To \"convey\" a work means any kind of propagation that enables other\nparties to make or receive copies.  Mere interaction with a user through\na computer network, with no transfer of a copy, is not conveying.\n\n  An interactive user interface displays \"Appropriate Legal Notices\"\nto the extent that it includes a convenient and prominently visible\nfeature that (1) displays an appropriate copyright notice, and (2)\ntells the user that there is no warranty for the work (except to the\nextent that warranties are provided), that licensees may convey the\nwork under this License, and how to view a copy of this License.  If\nthe interface presents a list of user commands or options, such as a\nmenu, a prominent item in the list meets this criterion.\n\n  1. Source Code.\n\n  The \"source code\" for a work means the preferred form of the work\nfor making modifications to it.  
\"Object code\" means any non-source\nform of a work.\n\n  A \"Standard Interface\" means an interface that either is an official\nstandard defined by a recognized standards body, or, in the case of\ninterfaces specified for a particular programming language, one that\nis widely used among developers working in that language.\n\n  The \"System Libraries\" of an executable work include anything, other\nthan the work as a whole, that (a) is included in the normal form of\npackaging a Major Component, but which is not part of that Major\nComponent, and (b) serves only to enable use of the work with that\nMajor Component, or to implement a Standard Interface for which an\nimplementation is available to the public in source code form.  A\n\"Major Component\", in this context, means a major essential component\n(kernel, window system, and so on) of the specific operating system\n(if any) on which the executable work runs, or a compiler used to\nproduce the work, or an object code interpreter used to run it.\n\n  The \"Corresponding Source\" for a work in object code form means all\nthe source code needed to generate, install, and (for an executable\nwork) run the object code and to modify the work, including scripts to\ncontrol those activities.  However, it does not include the work's\nSystem Libraries, or general-purpose tools or generally available free\nprograms which are used unmodified in performing those activities but\nwhich are not part of the work.  
For example, Corresponding Source\nincludes interface definition files associated with source files for\nthe work, and the source code for shared libraries and dynamically\nlinked subprograms that the work is specifically designed to require,\nsuch as by intimate data communication or control flow between those\nsubprograms and other parts of the work.\n\n  The Corresponding Source need not include anything that users\ncan regenerate automatically from other parts of the Corresponding\nSource.\n\n  The Corresponding Source for a work in source code form is that\nsame work.\n\n  2. Basic Permissions.\n\n  All rights granted under this License are granted for the term of\ncopyright on the Program, and are irrevocable provided the stated\nconditions are met.  This License explicitly affirms your unlimited\npermission to run the unmodified Program.  The output from running a\ncovered work is covered by this License only if the output, given its\ncontent, constitutes a covered work.  This License acknowledges your\nrights of fair use or other equivalent, as provided by copyright law.\n\n  You may make, run and propagate covered works that you do not\nconvey, without conditions so long as your license otherwise remains\nin force.  You may convey covered works to others for the sole purpose\nof having them make modifications exclusively for you, or provide you\nwith facilities for running those works, provided that you comply with\nthe terms of this License in conveying all material for which you do\nnot control copyright.  Those thus making or running the covered works\nfor you must do so exclusively on your behalf, under your direction\nand control, on terms that prohibit them from making any copies of\nyour copyrighted material outside their relationship with you.\n\n  Conveying under any other circumstances is permitted solely under\nthe conditions stated below.  Sublicensing is not allowed; section 10\nmakes it unnecessary.\n\n  3. 
Protecting Users' Legal Rights From Anti-Circumvention Law.\n\n  No covered work shall be deemed part of an effective technological\nmeasure under any applicable law fulfilling obligations under article\n11 of the WIPO copyright treaty adopted on 20 December 1996, or\nsimilar laws prohibiting or restricting circumvention of such\nmeasures.\n\n  When you convey a covered work, you waive any legal power to forbid\ncircumvention of technological measures to the extent such circumvention\nis effected by exercising rights under this License with respect to\nthe covered work, and you disclaim any intention to limit operation or\nmodification of the work as a means of enforcing, against the work's\nusers, your or third parties' legal rights to forbid circumvention of\ntechnological measures.\n\n  4. Conveying Verbatim Copies.\n\n  You may convey verbatim copies of the Program's source code as you\nreceive it, in any medium, provided that you conspicuously and\nappropriately publish on each copy an appropriate copyright notice;\nkeep intact all notices stating that this License and any\nnon-permissive terms added in accord with section 7 apply to the code;\nkeep intact all notices of the absence of any warranty; and give all\nrecipients a copy of this License along with the Program.\n\n  You may charge any price or no price for each copy that you convey,\nand you may offer support or warranty protection for a fee.\n\n  5. Conveying Modified Source Versions.\n\n  You may convey a work based on the Program, or the modifications to\nproduce it from the Program, in the form of source code under the\nterms of section 4, provided that you also meet all of these conditions:\n\n    a) The work must carry prominent notices stating that you modified\n    it, and giving a relevant date.\n\n    b) The work must carry prominent notices stating that it is\n    released under this License and any conditions added under section\n    7.  
This requirement modifies the requirement in section 4 to\n    \"keep intact all notices\".\n\n    c) You must license the entire work, as a whole, under this\n    License to anyone who comes into possession of a copy.  This\n    License will therefore apply, along with any applicable section 7\n    additional terms, to the whole of the work, and all its parts,\n    regardless of how they are packaged.  This License gives no\n    permission to license the work in any other way, but it does not\n    invalidate such permission if you have separately received it.\n\n    d) If the work has interactive user interfaces, each must display\n    Appropriate Legal Notices; however, if the Program has interactive\n    interfaces that do not display Appropriate Legal Notices, your\n    work need not make them do so.\n\n  A compilation of a covered work with other separate and independent\nworks, which are not by their nature extensions of the covered work,\nand which are not combined with it such as to form a larger program,\nin or on a volume of a storage or distribution medium, is called an\n\"aggregate\" if the compilation and its resulting copyright are not\nused to limit the access or legal rights of the compilation's users\nbeyond what the individual works permit.  Inclusion of a covered work\nin an aggregate does not cause this License to apply to the other\nparts of the aggregate.\n\n  6. 
Conveying Non-Source Forms.\n\n  You may convey a covered work in object code form under the terms\nof sections 4 and 5, provided that you also convey the\nmachine-readable Corresponding Source under the terms of this License,\nin one of these ways:\n\n    a) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by the\n    Corresponding Source fixed on a durable physical medium\n    customarily used for software interchange.\n\n    b) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by a\n    written offer, valid for at least three years and valid for as\n    long as you offer spare parts or customer support for that product\n    model, to give anyone who possesses the object code either (1) a\n    copy of the Corresponding Source for all the software in the\n    product that is covered by this License, on a durable physical\n    medium customarily used for software interchange, for a price no\n    more than your reasonable cost of physically performing this\n    conveying of source, or (2) access to copy the\n    Corresponding Source from a network server at no charge.\n\n    c) Convey individual copies of the object code with a copy of the\n    written offer to provide the Corresponding Source.  This\n    alternative is allowed only occasionally and noncommercially, and\n    only if you received the object code with such an offer, in accord\n    with subsection 6b.\n\n    d) Convey the object code by offering access from a designated\n    place (gratis or for a charge), and offer equivalent access to the\n    Corresponding Source in the same way through the same place at no\n    further charge.  You need not require recipients to copy the\n    Corresponding Source along with the object code.  
If the place to\n    copy the object code is a network server, the Corresponding Source\n    may be on a different server (operated by you or a third party)\n    that supports equivalent copying facilities, provided you maintain\n    clear directions next to the object code saying where to find the\n    Corresponding Source.  Regardless of what server hosts the\n    Corresponding Source, you remain obligated to ensure that it is\n    available for as long as needed to satisfy these requirements.\n\n    e) Convey the object code using peer-to-peer transmission, provided\n    you inform other peers where the object code and Corresponding\n    Source of the work are being offered to the general public at no\n    charge under subsection 6d.\n\n  A separable portion of the object code, whose source code is excluded\nfrom the Corresponding Source as a System Library, need not be\nincluded in conveying the object code work.\n\n  A \"User Product\" is either (1) a \"consumer product\", which means any\ntangible personal property which is normally used for personal, family,\nor household purposes, or (2) anything designed or sold for incorporation\ninto a dwelling.  In determining whether a product is a consumer product,\ndoubtful cases shall be resolved in favor of coverage.  For a particular\nproduct received by a particular user, \"normally used\" refers to a\ntypical or common use of that class of product, regardless of the status\nof the particular user or of the way in which the particular user\nactually uses, or expects or is expected to use, the product.  
A product\nis a consumer product regardless of whether the product has substantial\ncommercial, industrial or non-consumer uses, unless such uses represent\nthe only significant mode of use of the product.\n\n  \"Installation Information\" for a User Product means any methods,\nprocedures, authorization keys, or other information required to install\nand execute modified versions of a covered work in that User Product from\na modified version of its Corresponding Source.  The information must\nsuffice to ensure that the continued functioning of the modified object\ncode is in no case prevented or interfered with solely because\nmodification has been made.\n\n  If you convey an object code work under this section in, or with, or\nspecifically for use in, a User Product, and the conveying occurs as\npart of a transaction in which the right of possession and use of the\nUser Product is transferred to the recipient in perpetuity or for a\nfixed term (regardless of how the transaction is characterized), the\nCorresponding Source conveyed under this section must be accompanied\nby the Installation Information.  But this requirement does not apply\nif neither you nor any third party retains the ability to install\nmodified object code on the User Product (for example, the work has\nbeen installed in ROM).\n\n  The requirement to provide Installation Information does not include a\nrequirement to continue to provide support service, warranty, or updates\nfor a work that has been modified or installed by the recipient, or for\nthe User Product in which it has been modified or installed.  
Access to a\nnetwork may be denied when the modification itself materially and\nadversely affects the operation of the network or violates the rules and\nprotocols for communication across the network.\n\n  Corresponding Source conveyed, and Installation Information provided,\nin accord with this section must be in a format that is publicly\ndocumented (and with an implementation available to the public in\nsource code form), and must require no special password or key for\nunpacking, reading or copying.\n\n  7. Additional Terms.\n\n  \"Additional permissions\" are terms that supplement the terms of this\nLicense by making exceptions from one or more of its conditions.\nAdditional permissions that are applicable to the entire Program shall\nbe treated as though they were included in this License, to the extent\nthat they are valid under applicable law.  If additional permissions\napply only to part of the Program, that part may be used separately\nunder those permissions, but the entire Program remains governed by\nthis License without regard to the additional permissions.\n\n  When you convey a copy of a covered work, you may at your option\nremove any additional permissions from that copy, or from any part of\nit.  (Additional permissions may be written to require their own\nremoval in certain cases when you modify the work.)  
You may place\nadditional permissions on material, added by you to a covered work,\nfor which you have or can give appropriate copyright permission.\n\n  Notwithstanding any other provision of this License, for material you\nadd to a covered work, you may (if authorized by the copyright holders of\nthat material) supplement the terms of this License with terms:\n\n    a) Disclaiming warranty or limiting liability differently from the\n    terms of sections 15 and 16 of this License; or\n\n    b) Requiring preservation of specified reasonable legal notices or\n    author attributions in that material or in the Appropriate Legal\n    Notices displayed by works containing it; or\n\n    c) Prohibiting misrepresentation of the origin of that material, or\n    requiring that modified versions of such material be marked in\n    reasonable ways as different from the original version; or\n\n    d) Limiting the use for publicity purposes of names of licensors or\n    authors of the material; or\n\n    e) Declining to grant rights under trademark law for use of some\n    trade names, trademarks, or service marks; or\n\n    f) Requiring indemnification of licensors and authors of that\n    material by anyone who conveys the material (or modified versions of\n    it) with contractual assumptions of liability to the recipient, for\n    any liability that these contractual assumptions directly impose on\n    those licensors and authors.\n\n  All other non-permissive additional terms are considered \"further\nrestrictions\" within the meaning of section 10.  If the Program as you\nreceived it, or any part of it, contains a notice stating that it is\ngoverned by this License along with a term that is a further\nrestriction, you may remove that term.  
If a license document contains\na further restriction but permits relicensing or conveying under this\nLicense, you may add to a covered work material governed by the terms\nof that license document, provided that the further restriction does\nnot survive such relicensing or conveying.\n\n  If you add terms to a covered work in accord with this section, you\nmust place, in the relevant source files, a statement of the\nadditional terms that apply to those files, or a notice indicating\nwhere to find the applicable terms.\n\n  Additional terms, permissive or non-permissive, may be stated in the\nform of a separately written license, or stated as exceptions;\nthe above requirements apply either way.\n\n  8. Termination.\n\n  You may not propagate or modify a covered work except as expressly\nprovided under this License.  Any attempt otherwise to propagate or\nmodify it is void, and will automatically terminate your rights under\nthis License (including any patent licenses granted under the third\nparagraph of section 11).\n\n  However, if you cease all violation of this License, then your\nlicense from a particular copyright holder is reinstated (a)\nprovisionally, unless and until the copyright holder explicitly and\nfinally terminates your license, and (b) permanently, if the copyright\nholder fails to notify you of the violation by some reasonable means\nprior to 60 days after the cessation.\n\n  Moreover, your license from a particular copyright holder is\nreinstated permanently if the copyright holder notifies you of the\nviolation by some reasonable means, this is the first time you have\nreceived notice of violation of this License (for any work) from that\ncopyright holder, and you cure the violation prior to 30 days after\nyour receipt of the notice.\n\n  Termination of your rights under this section does not terminate the\nlicenses of parties who have received copies or rights from you under\nthis License.  
If your rights have been terminated and not permanently\nreinstated, you do not qualify to receive new licenses for the same\nmaterial under section 10.\n\n  9. Acceptance Not Required for Having Copies.\n\n  You are not required to accept this License in order to receive or\nrun a copy of the Program.  Ancillary propagation of a covered work\noccurring solely as a consequence of using peer-to-peer transmission\nto receive a copy likewise does not require acceptance.  However,\nnothing other than this License grants you permission to propagate or\nmodify any covered work.  These actions infringe copyright if you do\nnot accept this License.  Therefore, by modifying or propagating a\ncovered work, you indicate your acceptance of this License to do so.\n\n  10. Automatic Licensing of Downstream Recipients.\n\n  Each time you convey a covered work, the recipient automatically\nreceives a license from the original licensors, to run, modify and\npropagate that work, subject to this License.  You are not responsible\nfor enforcing compliance by third parties with this License.\n\n  An \"entity transaction\" is a transaction transferring control of an\norganization, or substantially all assets of one, or subdividing an\norganization, or merging organizations.  If propagation of a covered\nwork results from an entity transaction, each party to that\ntransaction who receives a copy of the work also receives whatever\nlicenses to the work the party's predecessor in interest had or could\ngive under the previous paragraph, plus a right to possession of the\nCorresponding Source of the work from the predecessor in interest, if\nthe predecessor has it or can get it with reasonable efforts.\n\n  You may not impose any further restrictions on the exercise of the\nrights granted or affirmed under this License.  
For example, you may\nnot impose a license fee, royalty, or other charge for exercise of\nrights granted under this License, and you may not initiate litigation\n(including a cross-claim or counterclaim in a lawsuit) alleging that\nany patent claim is infringed by making, using, selling, offering for\nsale, or importing the Program or any portion of it.\n\n  11. Patents.\n\n  A \"contributor\" is a copyright holder who authorizes use under this\nLicense of the Program or a work on which the Program is based.  The\nwork thus licensed is called the contributor's \"contributor version\".\n\n  A contributor's \"essential patent claims\" are all patent claims\nowned or controlled by the contributor, whether already acquired or\nhereafter acquired, that would be infringed by some manner, permitted\nby this License, of making, using, or selling its contributor version,\nbut do not include claims that would be infringed only as a\nconsequence of further modification of the contributor version.  For\npurposes of this definition, \"control\" includes the right to grant\npatent sublicenses in a manner consistent with the requirements of\nthis License.\n\n  Each contributor grants you a non-exclusive, worldwide, royalty-free\npatent license under the contributor's essential patent claims, to\nmake, use, sell, offer for sale, import and otherwise run, modify and\npropagate the contents of its contributor version.\n\n  In the following three paragraphs, a \"patent license\" is any express\nagreement or commitment, however denominated, not to enforce a patent\n(such as an express permission to practice a patent or covenant not to\nsue for patent infringement).  
To \"grant\" such a patent license to a\nparty means to make such an agreement or commitment not to enforce a\npatent against the party.\n\n  If you convey a covered work, knowingly relying on a patent license,\nand the Corresponding Source of the work is not available for anyone\nto copy, free of charge and under the terms of this License, through a\npublicly available network server or other readily accessible means,\nthen you must either (1) cause the Corresponding Source to be so\navailable, or (2) arrange to deprive yourself of the benefit of the\npatent license for this particular work, or (3) arrange, in a manner\nconsistent with the requirements of this License, to extend the patent\nlicense to downstream recipients.  \"Knowingly relying\" means you have\nactual knowledge that, but for the patent license, your conveying the\ncovered work in a country, or your recipient's use of the covered work\nin a country, would infringe one or more identifiable patents in that\ncountry that you have reason to believe are valid.\n\n  If, pursuant to or in connection with a single transaction or\narrangement, you convey, or propagate by procuring conveyance of, a\ncovered work, and grant a patent license to some of the parties\nreceiving the covered work authorizing them to use, propagate, modify\nor convey a specific copy of the covered work, then the patent license\nyou grant is automatically extended to all recipients of the covered\nwork and works based on it.\n\n  A patent license is \"discriminatory\" if it does not include within\nthe scope of its coverage, prohibits the exercise of, or is\nconditioned on the non-exercise of one or more of the rights that are\nspecifically granted under this License.  
You may not convey a covered\nwork if you are a party to an arrangement with a third party that is\nin the business of distributing software, under which you make payment\nto the third party based on the extent of your activity of conveying\nthe work, and under which the third party grants, to any of the\nparties who would receive the covered work from you, a discriminatory\npatent license (a) in connection with copies of the covered work\nconveyed by you (or copies made from those copies), or (b) primarily\nfor and in connection with specific products or compilations that\ncontain the covered work, unless you entered into that arrangement,\nor that patent license was granted, prior to 28 March 2007.\n\n  Nothing in this License shall be construed as excluding or limiting\nany implied license or other defenses to infringement that may\notherwise be available to you under applicable patent law.\n\n  12. No Surrender of Others' Freedom.\n\n  If conditions are imposed on you (whether by court order, agreement or\notherwise) that contradict the conditions of this License, they do not\nexcuse you from the conditions of this License.  If you cannot convey a\ncovered work so as to satisfy simultaneously your obligations under this\nLicense and any other pertinent obligations, then as a consequence you may\nnot convey it at all.  For example, if you agree to terms that obligate you\nto collect a royalty for further conveying from those to whom you convey\nthe Program, the only way you could satisfy both those terms and this\nLicense would be to refrain entirely from conveying the Program.\n\n  13. 
Remote Network Interaction; Use with the GNU General Public License.\n\n  Notwithstanding any other provision of this License, if you modify the\nProgram, your modified version must prominently offer all users\ninteracting with it remotely through a computer network (if your version\nsupports such interaction) an opportunity to receive the Corresponding\nSource of your version by providing access to the Corresponding Source\nfrom a network server at no charge, through some standard or customary\nmeans of facilitating copying of software.  This Corresponding Source\nshall include the Corresponding Source for any work covered by version 3\nof the GNU General Public License that is incorporated pursuant to the\nfollowing paragraph.\n\n  Notwithstanding any other provision of this License, you have\npermission to link or combine any covered work with a work licensed\nunder version 3 of the GNU General Public License into a single\ncombined work, and to convey the resulting work.  The terms of this\nLicense will continue to apply to the part which is the covered work,\nbut the work with which it is combined will remain governed by version\n3 of the GNU General Public License.\n\n  14. Revised Versions of this License.\n\n  The Free Software Foundation may publish revised and/or new versions of\nthe GNU Affero General Public License from time to time.  Such new versions\nwill be similar in spirit to the present version, but may differ in detail to\naddress new problems or concerns.\n\n  Each version is given a distinguishing version number.  If the\nProgram specifies that a certain numbered version of the GNU Affero General\nPublic License \"or any later version\" applies to it, you have the\noption of following the terms and conditions either of that numbered\nversion or of any later version published by the Free Software\nFoundation.  
If the Program does not specify a version number of the\nGNU Affero General Public License, you may choose any version ever published\nby the Free Software Foundation.\n\n  If the Program specifies that a proxy can decide which future\nversions of the GNU Affero General Public License can be used, that proxy's\npublic statement of acceptance of a version permanently authorizes you\nto choose that version for the Program.\n\n  Later license versions may give you additional or different\npermissions.  However, no additional obligations are imposed on any\nauthor or copyright holder as a result of your choosing to follow a\nlater version.\n\n  15. Disclaimer of Warranty.\n\n  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY\nAPPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT\nHOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY\nOF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,\nTHE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\nPURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM\nIS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF\nALL NECESSARY SERVICING, REPAIR OR CORRECTION.\n\n  16. Limitation of Liability.\n\n  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\nWILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS\nTHE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY\nGENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE\nUSE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF\nDATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD\nPARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),\nEVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF\nSUCH DAMAGES.\n\n  17. 
Interpretation of Sections 15 and 16.\n\n  If the disclaimer of warranty and limitation of liability provided\nabove cannot be given local legal effect according to their terms,\nreviewing courts shall apply local law that most closely approximates\nan absolute waiver of all civil liability in connection with the\nProgram, unless a warranty or assumption of liability accompanies a\ncopy of the Program in return for a fee.\n\n                     END OF TERMS AND CONDITIONS\n\n            How to Apply These Terms to Your New Programs\n\n  If you develop a new program, and you want it to be of the greatest\npossible use to the public, the best way to achieve this is to make it\nfree software which everyone can redistribute and change under these terms.\n\n  To do so, attach the following notices to the program.  It is safest\nto attach them to the start of each source file to most effectively\nstate the exclusion of warranty; and each file should have at least\nthe \"copyright\" line and a pointer to where the full notice is found.\n\n    <one line to give the program's name and a brief idea of what it does.>\n    Copyright (C) <year>  <name of author>\n\n    This program is free software: you can redistribute it and/or modify\n    it under the terms of the GNU Affero General Public License as published by\n    the Free Software Foundation, either version 3 of the License, or\n    (at your option) any later version.\n\n    This program is distributed in the hope that it will be useful,\n    but WITHOUT ANY WARRANTY; without even the implied warranty of\n    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n    GNU Affero General Public License for more details.\n\n    You should have received a copy of the GNU Affero General Public License\n    along with this program.  
If not, see <https://www.gnu.org/licenses/>.\n\nAlso add information on how to contact you by electronic and paper mail.\n\n  If your software can interact with users remotely through a computer\nnetwork, you should also make sure that it provides a way for users to\nget its source.  For example, if your program is a web application, its\ninterface could display a \"Source\" link that leads users to an archive\nof the code.  There are many ways you could offer source, and different\nsolutions will be better for different programs; see section 13 for the\nspecific requirements.\n\n  You should also get your employer (if you work as a programmer) or school,\nif any, to sign a \"copyright disclaimer\" for the program, if necessary.\nFor more information on this, and how to apply and follow the GNU AGPL, see\n<https://www.gnu.org/licenses/>.\n"
  },
  {
    "path": "CONTRIBUTING.md",
    "content": "# Contributing\n\nContributions come in a variety of forms. Developing Serai, helping document it,\nusing its libraries in another project, using and testing it, and simply sharing\nit are all valuable ways of contributing.\n\nThis document will specifically focus on contributions to this repository in the\nform of code and documentation.\n\n### Rules\n\n- Stable native Rust, nightly wasm and tools.\n- `cargo fmt` must be used.\n- `cargo clippy` must pass, except for the ignored rules (`type_complexity` and\n`dead_code`).\n- The CI must pass.\n\n- Only use uppercase variable names when relevant to cryptography.\n\n- Use a two-space ident when possible.\n- Put a space after comment markers.\n- Don't use multiple newlines between sections of code.\n- Have a newline before EOF.\n\n### Guidelines\n\n- Sort inputs as core, std, third party, and then Serai.\n- Comment code reasonably.\n- Include tests for new features.\n- Sign commits.\n\n### Submission\n\nAll submissions should be through GitHub. Contributions to a crate will be\nlicensed according to the crate's existing license, with the crate's copyright\nholders (distinct from authors) having the right to re-license the crate via a\nunanimous decision.\n"
  },
  {
    "path": "Cargo.toml",
    "content": "[workspace]\nresolver = \"2\"\nmembers = [\n  # std patches\n  \"patches/matches\",\n\n  # Rewrites/redirects\n  \"patches/option-ext\",\n  \"patches/directories-next\",\n\n  \"common/std-shims\",\n  \"common/zalloc\",\n  \"common/patchable-async-sleep\",\n  \"common/db\",\n  \"common/env\",\n  \"common/request\",\n\n  \"crypto/transcript\",\n\n  \"crypto/ff-group-tests\",\n  \"crypto/dalek-ff-group\",\n  \"crypto/ed448\",\n  \"crypto/ciphersuite\",\n  \"crypto/ciphersuite/kp256\",\n\n  \"crypto/multiexp\",\n\n  \"crypto/schnorr\",\n  \"crypto/dleq\",\n  \"crypto/dkg\",\n  \"crypto/dkg/recovery\",\n  \"crypto/dkg/dealer\",\n  \"crypto/dkg/promote\",\n  \"crypto/dkg/musig\",\n  \"crypto/dkg/pedpop\",\n  \"crypto/frost\",\n  \"crypto/schnorrkel\",\n\n  \"networks/bitcoin\",\n\n  \"networks/ethereum/alloy-simple-request-transport\",\n  \"networks/ethereum\",\n  \"networks/ethereum/relayer\",\n\n  \"message-queue\",\n\n  \"processor/messages\",\n  \"processor\",\n\n  \"coordinator/tributary/tendermint\",\n  \"coordinator/tributary\",\n  \"coordinator\",\n\n  \"substrate/primitives\",\n\n  \"substrate/coins/primitives\",\n  \"substrate/coins/pallet\",\n\n  \"substrate/dex/pallet\",\n\n  \"substrate/validator-sets/primitives\",\n  \"substrate/validator-sets/pallet\",\n\n  \"substrate/genesis-liquidity/primitives\",\n  \"substrate/genesis-liquidity/pallet\",\n\n  \"substrate/emissions/primitives\",\n  \"substrate/emissions/pallet\",\n\n  \"substrate/economic-security/pallet\",\n\n  \"substrate/in-instructions/primitives\",\n  \"substrate/in-instructions/pallet\",\n\n  \"substrate/signals/primitives\",\n  \"substrate/signals/pallet\",\n\n  \"substrate/abi\",\n\n  \"substrate/runtime\",\n  \"substrate/node\",\n\n  \"substrate/client\",\n\n  \"orchestration\",\n\n  \"mini\",\n\n  \"tests/no-std\",\n\n  \"tests/docker\",\n  \"tests/message-queue\",\n  \"tests/processor\",\n  \"tests/coordinator\",\n  \"tests/full-stack\",\n  
\"tests/reproducible-runtime\",\n]\n\n# Always compile Monero (and a variety of dependencies) with optimizations due\n# to the extensive operations required for Bulletproofs\n[profile.dev.package]\nsubtle = { opt-level = 3 }\ncurve25519-dalek = { opt-level = 3 }\n\nff = { opt-level = 3 }\ngroup = { opt-level = 3 }\n\ncrypto-bigint = { opt-level = 3 }\ndalek-ff-group = { opt-level = 3 }\nminimal-ed448 = { opt-level = 3 }\n\nmultiexp = { opt-level = 3 }\n\nmonero-oxide = { opt-level = 3 }\n\n[profile.release]\npanic = \"unwind\"\noverflow-checks = true\n\n[patch.crates-io]\n# Dependencies from monero-oxide which originate from within our own tree\nstd-shims = { path = \"common/std-shims\" }\nsimple-request = { path = \"common/request\" }\ndalek-ff-group = { path = \"crypto/dalek-ff-group\" }\nflexible-transcript = { path = \"crypto/transcript\" }\nmodular-frost = { path = \"crypto/frost\" }\n\n# https://github.com/rust-lang-nursery/lazy-static.rs/issues/201\nlazy_static = { git = \"https://github.com/rust-lang-nursery/lazy-static.rs\", rev = \"5735630d46572f1e5377c8f2ba0f79d18f53b10c\" }\n\n# These have `std` alternatives\nmatches = { path = \"patches/matches\" }\nhome = { path = \"patches/home\" }\n\n# directories-next was created because directories was unmaintained\n# directories-next is now unmaintained while directories is maintained\n# The directories author pulls in ridiculously pointless crates and prefers\n# copyleft licenses\n# The following two patches resolve everything\noption-ext = { path = \"patches/option-ext\" }\ndirectories-next = { path = \"patches/directories-next\" }\n\n[workspace.lints.clippy]\nuninlined_format_args = \"allow\" # TODO\nunwrap_or_default = \"allow\"\nmanual_is_multiple_of = \"allow\"\nincompatible_msrv = \"allow\" # Manually verified with a GitHub workflow\nborrow_as_ptr = \"deny\"\ncast_lossless = \"deny\"\ncast_possible_truncation = \"deny\"\ncast_possible_wrap = \"deny\"\ncast_precision_loss = \"deny\"\ncast_ptr_alignment = 
\"deny\"\ncast_sign_loss = \"deny\"\nchecked_conversions = \"deny\"\ncloned_instead_of_copied = \"deny\"\nenum_glob_use = \"deny\"\nexpl_impl_clone_on_copy = \"deny\"\nexplicit_into_iter_loop = \"deny\"\nexplicit_iter_loop = \"deny\"\nflat_map_option = \"deny\"\nfloat_cmp = \"deny\"\nfn_params_excessive_bools = \"deny\"\nignored_unit_patterns = \"deny\"\nimplicit_clone = \"deny\"\ninefficient_to_string = \"deny\"\ninvalid_upcast_comparisons = \"deny\"\nlarge_stack_arrays = \"deny\"\nlinkedlist = \"deny\"\nmacro_use_imports = \"deny\"\nmanual_instant_elapsed = \"deny\"\n# TODO manual_let_else = \"deny\"\nmanual_ok_or = \"deny\"\nmanual_string_new = \"deny\"\nmap_unwrap_or = \"deny\"\nmatch_bool = \"deny\"\nmatch_same_arms = \"deny\"\nmissing_fields_in_debug = \"deny\"\n# TODO needless_continue = \"deny\"\nneedless_pass_by_value = \"deny\"\nptr_cast_constness = \"deny\"\nrange_minus_one = \"deny\"\nrange_plus_one = \"deny\"\nredundant_closure_for_method_calls = \"deny\"\nredundant_else = \"deny\"\nstring_add_assign = \"deny\"\nunchecked_time_subtraction = \"deny\"\nunnecessary_box_returns = \"deny\"\nunnecessary_join = \"deny\"\nunnecessary_wraps = \"deny\"\nunnested_or_patterns = \"deny\"\nunused_async = \"deny\"\nunused_self = \"deny\"\nzero_sized_map_values = \"deny\"\n\n# TODO: These were incurred when updating Rust as necessary for compilation, yet aren't being fixed\n# at this time due to the impacts it'd have throughout the repository (when this isn't actively the\n# primary branch, `next` is)\nneedless_continue = \"allow\"\nneedless_lifetimes = \"allow\"\nuseless_conversion = \"allow\"\nempty_line_after_doc_comments = \"allow\"\nmanual_div_ceil = \"allow\"\nmanual_let_else = \"allow\"\nunnecessary_map_or = \"allow\"\nresult_large_err = \"allow\"\nunneeded_struct_pattern = \"allow\"\n[workspace.lints.rust]\nunused = \"allow\" # TODO: https://github.com/rust-lang/rust/issues/147648\nmismatched_lifetime_syntaxes = \"allow\"\nunused_attributes = 
\"allow\"\nunused_parens = \"allow\"\n"
  },
  {
    "path": "LICENSE",
    "content": "Serai crates are licensed under one of two licenses, either MIT or AGPL-3.0,\ndepending on the crate in question. Each crate declares their license in their\n`Cargo.toml` and includes a `LICENSE` file detailing its status. Additionally,\na full copy of the AGPL-3.0 License is included in the root of this repository\nas a reference text. This copy should be provided with any distribution of a\ncrate licensed under the AGPL-3.0, as per its terms.\n\nThe GitHub actions/workflows (`.github`) are licensed under the MIT license.\n"
  },
  {
    "path": "README.md",
    "content": "# Serai\n\nSerai is a new DEX, built from the ground up, initially planning on listing\nBitcoin, Ethereum, DAI, and Monero, offering a liquidity-pool-based trading\nexperience. Funds are stored in an economically secured threshold-multisig\nwallet.\n\n[Getting Started](spec/Getting%20Started.md)\n\n### Layout\n\n- `audits`: Audits for various parts of Serai.\n\n- `spec`: The specification of the Serai protocol, both internally and as\n  networked.\n\n- `docs`: User-facing documentation on the Serai protocol.\n\n- `common`: Crates containing utilities common to a variety of areas under\n  Serai, none neatly fitting under another category.\n\n- `crypto`: A series of composable cryptographic libraries built around the\n  `ff`/`group` APIs, achieving a variety of tasks. These range from generic\n  infrastructure, to our IETF-compliant FROST implementation, to a DLEq proof as\n  needed for Bitcoin-Monero atomic swaps.\n\n- `networks`: Various libraries intended for usage in Serai yet also by the\n  wider community. This means they will always support the functionality Serai\n  needs, yet won't disadvantage other use cases when possible.\n\n- `message-queue`: An ordered message server so services can talk to each other,\n  even when the other is offline.\n\n- `processor`: A generic chain processor to process data for Serai and process\n  events from Serai, executing transactions as expected and needed.\n\n- `coordinator`: A service to manage processors and communicate over a P2P\n  network with other validators.\n\n- `substrate`: Substrate crates used to instantiate the Serai network.\n\n- `orchestration`: Dockerfiles and scripts to deploy a Serai node/test\n  environment.\n\n- `tests`: Tests for various crates. Generally, `crate/src/tests` is used, or\n  `crate/tests`, yet any tests requiring crates' binaries are placed here.\n\n### Security\n\nSerai hosts a bug bounty program via\n[Immunefi](https://immunefi.com/bounty/serai/). 
For in-scope critical\nvulnerabilities, we will reward whitehats with up to $30,000.\n\nAnything not in-scope should still be submitted through Immunefi, with rewards\nissued at the discretion of the Immunefi program managers.\n\n### Links\n\n- [Website](https://serai.exchange/): https://serai.exchange/\n- [Immunefi](https://immunefi.com/bounty/serai/): https://immunefi.com/bounty/serai/\n- [Twitter](https://twitter.com/SeraiDEX): https://twitter.com/SeraiDEX\n- [Discord](https://discord.gg/mpEUtJR3vz): https://discord.gg/mpEUtJR3vz\n- [Matrix](https://matrix.to/#/#serai:matrix.org): https://matrix.to/#/#serai:matrix.org\n- [Reddit](https://www.reddit.com/r/SeraiDEX/): https://www.reddit.com/r/SeraiDEX/\n- [Telegram](https://t.me/SeraiDEX): https://t.me/SeraiDEX\n"
  },
  {
    "path": "audits/Cypher Stack crypto March 2023/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2023 Cypher Stack\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "audits/Cypher Stack crypto March 2023/README.md",
    "content": "# Cypher Stack /crypto Audit, March 2023\n\nThis audit was over the /crypto folder, excluding the ed448 crate, the `Ed448`\nciphersuite in the ciphersuite crate, and the `dleq/experimental` feature. It is\nencompassing up to commit 669d2dbffc1dafb82a09d9419ea182667115df06.\n\nPlease see https://github.com/cypherstack/serai-audit for provenance.\n"
  },
  {
    "path": "audits/Cypher Stack networks bitcoin August 2023/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2023 Cypher Stack\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "audits/Cypher Stack networks bitcoin August 2023/README.md",
    "content": "# Cypher Stack /networks/bitcoin Audit, August 2023\n\nThis audit was over the `/networks/bitcoin` folder (at the time located at\n`/coins/bitcoin`). It is encompassing up to commit\n5121ca75199dff7bd34230880a1fdd793012068c.\n\nPlease see https://github.com/cypherstack/serai-btc-audit for provenance.\n"
  },
  {
    "path": "common/db/Cargo.toml",
    "content": "[package]\nname = \"serai-db\"\nversion = \"0.1.0\"\ndescription = \"A simple database trait and backends for it\"\nlicense = \"MIT\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/common/db\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nkeywords = []\nedition = \"2021\"\nrust-version = \"1.65\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nparity-db = { version = \"0.4\", default-features = false, optional = true }\nrocksdb = { version = \"0.24\", default-features = false, features = [\"zstd\"], optional = true }\n\n[features]\nparity-db = [\"dep:parity-db\"]\nrocksdb = [\"dep:rocksdb\"]\n"
  },
  {
    "path": "common/db/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2022-2023 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "common/db/src/create_db.rs",
    "content": "#[doc(hidden)]\npub fn serai_db_key(\n  db_dst: &'static [u8],\n  item_dst: &'static [u8],\n  key: impl AsRef<[u8]>,\n) -> Vec<u8> {\n  let db_len = u8::try_from(db_dst.len()).unwrap();\n  let dst_len = u8::try_from(item_dst.len()).unwrap();\n  [[db_len].as_ref(), db_dst, [dst_len].as_ref(), item_dst, key.as_ref()].concat()\n}\n\n/// Creates a series of structs which provide namespacing for keys\n///\n/// # Description\n///\n/// Creates a unit struct and a default implementation for the `key`, `get`, and `set`. The macro\n/// uses a syntax similar to defining a function. Parameters are concatenated to produce a key,\n/// they must be `scale` encodable. The return type is used to auto encode and decode the database\n/// value bytes using `borsh`.\n///\n/// # Arguments\n///\n/// * `db_name` - A database name\n/// * `field_name` - An item name\n/// * `args` - Comma separated list of key arguments\n/// * `field_type` - The return type\n///\n/// # Example\n///\n/// ```ignore\n/// create_db!(\n///   TributariesDb {\n///     AttemptsDb: (key_bytes: &[u8], attempt_id: u32) -> u64,\n///     ExpiredDb: (genesis: [u8; 32]) -> Vec<u8>\n///   }\n/// )\n/// ```\n#[macro_export]\nmacro_rules! 
create_db {\n  ($db_name: ident {\n    $($field_name: ident: ($($arg: ident: $arg_type: ty),*) -> $field_type: ty$(,)?)*\n  }) => {\n    $(\n      #[derive(Clone, Debug)]\n      pub(crate) struct $field_name;\n      impl $field_name {\n        pub(crate) fn key($($arg: $arg_type),*) -> Vec<u8> {\n          use scale::Encode;\n          $crate::serai_db_key(\n            stringify!($db_name).as_bytes(),\n            stringify!($field_name).as_bytes(),\n            ($($arg),*).encode()\n          )\n        }\n        pub(crate) fn set(txn: &mut impl DbTxn $(, $arg: $arg_type)*, data: &$field_type) {\n          let key = $field_name::key($($arg),*);\n          txn.put(&key, borsh::to_vec(data).unwrap());\n        }\n        pub(crate) fn get(getter: &impl Get, $($arg: $arg_type),*) -> Option<$field_type> {\n          getter.get($field_name::key($($arg),*)).map(|data| {\n            borsh::from_slice(data.as_ref()).unwrap()\n          })\n        }\n        #[allow(dead_code)]\n        pub(crate) fn del(txn: &mut impl DbTxn $(, $arg: $arg_type)*) {\n          txn.del(&$field_name::key($($arg),*))\n        }\n      }\n    )*\n  };\n}\n\n#[macro_export]\nmacro_rules! db_channel {\n  ($db_name: ident {\n    $($field_name: ident: ($($arg: ident: $arg_type: ty),*) -> $field_type: ty$(,)?)*\n  }) => {\n    $(\n      create_db! 
{\n        $db_name {\n          $field_name: ($($arg: $arg_type,)* index: u32) -> $field_type,\n        }\n      }\n\n      impl $field_name {\n        pub(crate) fn send(txn: &mut impl DbTxn $(, $arg: $arg_type)*, value: &$field_type) {\n          // Use index 0 to store the amount of messages\n          let messages_sent_key = $field_name::key($($arg),*, 0);\n          let messages_sent = txn.get(&messages_sent_key).map(|counter| {\n            u32::from_le_bytes(counter.try_into().unwrap())\n          }).unwrap_or(0);\n          txn.put(&messages_sent_key, (messages_sent + 1).to_le_bytes());\n\n          // + 2 as index 1 is used for the amount of messages read\n          // Using distinct counters enables send to be called without mutating anything recv may\n          // at the same time\n          let index_to_use = messages_sent + 2;\n\n          $field_name::set(txn, $($arg),*, index_to_use, value);\n        }\n        pub(crate) fn try_recv(txn: &mut impl DbTxn $(, $arg: $arg_type)*) -> Option<$field_type> {\n          let messages_recvd_key = $field_name::key($($arg),*, 1);\n          let messages_recvd = txn.get(&messages_recvd_key).map(|counter| {\n            u32::from_le_bytes(counter.try_into().unwrap())\n          }).unwrap_or(0);\n\n          let index_to_read = messages_recvd + 2;\n\n          let res = $field_name::get(txn, $($arg),*, index_to_read);\n          if res.is_some() {\n            $field_name::del(txn, $($arg),*, index_to_read);\n            txn.put(&messages_recvd_key, (messages_recvd + 1).to_le_bytes());\n          }\n          res\n        }\n      }\n    )*\n  };\n}\n"
  },
  {
    "path": "common/db/src/lib.rs",
    "content": "mod create_db;\npub use create_db::*;\n\nmod mem;\npub use mem::*;\n\n#[cfg(feature = \"rocksdb\")]\nmod rocks;\n#[cfg(feature = \"rocksdb\")]\npub use rocks::{RocksDB, new_rocksdb};\n\n#[cfg(feature = \"parity-db\")]\nmod parity_db;\n#[cfg(feature = \"parity-db\")]\npub use parity_db::{ParityDb, new_parity_db};\n\n/// An object implementing get.\npub trait Get {\n  fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>>;\n}\n\n/// An atomic database operation.\n#[must_use]\npub trait DbTxn: Send + Get {\n  fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>);\n  fn del(&mut self, key: impl AsRef<[u8]>);\n  fn commit(self);\n}\n\n/// A database supporting atomic operations.\npub trait Db: 'static + Send + Sync + Clone + Get {\n  type Transaction<'a>: DbTxn;\n  fn key(db_dst: &'static [u8], item_dst: &'static [u8], key: impl AsRef<[u8]>) -> Vec<u8> {\n    let db_len = u8::try_from(db_dst.len()).unwrap();\n    let dst_len = u8::try_from(item_dst.len()).unwrap();\n    [[db_len].as_ref(), db_dst, [dst_len].as_ref(), item_dst, key.as_ref()].concat()\n  }\n  fn txn(&mut self) -> Self::Transaction<'_>;\n}\n"
  },
  {
    "path": "common/db/src/mem.rs",
    "content": "use core::fmt::Debug;\nuse std::{\n  sync::{Arc, RwLock},\n  collections::{HashSet, HashMap},\n};\n\nuse crate::*;\n\n/// An atomic operation for the in-memory database.\n#[must_use]\n#[derive(PartialEq, Eq, Debug)]\npub struct MemDbTxn<'a>(&'a MemDb, HashMap<Vec<u8>, Vec<u8>>, HashSet<Vec<u8>>);\n\nimpl<'a> Get for MemDbTxn<'a> {\n  fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {\n    if self.2.contains(key.as_ref()) {\n      return None;\n    }\n    self\n      .1\n      .get(key.as_ref())\n      .cloned()\n      .or_else(|| self.0 .0.read().unwrap().get(key.as_ref()).cloned())\n  }\n}\nimpl<'a> DbTxn for MemDbTxn<'a> {\n  fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) {\n    self.2.remove(key.as_ref());\n    self.1.insert(key.as_ref().to_vec(), value.as_ref().to_vec());\n  }\n  fn del(&mut self, key: impl AsRef<[u8]>) {\n    self.1.remove(key.as_ref());\n    self.2.insert(key.as_ref().to_vec());\n  }\n  fn commit(mut self) {\n    let mut db = self.0 .0.write().unwrap();\n    for (key, value) in self.1.drain() {\n      db.insert(key, value);\n    }\n    for key in self.2 {\n      db.remove(&key);\n    }\n  }\n}\n\n/// An in-memory database.\n#[derive(Clone, Debug)]\npub struct MemDb(Arc<RwLock<HashMap<Vec<u8>, Vec<u8>>>>);\n\nimpl PartialEq for MemDb {\n  fn eq(&self, other: &MemDb) -> bool {\n    *self.0.read().unwrap() == *other.0.read().unwrap()\n  }\n}\nimpl Eq for MemDb {}\n\nimpl Default for MemDb {\n  fn default() -> MemDb {\n    MemDb(Arc::new(RwLock::new(HashMap::new())))\n  }\n}\n\nimpl MemDb {\n  /// Create a new in-memory database.\n  pub fn new() -> MemDb {\n    MemDb::default()\n  }\n}\n\nimpl Get for MemDb {\n  fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {\n    self.0.read().unwrap().get(key.as_ref()).cloned()\n  }\n}\nimpl Db for MemDb {\n  type Transaction<'a> = MemDbTxn<'a>;\n  fn txn(&mut self) -> MemDbTxn<'_> {\n    MemDbTxn(self, HashMap::new(), HashSet::new())\n  }\n}\n"
  },
  {
    "path": "common/db/src/parity_db.rs",
    "content": "use std::sync::Arc;\n\npub use ::parity_db::{Options, Db as ParityDb};\n\nuse crate::*;\n\n#[must_use]\npub struct Transaction<'a>(&'a Arc<ParityDb>, Vec<(u8, Vec<u8>, Option<Vec<u8>>)>);\n\nimpl Get for Transaction<'_> {\n  fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {\n    let mut res = self.0.get(&key);\n    for change in &self.1 {\n      if change.1 == key.as_ref() {\n        res.clone_from(&change.2);\n      }\n    }\n    res\n  }\n}\nimpl DbTxn for Transaction<'_> {\n  fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) {\n    self.1.push((0, key.as_ref().to_vec(), Some(value.as_ref().to_vec())))\n  }\n  fn del(&mut self, key: impl AsRef<[u8]>) {\n    self.1.push((0, key.as_ref().to_vec(), None))\n  }\n  fn commit(self) {\n    self.0.commit(self.1).unwrap()\n  }\n}\n\nimpl Get for Arc<ParityDb> {\n  fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {\n    ParityDb::get(self, 0, key.as_ref()).unwrap()\n  }\n}\nimpl Db for Arc<ParityDb> {\n  type Transaction<'a> = Transaction<'a>;\n  fn txn(&mut self) -> Self::Transaction<'_> {\n    Transaction(self, vec![])\n  }\n}\n\npub fn new_parity_db(path: &str) -> Arc<ParityDb> {\n  Arc::new(ParityDb::open_or_create(&Options::with_columns(std::path::Path::new(path), 1)).unwrap())\n}\n"
  },
  {
    "path": "common/db/src/rocks.rs",
    "content": "use std::sync::Arc;\n\nuse rocksdb::{\n  DBCompressionType, ThreadMode, SingleThreaded, LogLevel, WriteOptions,\n  Transaction as RocksTransaction, Options, OptimisticTransactionDB,\n};\n\nuse crate::*;\n\n#[must_use]\npub struct Transaction<'a, T: ThreadMode>(\n  RocksTransaction<'a, OptimisticTransactionDB<T>>,\n  &'a OptimisticTransactionDB<T>,\n);\n\nimpl<T: ThreadMode> Get for Transaction<'_, T> {\n  fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {\n    self.0.get(key).expect(\"couldn't read from RocksDB via transaction\")\n  }\n}\nimpl<T: ThreadMode> DbTxn for Transaction<'_, T> {\n  fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) {\n    self.0.put(key, value).expect(\"couldn't write to RocksDB via transaction\")\n  }\n  fn del(&mut self, key: impl AsRef<[u8]>) {\n    self.0.delete(key).expect(\"couldn't delete from RocksDB via transaction\")\n  }\n  fn commit(self) {\n    self.0.commit().expect(\"couldn't commit to RocksDB via transaction\");\n    self.1.flush_wal(true).expect(\"couldn't flush RocksDB WAL\");\n    self.1.flush().expect(\"couldn't flush RocksDB\");\n  }\n}\n\nimpl<T: ThreadMode> Get for Arc<OptimisticTransactionDB<T>> {\n  fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {\n    OptimisticTransactionDB::get(self, key).expect(\"couldn't read from RocksDB\")\n  }\n}\nimpl<T: Send + ThreadMode + 'static> Db for Arc<OptimisticTransactionDB<T>> {\n  type Transaction<'a> = Transaction<'a, T>;\n  fn txn(&mut self) -> Self::Transaction<'_> {\n    let mut opts = WriteOptions::default();\n    opts.set_sync(true);\n    Transaction(self.transaction_opt(&opts, &Default::default()), &**self)\n  }\n}\n\npub type RocksDB = Arc<OptimisticTransactionDB<SingleThreaded>>;\npub fn new_rocksdb(path: &str) -> RocksDB {\n  let mut options = Options::default();\n  options.create_if_missing(true);\n  options.set_compression_type(DBCompressionType::Zstd);\n\n  options.set_wal_compression_type(DBCompressionType::Zstd);\n  
// 10 MB\n  options.set_max_total_wal_size(10 * 1024 * 1024);\n  options.set_wal_size_limit_mb(10);\n\n  options.set_log_level(LogLevel::Warn);\n  // 1 MB\n  options.set_max_log_file_size(1024 * 1024);\n  options.set_recycle_log_file_num(1);\n\n  Arc::new(OptimisticTransactionDB::open(&options, path).unwrap())\n}\n"
  },
  {
    "path": "common/env/Cargo.toml",
    "content": "[package]\nname = \"serai-env\"\nversion = \"0.1.0\"\ndescription = \"A common library for Serai apps to access environment variables\"\nlicense = \"AGPL-3.0-only\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/common/env\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nkeywords = []\nedition = \"2021\"\nrust-version = \"1.60\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n"
  },
  {
    "path": "common/env/LICENSE",
    "content": "AGPL-3.0-only license\n\nCopyright (c) 2023 Luke Parker\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License Version 3 as\npublished by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n"
  },
  {
    "path": "common/env/src/lib.rs",
    "content": "#![cfg_attr(docsrs, feature(doc_cfg))]\n\n// Obtain a variable from the Serai environment/secret store.\npub fn var(variable: &str) -> Option<String> {\n  // TODO: Move this to a proper secret store\n  // TODO: Unset this variable\n  std::env::var(variable).ok()\n}\n"
  },
  {
    "path": "common/patchable-async-sleep/Cargo.toml",
    "content": "[package]\nname = \"patchable-async-sleep\"\nversion = \"0.1.0\"\ndescription = \"An async sleep function, patchable to the preferred runtime\"\nlicense = \"MIT\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/common/patchable-async-sleep\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nkeywords = [\"async\", \"sleep\", \"tokio\", \"smol\", \"async-std\"]\nedition = \"2021\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\ntokio = { version = \"1\", default-features = false, features = [ \"time\"] }\n"
  },
  {
    "path": "common/patchable-async-sleep/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2024 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "common/patchable-async-sleep/README.md",
    "content": "# Patchable Async Sleep\n\nAn async sleep function, patchable to the preferred runtime.\n\nThis crate is `tokio`-backed. Applications which don't want to use `tokio`\nshould patch this crate to one which works witht heir preferred runtime. The\npoint of it is to have a minimal API surface to trivially facilitate such work.\n"
  },
  {
    "path": "common/patchable-async-sleep/src/lib.rs",
    "content": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![doc = include_str!(\"../README.md\")]\n#![deny(missing_docs)]\n\nuse core::time::Duration;\n\n/// Sleep for the specified duration.\npub fn sleep(duration: Duration) -> impl core::future::Future<Output = ()> {\n  tokio::time::sleep(duration)\n}\n"
  },
  {
    "path": "common/request/Cargo.toml",
    "content": "[package]\nname = \"simple-request\"\nversion = \"0.1.0\"\ndescription = \"A simple HTTP(S) request library\"\nlicense = \"MIT\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/common/simple-request\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nkeywords = [\"http\", \"https\", \"async\", \"request\", \"ssl\"]\nedition = \"2021\"\nrust-version = \"1.70\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\ntower-service = { version = \"0.3\", default-features = false }\nhyper = { version = \"1\", default-features = false, features = [\"http1\", \"client\"] }\nhyper-util = { version = \"0.1\", default-features = false, features = [\"http1\", \"client-legacy\", \"tokio\"] }\nhttp-body-util = { version = \"0.1\", default-features = false }\ntokio = { version = \"1\", default-features = false }\n\nhyper-rustls = { version = \"0.27\", default-features = false, features = [\"http1\", \"ring\", \"rustls-native-certs\", \"native-tokio\"], optional = true }\n\nzeroize = { version = \"1\", optional = true }\nbase64ct = { version = \"1\", features = [\"alloc\"], optional = true }\n\n[features]\ntls = [\"hyper-rustls\"]\nbasic-auth = [\"zeroize\", \"base64ct\"]\ndefault = [\"tls\"]\n"
  },
  {
    "path": "common/request/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2023 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "common/request/README.md",
    "content": "# Simple Request\n\nA simple alternative to reqwest, supporting HTTPS, intended to support a\nmajority of use cases with a fraction of the dependency tree.\n\nThis library is built directly around `hyper` and `hyper-rustls`, and does\nrequire `tokio`. Support for `async-std` would be welcome.\n"
  },
  {
    "path": "common/request/src/lib.rs",
    "content": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![doc = include_str!(\"../README.md\")]\n\nuse std::sync::Arc;\n\nuse tokio::sync::Mutex;\n\nuse tower_service::Service as TowerService;\n#[cfg(feature = \"tls\")]\nuse hyper_rustls::{HttpsConnectorBuilder, HttpsConnector};\nuse hyper::{Uri, header::HeaderValue, body::Bytes, client::conn::http1::SendRequest};\nuse hyper_util::{\n  rt::tokio::TokioExecutor,\n  client::legacy::{Client as HyperClient, connect::HttpConnector},\n};\npub use hyper;\n\nmod request;\npub use request::*;\n\nmod response;\npub use response::*;\n\n#[derive(Debug)]\npub enum Error {\n  InvalidUri,\n  MissingHost,\n  InconsistentHost,\n  ConnectionError(Box<dyn Send + Sync + std::error::Error>),\n  Hyper(hyper::Error),\n  HyperUtil(hyper_util::client::legacy::Error),\n}\n\n#[cfg(not(feature = \"tls\"))]\ntype Connector = HttpConnector;\n#[cfg(feature = \"tls\")]\ntype Connector = HttpsConnector<HttpConnector>;\n\n#[derive(Clone, Debug)]\nenum Connection {\n  ConnectionPool(HyperClient<Connector, Full<Bytes>>),\n  Connection {\n    connector: Connector,\n    host: Uri,\n    connection: Arc<Mutex<Option<SendRequest<Full<Bytes>>>>>,\n  },\n}\n\n#[derive(Clone, Debug)]\npub struct Client {\n  connection: Connection,\n}\n\nimpl Client {\n  fn connector() -> Connector {\n    let mut res = HttpConnector::new();\n    res.set_keepalive(Some(core::time::Duration::from_secs(60)));\n    res.set_nodelay(true);\n    res.set_reuse_address(true);\n    #[cfg(feature = \"tls\")]\n    res.enforce_http(false);\n    #[cfg(feature = \"tls\")]\n    let res = HttpsConnectorBuilder::new()\n      .with_native_roots()\n      .expect(\"couldn't fetch system's SSL roots\")\n      .https_or_http()\n      .enable_http1()\n      .wrap_connector(res);\n    res\n  }\n\n  pub fn with_connection_pool() -> Client {\n    Client {\n      connection: Connection::ConnectionPool(\n        HyperClient::builder(TokioExecutor::new())\n          
.pool_idle_timeout(core::time::Duration::from_secs(60))\n          .build(Self::connector()),\n      ),\n    }\n  }\n\n  pub fn without_connection_pool(host: &str) -> Result<Client, Error> {\n    Ok(Client {\n      connection: Connection::Connection {\n        connector: Self::connector(),\n        host: {\n          let uri: Uri = host.parse().map_err(|_| Error::InvalidUri)?;\n          if uri.host().is_none() {\n            Err(Error::MissingHost)?;\n          };\n          uri\n        },\n        connection: Arc::new(Mutex::new(None)),\n      },\n    })\n  }\n\n  pub async fn request<R: Into<Request>>(&self, request: R) -> Result<Response<'_>, Error> {\n    let request: Request = request.into();\n    let mut request = request.0;\n    if let Some(header_host) = request.headers().get(hyper::header::HOST) {\n      match &self.connection {\n        Connection::ConnectionPool(_) => {}\n        Connection::Connection { host, .. } => {\n          if header_host.to_str().map_err(|_| Error::InvalidUri)? != host.host().unwrap() {\n            Err(Error::InconsistentHost)?;\n          }\n        }\n      }\n    } else {\n      let host = match &self.connection {\n        Connection::ConnectionPool(_) => {\n          request.uri().host().ok_or(Error::MissingHost)?.to_string()\n        }\n        Connection::Connection { host, .. 
} => {\n          let host_str = host.host().unwrap();\n          if let Some(uri_host) = request.uri().host() {\n            if host_str != uri_host {\n              Err(Error::InconsistentHost)?;\n            }\n          }\n          host_str.to_string()\n        }\n      };\n      request\n        .headers_mut()\n        .insert(hyper::header::HOST, HeaderValue::from_str(&host).map_err(|_| Error::InvalidUri)?);\n    }\n\n    let response = match &self.connection {\n      Connection::ConnectionPool(client) => {\n        client.request(request).await.map_err(Error::HyperUtil)?\n      }\n      Connection::Connection { connector, host, connection } => {\n        let mut connection_lock = connection.lock().await;\n\n        // If there's not a connection...\n        if connection_lock.is_none() {\n          let call_res = connector.clone().call(host.clone()).await;\n          #[cfg(not(feature = \"tls\"))]\n          let call_res = call_res.map_err(|e| Error::ConnectionError(format!(\"{e:?}\").into()));\n          #[cfg(feature = \"tls\")]\n          let call_res = call_res.map_err(Error::ConnectionError);\n          let (requester, connection) =\n            hyper::client::conn::http1::handshake(call_res?).await.map_err(Error::Hyper)?;\n          // This will die when we drop the requester, so we don't need to track an AbortHandle\n          // for it\n          tokio::spawn(connection);\n          *connection_lock = Some(requester);\n        }\n\n        let connection = connection_lock.as_mut().unwrap();\n        let mut err = connection.ready().await.err();\n        if err.is_none() {\n          // Send the request\n          let res = connection.send_request(request).await;\n          if let Ok(res) = res {\n            return Ok(Response(res, self));\n          }\n          err = res.err();\n        }\n        // Since this connection has been put into an error state, drop it\n        *connection_lock = None;\n        Err(Error::Hyper(err.unwrap()))?\n      
}\n    };\n\n    Ok(Response(response, self))\n  }\n}\n"
  },
  {
    "path": "common/request/src/request.rs",
    "content": "use hyper::body::Bytes;\n#[cfg(feature = \"basic-auth\")]\nuse hyper::header::HeaderValue;\npub use http_body_util::Full;\n\n#[cfg(feature = \"basic-auth\")]\nuse crate::Error;\n\n#[derive(Debug)]\npub struct Request(pub(crate) hyper::Request<Full<Bytes>>);\nimpl Request {\n  #[cfg(feature = \"basic-auth\")]\n  fn username_password_from_uri(&self) -> Result<(String, String), Error> {\n    if let Some(authority) = self.0.uri().authority() {\n      let authority = authority.as_str();\n      if authority.contains('@') {\n        // Decode the username and password from the URI\n        let mut userpass = authority.split('@').next().unwrap().to_string();\n\n        let mut userpass_iter = userpass.split(':');\n        let username = userpass_iter.next().unwrap().to_string();\n        let password = userpass_iter.next().map_or_else(String::new, str::to_string);\n        zeroize::Zeroize::zeroize(&mut userpass);\n\n        return Ok((username, password));\n      }\n    }\n    Err(Error::InvalidUri)\n  }\n\n  #[cfg(feature = \"basic-auth\")]\n  pub fn basic_auth(&mut self, username: &str, password: &str) {\n    use zeroize::Zeroize;\n    use base64ct::{Encoding, Base64};\n\n    let mut formatted = format!(\"{username}:{password}\");\n    let mut encoded = Base64::encode_string(formatted.as_bytes());\n    formatted.zeroize();\n    self.0.headers_mut().insert(\n      hyper::header::AUTHORIZATION,\n      HeaderValue::from_str(&format!(\"Basic {encoded}\")).unwrap(),\n    );\n    encoded.zeroize();\n  }\n\n  #[cfg(feature = \"basic-auth\")]\n  pub fn basic_auth_from_uri(&mut self) -> Result<(), Error> {\n    let (mut username, mut password) = self.username_password_from_uri()?;\n    self.basic_auth(&username, &password);\n\n    use zeroize::Zeroize;\n    username.zeroize();\n    password.zeroize();\n\n    Ok(())\n  }\n\n  #[cfg(feature = \"basic-auth\")]\n  pub fn with_basic_auth(&mut self) {\n    let _ = self.basic_auth_from_uri();\n  }\n}\nimpl 
From<hyper::Request<Full<Bytes>>> for Request {\n  fn from(request: hyper::Request<Full<Bytes>>) -> Request {\n    Request(request)\n  }\n}\n"
  },
  {
    "path": "common/request/src/response.rs",
    "content": "use hyper::{\n  StatusCode,\n  header::{HeaderValue, HeaderMap},\n  body::{Buf, Incoming},\n};\nuse http_body_util::BodyExt;\n\nuse crate::{Client, Error};\n\n// Borrows the client so its async task lives as long as this response exists.\n#[allow(dead_code)]\n#[derive(Debug)]\npub struct Response<'a>(pub(crate) hyper::Response<Incoming>, pub(crate) &'a Client);\nimpl<'a> Response<'a> {\n  pub fn status(&self) -> StatusCode {\n    self.0.status()\n  }\n  pub fn headers(&self) -> &HeaderMap<HeaderValue> {\n    self.0.headers()\n  }\n  pub async fn body(self) -> Result<impl std::io::Read, Error> {\n    Ok(self.0.into_body().collect().await.map_err(Error::Hyper)?.aggregate().reader())\n  }\n}\n"
  },
  {
    "path": "common/std-shims/Cargo.toml",
    "content": "[package]\nname = \"std-shims\"\nversion = \"0.1.4\"\ndescription = \"A series of std shims to make alloc more feasible\"\nlicense = \"MIT\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/common/std-shims\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nkeywords = [\"nostd\", \"no_std\", \"alloc\", \"io\"]\nedition = \"2021\"\nrust-version = \"1.64\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nrustversion = { version = \"1\", default-features = false }\nspin = { version = \"0.10\", default-features = false, features = [\"use_ticket_mutex\", \"once\", \"lazy\"] }\nhashbrown = { version = \"0.14\", default-features = false, features = [\"ahash\", \"inline-more\"] }\n\n[features]\nstd = []\ndefault = [\"std\"]\n"
  },
  {
    "path": "common/std-shims/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2023 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "common/std-shims/README.md",
    "content": "# std shims\n\nA crate which passes through to std when the default `std` feature is enabled,\nyet provides a series of shims when it isn't.\n\nNo guarantee of one-to-one parity is provided. The shims provided aim to be sufficient for the\naverage case.\n\n`HashSet` and `HashMap` are provided via `hashbrown`. Synchronization primitives are provided via\n`spin` (avoiding a requirement on `critical-section`).\nTypes are not guaranteed to be identical to their `std` counterparts.\n"
  },
  {
    "path": "common/std-shims/src/collections.rs",
    "content": "#[cfg(feature = \"std\")]\npub use std::collections::*;\n\n#[cfg(not(feature = \"std\"))]\npub use alloc::collections::*;\n#[cfg(not(feature = \"std\"))]\npub use hashbrown::{HashSet, HashMap};\n"
  },
  {
    "path": "common/std-shims/src/io.rs",
    "content": "#[cfg(feature = \"std\")]\npub use std::io::*;\n\n#[cfg(not(feature = \"std\"))]\nmod shims {\n  use core::fmt::{Debug, Formatter};\n  use alloc::{boxed::Box, vec::Vec};\n\n  #[derive(Clone, Copy, PartialEq, Eq, Debug)]\n  pub enum ErrorKind {\n    UnexpectedEof,\n    Other,\n  }\n\n  pub struct Error {\n    kind: ErrorKind,\n    error: Box<dyn Send + Sync>,\n  }\n\n  impl Debug for Error {\n    fn fmt(&self, fmt: &mut Formatter<'_>) -> core::result::Result<(), core::fmt::Error> {\n      fmt.debug_struct(\"Error\").field(\"kind\", &self.kind).finish_non_exhaustive()\n    }\n  }\n\n  impl Error {\n    pub fn new<E: 'static + Send + Sync>(kind: ErrorKind, error: E) -> Error {\n      Error { kind, error: Box::new(error) }\n    }\n\n    pub fn other<E: 'static + Send + Sync>(error: E) -> Error {\n      Error { kind: ErrorKind::Other, error: Box::new(error) }\n    }\n\n    pub fn kind(&self) -> ErrorKind {\n      self.kind\n    }\n\n    pub fn into_inner(self) -> Option<Box<dyn Send + Sync>> {\n      Some(self.error)\n    }\n  }\n\n  pub type Result<T> = core::result::Result<T, Error>;\n\n  pub trait Read {\n    fn read(&mut self, buf: &mut [u8]) -> Result<usize>;\n\n    fn read_exact(&mut self, buf: &mut [u8]) -> Result<()> {\n      let read = self.read(buf)?;\n      if read != buf.len() {\n        Err(Error::new(ErrorKind::UnexpectedEof, \"reader ran out of bytes\"))?;\n      }\n      Ok(())\n    }\n  }\n\n  impl Read for &[u8] {\n    fn read(&mut self, buf: &mut [u8]) -> Result<usize> {\n      let read = buf.len().min(self.len());\n      buf[.. read].copy_from_slice(&self[.. 
read]);\n      *self = &self[read ..];\n      Ok(read)\n    }\n  }\n\n  pub trait BufRead: Read {\n    fn fill_buf(&mut self) -> Result<&[u8]>;\n    fn consume(&mut self, amt: usize);\n  }\n\n  impl BufRead for &[u8] {\n    fn fill_buf(&mut self) -> Result<&[u8]> {\n      Ok(*self)\n    }\n    fn consume(&mut self, amt: usize) {\n      *self = &self[amt ..];\n    }\n  }\n\n  pub trait Write {\n    fn write(&mut self, buf: &[u8]) -> Result<usize>;\n    fn write_all(&mut self, buf: &[u8]) -> Result<()> {\n      if self.write(buf)? != buf.len() {\n        Err(Error::new(ErrorKind::UnexpectedEof, \"writer ran out of bytes\"))?;\n      }\n      Ok(())\n    }\n  }\n\n  impl Write for Vec<u8> {\n    fn write(&mut self, buf: &[u8]) -> Result<usize> {\n      self.extend(buf);\n      Ok(buf.len())\n    }\n  }\n}\n\n#[cfg(not(feature = \"std\"))]\npub use shims::*;\n"
  },
  {
    "path": "common/std-shims/src/lib.rs",
    "content": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![doc = include_str!(\"../README.md\")]\n#![cfg_attr(not(feature = \"std\"), no_std)]\n\npub extern crate alloc;\n\npub mod sync;\npub mod collections;\npub mod io;\n\npub use alloc::vec;\npub use alloc::str;\npub use alloc::string;\n\npub mod prelude {\n  #[rustversion::before(1.73)]\n  #[doc(hidden)]\n  pub trait StdShimsDivCeil {\n    fn div_ceil(self, rhs: Self) -> Self;\n  }\n  #[rustversion::before(1.73)]\n  mod impl_divceil {\n    use super::StdShimsDivCeil;\n    impl StdShimsDivCeil for u8 {\n      fn div_ceil(self, rhs: Self) -> Self {\n        (self + (rhs - 1)) / rhs\n      }\n    }\n    impl StdShimsDivCeil for u16 {\n      fn div_ceil(self, rhs: Self) -> Self {\n        (self + (rhs - 1)) / rhs\n      }\n    }\n    impl StdShimsDivCeil for u32 {\n      fn div_ceil(self, rhs: Self) -> Self {\n        (self + (rhs - 1)) / rhs\n      }\n    }\n    impl StdShimsDivCeil for u64 {\n      fn div_ceil(self, rhs: Self) -> Self {\n        (self + (rhs - 1)) / rhs\n      }\n    }\n    impl StdShimsDivCeil for u128 {\n      fn div_ceil(self, rhs: Self) -> Self {\n        (self + (rhs - 1)) / rhs\n      }\n    }\n    impl StdShimsDivCeil for usize {\n      fn div_ceil(self, rhs: Self) -> Self {\n        (self + (rhs - 1)) / rhs\n      }\n    }\n  }\n\n  #[cfg(feature = \"std\")]\n  #[rustversion::before(1.74)]\n  #[doc(hidden)]\n  pub trait StdShimsIoErrorOther {\n    fn other<E>(error: E) -> Self\n    where\n      E: Into<Box<dyn std::error::Error + Send + Sync>>;\n  }\n  #[cfg(feature = \"std\")]\n  #[rustversion::before(1.74)]\n  impl StdShimsIoErrorOther for std::io::Error {\n    fn other<E>(error: E) -> Self\n    where\n      E: Into<Box<dyn std::error::Error + Send + Sync>>,\n    {\n      std::io::Error::new(std::io::ErrorKind::Other, error)\n    }\n  }\n}\n"
  },
  {
    "path": "common/std-shims/src/sync.rs",
    "content": "pub use core::sync::*;\npub use alloc::sync::*;\n\nmod mutex_shim {\n  #[cfg(feature = \"std\")]\n  pub use std::sync::*;\n  #[cfg(not(feature = \"std\"))]\n  pub use spin::*;\n\n  #[derive(Default, Debug)]\n  pub struct ShimMutex<T>(Mutex<T>);\n  impl<T> ShimMutex<T> {\n    pub const fn new(value: T) -> Self {\n      Self(Mutex::new(value))\n    }\n\n    pub fn lock(&self) -> MutexGuard<'_, T> {\n      #[cfg(feature = \"std\")]\n      let res = self.0.lock().unwrap();\n      #[cfg(not(feature = \"std\"))]\n      let res = self.0.lock();\n      res\n    }\n  }\n}\npub use mutex_shim::{ShimMutex as Mutex, MutexGuard};\n\n#[cfg(not(feature = \"std\"))]\npub use spin::Lazy as LazyLock;\n#[rustversion::before(1.80)]\n#[cfg(feature = \"std\")]\npub use spin::Lazy as LazyLock;\n#[rustversion::since(1.80)]\n#[cfg(feature = \"std\")]\npub use std::sync::LazyLock;\n"
  },
  {
    "path": "common/zalloc/Cargo.toml",
    "content": "[package]\nname = \"zalloc\"\nversion = \"0.1.0\"\ndescription = \"An allocator wrapper which zeroizes memory on dealloc\"\nlicense = \"MIT\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/common/zalloc\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nkeywords = []\nedition = \"2021\"\nrust-version = \"1.77\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nzeroize = { version = \"^1.5\", default-features = false }\n\n[build-dependencies]\nrustversion = { version = \"1\", default-features = false }\n\n[features]\nstd = [\"zeroize/std\"]\ndefault = [\"std\"]\nallocator = []\n"
  },
  {
    "path": "common/zalloc/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2022-2023 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "common/zalloc/build.rs",
    "content": "#[rustversion::nightly]\nfn main() {\n  println!(\"cargo::rustc-check-cfg=cfg(zalloc_rustc_nightly)\");\n  println!(\"cargo::rustc-cfg=zalloc_rustc_nightly\");\n}\n\n#[rustversion::not(nightly)]\nfn main() {\n  println!(\"cargo::rustc-check-cfg=cfg(zalloc_rustc_nightly)\");\n}\n"
  },
  {
    "path": "common/zalloc/src/lib.rs",
    "content": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![cfg_attr(all(zalloc_rustc_nightly, feature = \"allocator\"), feature(allocator_api))]\n\n//! Implementation of a Zeroizing Allocator, enabling zeroizing memory on deallocation.\n//! This can either be used with Box (requires nightly and the \"allocator\" feature) to provide the\n//! functionality of zeroize on types which don't implement zeroize, or used as a wrapper around\n//! the global allocator to ensure *all* memory is zeroized.\n\nuse core::{\n  slice,\n  alloc::{Layout, GlobalAlloc},\n};\n\nuse zeroize::Zeroize;\n\n/// An allocator wrapper which zeroizes its memory on dealloc.\npub struct ZeroizingAlloc<T>(pub T);\n\n#[cfg(all(zalloc_rustc_nightly, feature = \"allocator\"))]\nuse core::{\n  ptr::NonNull,\n  alloc::{AllocError, Allocator},\n};\n#[cfg(all(zalloc_rustc_nightly, feature = \"allocator\"))]\nunsafe impl<T: Allocator> Allocator for ZeroizingAlloc<T> {\n  fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {\n    self.0.allocate(layout)\n  }\n\n  unsafe fn deallocate(&self, mut ptr: NonNull<u8>, layout: Layout) {\n    slice::from_raw_parts_mut(ptr.as_mut(), layout.size()).zeroize();\n    self.0.deallocate(ptr, layout);\n  }\n}\n\nunsafe impl<T: GlobalAlloc> GlobalAlloc for ZeroizingAlloc<T> {\n  unsafe fn alloc(&self, layout: Layout) -> *mut u8 {\n    self.0.alloc(layout)\n  }\n\n  unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {\n    slice::from_raw_parts_mut(ptr, layout.size()).zeroize();\n    self.0.dealloc(ptr, layout);\n  }\n}\n"
  },
  {
    "path": "coordinator/Cargo.toml",
    "content": "[package]\nname = \"serai-coordinator\"\nversion = \"0.1.0\"\ndescription = \"Serai coordinator to prepare batches and sign transactions\"\nlicense = \"AGPL-3.0-only\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/coordinator\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nkeywords = []\nedition = \"2021\"\npublish = false\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nasync-trait = { version = \"0.1\", default-features = false }\n\nzeroize = { version = \"^1.5\", default-features = false, features = [\"std\"] }\nrand_core = { version = \"0.6\", default-features = false, features = [\"std\"] }\n\nblake2 = { version = \"0.10\", default-features = false, features = [\"std\"] }\n\ntranscript = { package = \"flexible-transcript\", path = \"../crypto/transcript\", default-features = false, features = [\"std\", \"recommended\"] }\ndalek-ff-group = { path = \"../crypto/dalek-ff-group\", default-features = false, features = [\"std\"] }\nciphersuite = { path = \"../crypto/ciphersuite\", default-features = false, features = [\"std\"] }\nschnorr = { package = \"schnorr-signatures\", path = \"../crypto/schnorr\", default-features = false, features = [\"std\", \"aggregate\"] }\ndkg-musig = { path = \"../crypto/dkg/musig\", default-features = false, features = [\"std\"] }\nfrost = { package = \"modular-frost\", path = \"../crypto/frost\" }\nfrost-schnorrkel = { path = \"../crypto/schnorrkel\" }\n\nscale = { package = \"parity-scale-codec\", version = \"3\", default-features = false, features = [\"std\", \"derive\"] }\n\nzalloc = { path = \"../common/zalloc\" }\nserai-db = { path = \"../common/db\" }\nserai-env = { path = \"../common/env\" }\n\nprocessor-messages = { package = \"serai-processor-messages\", path = \"../processor/messages\" }\nmessage-queue = { package = \"serai-message-queue\", path = \"../message-queue\" }\ntributary = { package 
= \"tributary-chain\", path = \"./tributary\" }\n\nsp-application-crypto = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false, features = [\"std\"] }\nserai-client = { path = \"../substrate/client\", default-features = false, features = [\"serai\", \"borsh\"] }\n\nhex = { version = \"0.4\", default-features = false, features = [\"std\"] }\nborsh = { version = \"1\", default-features = false, features = [\"std\", \"derive\", \"de_strict_order\"] }\n\nlog = { version = \"0.4\", default-features = false, features = [\"std\"] }\nenv_logger = { version = \"0.10\", default-features = false, features = [\"humantime\"] }\n\nfutures-util = { version = \"0.3\", default-features = false, features = [\"std\"] }\ntokio = { version = \"1\", default-features = false, features = [\"rt-multi-thread\", \"sync\", \"time\", \"macros\"] }\nlibp2p = { version = \"0.52\", default-features = false, features = [\"tokio\", \"tcp\", \"noise\", \"yamux\", \"request-response\", \"gossipsub\", \"macros\"] }\n\n[dev-dependencies]\ntributary = { package = \"tributary-chain\", path = \"./tributary\", features = [\"tests\"] }\nsp-application-crypto = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false, features = [\"std\"] }\nsp-runtime = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false, features = [\"std\"] }\n\n[features]\nlonger-reattempts = []\nparity-db = [\"serai-db/parity-db\"]\nrocksdb = [\"serai-db/rocksdb\"]\n"
  },
  {
    "path": "coordinator/LICENSE",
    "content": "AGPL-3.0-only license\n\nCopyright (c) 2023 Luke Parker\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License Version 3 as\npublished by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n"
  },
  {
    "path": "coordinator/README.md",
    "content": "# Coordinator\n\nThe Serai coordinator communicates with other coordinators to prepare batches\nfor Serai and sign transactions.\n\nIn order to achieve consensus over gossip, and order certain events, a\nmicro-blockchain is instantiated.\n"
  },
  {
    "path": "coordinator/src/cosign_evaluator.rs",
    "content": "use core::time::Duration;\nuse std::{\n  sync::Arc,\n  collections::{HashSet, HashMap},\n};\n\nuse tokio::{\n  sync::{mpsc, Mutex, RwLock},\n  time::sleep,\n};\n\nuse borsh::BorshSerialize;\nuse sp_application_crypto::RuntimePublic;\nuse serai_client::{\n  primitives::{ExternalNetworkId, EXTERNAL_NETWORKS},\n  validator_sets::primitives::{ExternalValidatorSet, Session},\n  Serai, SeraiError, TemporalSerai,\n};\n\nuse serai_db::{Get, DbTxn, Db, create_db};\n\nuse processor_messages::coordinator::cosign_block_msg;\n\nuse crate::{\n  p2p::{CosignedBlock, GossipMessageKind, P2p},\n  substrate::LatestCosignedBlock,\n};\n\ncreate_db! {\n  CosignDb {\n    ReceivedCosign: (set: ExternalValidatorSet, block: [u8; 32]) -> CosignedBlock,\n    LatestCosign: (network: ExternalNetworkId) -> CosignedBlock,\n    DistinctChain: (set: ExternalValidatorSet) -> (),\n  }\n}\n\npub struct CosignEvaluator<D: Db> {\n  db: Mutex<D>,\n  serai: Arc<Serai>,\n  stakes: RwLock<Option<HashMap<ExternalNetworkId, u64>>>,\n  latest_cosigns: RwLock<HashMap<ExternalNetworkId, CosignedBlock>>,\n}\n\nimpl<D: Db> CosignEvaluator<D> {\n  async fn update_latest_cosign(&self) {\n    let stakes_lock = self.stakes.read().await;\n    // If we haven't gotten the stake data yet, return\n    let Some(stakes) = stakes_lock.as_ref() else { return };\n\n    let total_stake = stakes.values().copied().sum::<u64>();\n\n    let latest_cosigns = self.latest_cosigns.read().await;\n    let mut highest_block = 0;\n    for cosign in latest_cosigns.values() {\n      let mut networks = HashSet::new();\n      for (network, sub_cosign) in &*latest_cosigns {\n        if sub_cosign.block_number >= cosign.block_number {\n          networks.insert(network);\n        }\n      }\n      let sum_stake =\n        networks.into_iter().map(|network| stakes.get(network).unwrap_or(&0)).sum::<u64>();\n      let needed_stake = ((total_stake * 2) / 3) + 1;\n      if (total_stake == 0) || (sum_stake > needed_stake) {\n        
highest_block = highest_block.max(cosign.block_number);\n      }\n    }\n\n    let mut db_lock = self.db.lock().await;\n    let mut txn = db_lock.txn();\n    if highest_block > LatestCosignedBlock::latest_cosigned_block(&txn) {\n      log::info!(\"setting latest cosigned block to {}\", highest_block);\n      LatestCosignedBlock::set(&mut txn, &highest_block);\n    }\n    txn.commit();\n  }\n\n  async fn update_stakes(&self) -> Result<(), SeraiError> {\n    let serai = self.serai.as_of_latest_finalized_block().await?;\n\n    let mut stakes = HashMap::new();\n    for network in EXTERNAL_NETWORKS {\n      // Use if this network has published a Batch for a short-circuit of if they've ever set a key\n      let set_key = serai.in_instructions().last_batch_for_network(network).await?.is_some();\n      if set_key {\n        stakes.insert(\n          network,\n          serai\n            .validator_sets()\n            .total_allocated_stake(network.into())\n            .await?\n            .expect(\"network which published a batch didn't have a stake set\")\n            .0,\n        );\n      }\n    }\n\n    // Since we've successfully built stakes, set it\n    *self.stakes.write().await = Some(stakes);\n\n    self.update_latest_cosign().await;\n\n    Ok(())\n  }\n\n  // Uses Err to signify a message should be retried\n  async fn handle_new_cosign(&self, cosign: CosignedBlock) -> Result<(), SeraiError> {\n    // If we already have this cosign or a newer cosign, return\n    if let Some(latest) = self.latest_cosigns.read().await.get(&cosign.network) {\n      if latest.block_number >= cosign.block_number {\n        return Ok(());\n      }\n    }\n\n    // If this an old cosign (older than a day), drop it\n    let latest_block = self.serai.latest_finalized_block().await?;\n    if (cosign.block_number + (24 * 60 * 60 / 6)) < latest_block.number() {\n      log::debug!(\"received old cosign supposedly signed by {:?}\", cosign.network);\n      return Ok(());\n    }\n\n    let 
Some(block) = self.serai.finalized_block_by_number(cosign.block_number).await? else {\n      log::warn!(\"received cosign with a block number which doesn't map to a block\");\n      return Ok(());\n    };\n\n    async fn set_with_keys_fn(\n      serai: &TemporalSerai<'_>,\n      network: ExternalNetworkId,\n    ) -> Result<Option<ExternalValidatorSet>, SeraiError> {\n      let Some(latest_session) = serai.validator_sets().session(network.into()).await? else {\n        log::warn!(\"received cosign from {:?}, which doesn't yet have a session\", network);\n        return Ok(None);\n      };\n      let prior_session = Session(latest_session.0.saturating_sub(1));\n      Ok(Some(\n        if serai\n          .validator_sets()\n          .keys(ExternalValidatorSet { network, session: prior_session })\n          .await?\n          .is_some()\n        {\n          ExternalValidatorSet { network, session: prior_session }\n        } else {\n          ExternalValidatorSet { network, session: latest_session }\n        },\n      ))\n    }\n\n    // Get the key for this network as of the prior block\n    // If we have two chains, this value may be different across chains depending on if one chain\n    // included the set_keys and one didn't\n    // Because set_keys will force a cosign, it will force detection of distinct blocks\n    // re: set_keys using keys prior to set_keys (assumed amenable to all)\n    let serai = self.serai.as_of(block.header.parent_hash.into());\n\n    let Some(set_with_keys) = set_with_keys_fn(&serai, cosign.network).await? else {\n      return Ok(());\n    };\n    let Some(keys) = serai.validator_sets().keys(set_with_keys).await? 
else {\n      log::warn!(\"received cosign for a block we didn't have keys for\");\n      return Ok(());\n    };\n\n    if !keys\n      .0\n      .verify(&cosign_block_msg(cosign.block_number, cosign.block), &cosign.signature.into())\n    {\n      log::warn!(\"received cosigned block with an invalid signature\");\n      return Ok(());\n    }\n\n    log::info!(\n      \"received cosign for block {} ({}) by {:?}\",\n      block.number(),\n      hex::encode(cosign.block),\n      cosign.network\n    );\n\n    // Save this cosign to the DB\n    {\n      let mut db = self.db.lock().await;\n      let mut txn = db.txn();\n      ReceivedCosign::set(&mut txn, set_with_keys, cosign.block, &cosign);\n      LatestCosign::set(&mut txn, set_with_keys.network, &(cosign));\n      txn.commit();\n    }\n\n    if cosign.block != block.hash() {\n      log::error!(\n        \"received cosign for a distinct block at {}. we have {}. cosign had {}\",\n        cosign.block_number,\n        hex::encode(block.hash()),\n        hex::encode(cosign.block)\n      );\n\n      let serai = self.serai.as_of(latest_block.hash());\n\n      let mut db = self.db.lock().await;\n      // Save this set as being on a different chain\n      let mut txn = db.txn();\n      DistinctChain::set(&mut txn, set_with_keys, &());\n      txn.commit();\n\n      let mut total_stake = 0;\n      let mut total_on_distinct_chain = 0;\n      for network in EXTERNAL_NETWORKS {\n        // Get the current set for this network\n        let set_with_keys = {\n          let mut res;\n          while {\n            res = set_with_keys_fn(&serai, network).await;\n            res.is_err()\n          } {\n            log::error!(\n              \"couldn't get the set with keys when checking for a distinct chain: {:?}\",\n              res\n            );\n            tokio::time::sleep(core::time::Duration::from_secs(3)).await;\n          }\n          res.unwrap()\n        };\n\n        // Get its stake\n        // Doesn't use the 
stakes inside self to prevent deadlocks re: multi-lock acquisition\n        if let Some(set_with_keys) = set_with_keys {\n          let stake = {\n            let mut res;\n            while {\n              res =\n                serai.validator_sets().total_allocated_stake(set_with_keys.network.into()).await;\n              res.is_err()\n            } {\n              log::error!(\n                \"couldn't get total allocated stake when checking for a distinct chain: {:?}\",\n                res\n              );\n              tokio::time::sleep(core::time::Duration::from_secs(3)).await;\n            }\n            res.unwrap()\n          };\n\n          if let Some(stake) = stake {\n            total_stake += stake.0;\n\n            if DistinctChain::get(&*db, set_with_keys).is_some() {\n              total_on_distinct_chain += stake.0;\n            }\n          }\n        }\n      }\n\n      // See https://github.com/serai-dex/serai/issues/339 for the reasoning on 17%\n      if (total_stake * 17 / 100) <= total_on_distinct_chain {\n        panic!(\"17% of validator sets (by stake) have co-signed a distinct chain\");\n      }\n    } else {\n      {\n        let mut latest_cosigns = self.latest_cosigns.write().await;\n        latest_cosigns.insert(cosign.network, cosign);\n      }\n      self.update_latest_cosign().await;\n    }\n\n    Ok(())\n  }\n\n  #[allow(clippy::new_ret_no_self)]\n  pub fn new<P: P2p>(db: D, p2p: P, serai: Arc<Serai>) -> mpsc::UnboundedSender<CosignedBlock> {\n    let mut latest_cosigns = HashMap::new();\n    for network in EXTERNAL_NETWORKS {\n      if let Some(cosign) = LatestCosign::get(&db, network) {\n        latest_cosigns.insert(network, cosign);\n      }\n    }\n\n    let evaluator = Arc::new(Self {\n      db: Mutex::new(db),\n      serai,\n      stakes: RwLock::new(None),\n      latest_cosigns: RwLock::new(latest_cosigns),\n    });\n\n    // Spawn a task to update stakes regularly\n    tokio::spawn({\n      let evaluator = 
evaluator.clone();\n      async move {\n        loop {\n          // Run this until it passes\n          while evaluator.update_stakes().await.is_err() {\n            log::warn!(\"couldn't update stakes in the cosign evaluator\");\n            // Try again in 10 seconds\n            sleep(Duration::from_secs(10)).await;\n          }\n          // Run it every 10 minutes as we don't need the exact stake data for this to be valid\n          sleep(Duration::from_secs(10 * 60)).await;\n        }\n      }\n    });\n\n    // Spawn a task to receive cosigns and handle them\n    let (send, mut recv) = mpsc::unbounded_channel();\n    tokio::spawn({\n      let evaluator = evaluator.clone();\n      async move {\n        while let Some(msg) = recv.recv().await {\n          while evaluator.handle_new_cosign(msg).await.is_err() {\n            // Try again in 10 seconds\n            sleep(Duration::from_secs(10)).await;\n          }\n        }\n      }\n    });\n\n    // Spawn a task to rebroadcast the most recent cosigns\n    tokio::spawn({\n      async move {\n        loop {\n          let cosigns = evaluator.latest_cosigns.read().await.values().copied().collect::<Vec<_>>();\n          for cosign in cosigns {\n            let mut buf = vec![];\n            cosign.serialize(&mut buf).unwrap();\n            P2p::broadcast(&p2p, GossipMessageKind::CosignedBlock, buf).await;\n          }\n          sleep(Duration::from_secs(60)).await;\n        }\n      }\n    });\n\n    // Return the channel to send cosigns\n    send\n  }\n}\n"
  },
  {
    "path": "coordinator/src/db.rs",
    "content": "use blake2::{\n  digest::{consts::U32, Digest},\n  Blake2b,\n};\n\nuse scale::Encode;\nuse borsh::{BorshSerialize, BorshDeserialize};\nuse serai_client::{\n  in_instructions::primitives::{Batch, SignedBatch},\n  primitives::ExternalNetworkId,\n  validator_sets::primitives::{ExternalValidatorSet, Session},\n};\n\npub use serai_db::*;\n\nuse ::tributary::ReadWrite;\nuse crate::tributary::{TributarySpec, Transaction, scanner::RecognizedIdType};\n\ncreate_db!(\n  MainDb {\n    HandledMessageDb: (network: ExternalNetworkId) -> u64,\n    ActiveTributaryDb: () -> Vec<u8>,\n    RetiredTributaryDb: (set: ExternalValidatorSet) -> (),\n    FirstPreprocessDb: (\n      network: ExternalNetworkId,\n      id_type: RecognizedIdType,\n      id: &[u8]\n    ) -> Vec<Vec<u8>>,\n    LastReceivedBatchDb: (network: ExternalNetworkId) -> u32,\n    ExpectedBatchDb: (network: ExternalNetworkId, id: u32) -> [u8; 32],\n    BatchDb: (network: ExternalNetworkId, id: u32)  -> SignedBatch,\n    LastVerifiedBatchDb: (network: ExternalNetworkId) -> u32,\n    HandoverBatchDb: (set: ExternalValidatorSet) -> u32,\n    LookupHandoverBatchDb: (network: ExternalNetworkId, batch: u32) -> Session,\n    QueuedBatchesDb: (set: ExternalValidatorSet) -> Vec<u8>\n  }\n);\n\nimpl ActiveTributaryDb {\n  pub fn active_tributaries<G: Get>(getter: &G) -> (Vec<u8>, Vec<TributarySpec>) {\n    let bytes = Self::get(getter).unwrap_or_default();\n    let mut bytes_ref: &[u8] = bytes.as_ref();\n\n    let mut tributaries = vec![];\n    while !bytes_ref.is_empty() {\n      tributaries.push(TributarySpec::deserialize_reader(&mut bytes_ref).unwrap());\n    }\n\n    (bytes, tributaries)\n  }\n\n  pub fn add_participating_in_tributary(txn: &mut impl DbTxn, spec: &TributarySpec) {\n    let (mut existing_bytes, existing) = ActiveTributaryDb::active_tributaries(txn);\n    for tributary in &existing {\n      if tributary == spec {\n        return;\n      }\n    }\n\n    spec.serialize(&mut 
existing_bytes).unwrap();\n    ActiveTributaryDb::set(txn, &existing_bytes);\n  }\n\n  pub fn retire_tributary(txn: &mut impl DbTxn, set: ExternalValidatorSet) {\n    let mut active = Self::active_tributaries(txn).1;\n    for i in 0 .. active.len() {\n      if active[i].set() == set {\n        active.remove(i);\n        break;\n      }\n    }\n\n    let mut bytes = vec![];\n    for active in active {\n      active.serialize(&mut bytes).unwrap();\n    }\n    Self::set(txn, &bytes);\n    RetiredTributaryDb::set(txn, set, &());\n  }\n}\n\nimpl FirstPreprocessDb {\n  pub fn save_first_preprocess(\n    txn: &mut impl DbTxn,\n    network: ExternalNetworkId,\n    id_type: RecognizedIdType,\n    id: &[u8],\n    preprocess: &Vec<Vec<u8>>,\n  ) {\n    if let Some(existing) = FirstPreprocessDb::get(txn, network, id_type, id) {\n      assert_eq!(&existing, preprocess, \"saved a distinct first preprocess\");\n      return;\n    }\n    FirstPreprocessDb::set(txn, network, id_type, id, preprocess);\n  }\n}\n\nimpl ExpectedBatchDb {\n  pub fn save_expected_batch(txn: &mut impl DbTxn, batch: &Batch) {\n    LastReceivedBatchDb::set(txn, batch.network, &batch.id);\n    Self::set(\n      txn,\n      batch.network,\n      batch.id,\n      &Blake2b::<U32>::digest(batch.instructions.encode()).into(),\n    );\n  }\n}\n\nimpl HandoverBatchDb {\n  pub fn set_handover_batch(txn: &mut impl DbTxn, set: ExternalValidatorSet, batch: u32) {\n    Self::set(txn, set, &batch);\n    LookupHandoverBatchDb::set(txn, set.network, batch, &set.session);\n  }\n}\nimpl QueuedBatchesDb {\n  pub fn queue(txn: &mut impl DbTxn, set: ExternalValidatorSet, batch: &Transaction) {\n    let mut batches = Self::get(txn, set).unwrap_or_default();\n    batch.write(&mut batches).unwrap();\n    Self::set(txn, set, &batches);\n  }\n\n  pub fn take(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Vec<Transaction> {\n    let batches_vec = Self::get(txn, set).unwrap_or_default();\n    txn.del(Self::key(set));\n\n    let 
mut batches: &[u8] = &batches_vec;\n    let mut res = vec![];\n    while !batches.is_empty() {\n      res.push(Transaction::read(&mut batches).unwrap());\n    }\n    res\n  }\n}\n"
  },
  {
    "path": "coordinator/src/main.rs",
    "content": "#![expect(clippy::cast_possible_truncation)]\n\nuse core::ops::Deref;\nuse std::{\n  sync::{OnceLock, Arc},\n  time::Duration,\n  collections::{VecDeque, HashSet, HashMap},\n};\n\nuse zeroize::{Zeroize, Zeroizing};\nuse rand_core::OsRng;\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::{\n  group::{\n    ff::{Field, PrimeField},\n    GroupEncoding,\n  },\n  Ciphersuite,\n};\nuse schnorr::SchnorrSignature;\nuse frost::Participant;\n\nuse serai_db::{DbTxn, Db};\n\nuse scale::Encode;\nuse borsh::BorshSerialize;\nuse serai_client::{\n  primitives::ExternalNetworkId,\n  validator_sets::primitives::{ExternalValidatorSet, KeyPair, Session},\n  Public, Serai, SeraiInInstructions,\n};\n\nuse message_queue::{Service, client::MessageQueue};\n\nuse tokio::{\n  sync::{Mutex, RwLock, mpsc, broadcast},\n  time::sleep,\n};\n\nuse ::tributary::{ProvidedError, TransactionKind, TransactionTrait, Block, Tributary};\n\nmod tributary;\nuse crate::tributary::{\n  TributarySpec, Label, SignData, Transaction, scanner::RecognizedIdType, PlanIds,\n};\n\nmod db;\nuse db::*;\n\nmod p2p;\npub use p2p::*;\n\nuse processor_messages::{\n  key_gen, sign,\n  coordinator::{self, SubstrateSignableId},\n  ProcessorMessage,\n};\n\npub mod processors;\nuse processors::Processors;\n\nmod substrate;\nuse substrate::CosignTransactions;\n\nmod cosign_evaluator;\nuse cosign_evaluator::CosignEvaluator;\n\n#[cfg(test)]\npub mod tests;\n\n#[global_allocator]\nstatic ALLOCATOR: zalloc::ZeroizingAlloc<std::alloc::System> =\n  zalloc::ZeroizingAlloc(std::alloc::System);\n\n#[derive(Clone)]\npub struct ActiveTributary<D: Db, P: P2p> {\n  pub spec: TributarySpec,\n  pub tributary: Arc<Tributary<D, Transaction, P>>,\n}\n\n#[derive(Clone)]\npub enum TributaryEvent<D: Db, P: P2p> {\n  NewTributary(ActiveTributary<D, P>),\n  TributaryRetired(ExternalValidatorSet),\n}\n\n// Creates a new tributary and sends it to all listeners.\nasync fn add_tributary<D: Db, Pro: Processors, P: P2p>(\n  db: D,\n  key: 
Zeroizing<<Ristretto as Ciphersuite>::F>,\n  processors: &Pro,\n  p2p: P,\n  tributaries: &broadcast::Sender<TributaryEvent<D, P>>,\n  spec: TributarySpec,\n) {\n  if RetiredTributaryDb::get(&db, spec.set()).is_some() {\n    log::info!(\"not adding tributary {:?} since it's been retired\", spec.set());\n  }\n\n  log::info!(\"adding tributary {:?}\", spec.set());\n\n  let tributary = Tributary::<_, Transaction, _>::new(\n    // TODO2: Use a db on a distinct volume to protect against DoS attacks\n    // TODO2: Delete said db once the Tributary is dropped\n    db,\n    spec.genesis(),\n    spec.start_time(),\n    key.clone(),\n    spec.validators(),\n    p2p,\n  )\n  .await\n  .unwrap();\n\n  // Trigger a DKG for the newly added Tributary\n  // If we're rebooting, we'll re-fire this message\n  // This is safe due to the message-queue deduplicating based off the intent system\n  let set = spec.set();\n  let our_i = spec\n    .i(&[], Ristretto::generator() * key.deref())\n    .expect(\"adding a tributary for a set we aren't in set for\");\n  processors\n    .send(\n      set.network,\n      processor_messages::key_gen::CoordinatorMessage::GenerateKey {\n        id: processor_messages::key_gen::KeyGenId { session: set.session, attempt: 0 },\n        params: frost::ThresholdParams::new(spec.t(), spec.n(&[]), our_i.start).unwrap(),\n        shares: u16::from(our_i.end) - u16::from(our_i.start),\n      },\n    )\n    .await;\n\n  tributaries\n    .send(TributaryEvent::NewTributary(ActiveTributary { spec, tributary: Arc::new(tributary) }))\n    .map_err(|_| \"all ActiveTributary recipients closed\")\n    .unwrap();\n}\n\n// TODO: Find a better pattern for this\nstatic HANDOVER_VERIFY_QUEUE_LOCK: OnceLock<Mutex<()>> = OnceLock::new();\n\n#[allow(clippy::too_many_arguments)]\nasync fn handle_processor_message<D: Db, P: P2p>(\n  db: &mut D,\n  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,\n  serai: &Serai,\n  p2p: &P,\n  cosign_channel: 
&mpsc::UnboundedSender<CosignedBlock>,\n  tributaries: &HashMap<Session, ActiveTributary<D, P>>,\n  network: ExternalNetworkId,\n  msg: &processors::Message,\n) -> bool {\n  #[allow(clippy::nonminimal_bool)]\n  if let Some(already_handled) = HandledMessageDb::get(db, msg.network) {\n    assert!(!(already_handled > msg.id));\n    assert!((already_handled == msg.id) || (already_handled == msg.id - 1));\n    if already_handled == msg.id {\n      return true;\n    }\n  } else {\n    assert_eq!(msg.id, 0);\n  }\n\n  let _hvq_lock = HANDOVER_VERIFY_QUEUE_LOCK.get_or_init(|| Mutex::new(())).lock().await;\n  let mut txn = db.txn();\n\n  let mut relevant_tributary = match &msg.msg {\n    // We'll only receive these if we fired GenerateKey, which we'll only do if if we're\n    // in-set, making the Tributary relevant\n    ProcessorMessage::KeyGen(inner_msg) => match inner_msg {\n      key_gen::ProcessorMessage::Commitments { id, .. } |\n      key_gen::ProcessorMessage::InvalidCommitments { id, .. } |\n      key_gen::ProcessorMessage::Shares { id, .. } |\n      key_gen::ProcessorMessage::InvalidShare { id, .. } |\n      key_gen::ProcessorMessage::GeneratedKeyPair { id, .. } |\n      key_gen::ProcessorMessage::Blame { id, .. } => Some(id.session),\n    },\n    ProcessorMessage::Sign(inner_msg) => match inner_msg {\n      // We'll only receive InvalidParticipant/Preprocess/Share if we're actively signing\n      sign::ProcessorMessage::InvalidParticipant { id, .. } |\n      sign::ProcessorMessage::Preprocess { id, .. } |\n      sign::ProcessorMessage::Share { id, .. } => Some(id.session),\n      // While the Processor's Scanner will always emit Completed, that's routed through the\n      // Signer and only becomes a ProcessorMessage::Completed if the Signer is present and\n      // confirms it\n      sign::ProcessorMessage::Completed { session, .. 
} => Some(*session),\n    },\n    ProcessorMessage::Coordinator(inner_msg) => match inner_msg {\n      // This is a special case as it's relevant to *all* Tributaries for this network we're\n      // signing in\n      // It doesn't return a Tributary to become `relevant_tributary` though\n      coordinator::ProcessorMessage::SubstrateBlockAck { block, plans } => {\n        // Get the sessions for these keys\n        let sessions = plans\n          .iter()\n          .map(|plan| plan.session)\n          .filter(|session| {\n            RetiredTributaryDb::get(&txn, ExternalValidatorSet { network, session: *session })\n              .is_none()\n          })\n          .collect::<HashSet<_>>();\n\n        // Ensure we have the Tributaries\n        for session in &sessions {\n          if !tributaries.contains_key(session) {\n            return false;\n          }\n        }\n\n        for session in sessions {\n          let tributary = &tributaries[&session];\n          let plans = plans\n            .iter()\n            .filter_map(|plan| Some(plan.id).filter(|_| plan.session == session))\n            .collect::<Vec<_>>();\n          PlanIds::set(&mut txn, &tributary.spec.genesis(), *block, &plans);\n\n          let tx = Transaction::SubstrateBlock(*block);\n          log::trace!(\n            \"processor message effected transaction {} {:?}\",\n            hex::encode(tx.hash()),\n            &tx\n          );\n          log::trace!(\"providing transaction {}\", hex::encode(tx.hash()));\n          let res = tributary.tributary.provide_transaction(tx).await;\n          if !(res.is_ok() || (res == Err(ProvidedError::AlreadyProvided))) {\n            if res == Err(ProvidedError::LocalMismatchesOnChain) {\n              // Spin, since this is a crit for this Tributary\n              loop {\n                log::error!(\n                  \"{}. 
tributary: {}, provided: SubstrateBlock({})\",\n                  \"tributary added distinct provided to delayed locally provided TX\",\n                  hex::encode(tributary.spec.genesis()),\n                  block,\n                );\n                sleep(Duration::from_secs(60)).await;\n              }\n            }\n            panic!(\"provided an invalid transaction: {res:?}\");\n          }\n        }\n\n        None\n      }\n      // We'll only fire these if we are the Substrate signer, making the Tributary relevant\n      coordinator::ProcessorMessage::InvalidParticipant { id, .. } |\n      coordinator::ProcessorMessage::CosignPreprocess { id, .. } |\n      coordinator::ProcessorMessage::BatchPreprocess { id, .. } |\n      coordinator::ProcessorMessage::SlashReportPreprocess { id, .. } |\n      coordinator::ProcessorMessage::SubstrateShare { id, .. } => Some(id.session),\n      // This causes an action on our P2P net yet not on any Tributary\n      coordinator::ProcessorMessage::CosignedBlock { block_number, block, signature } => {\n        let cosigned_block = CosignedBlock {\n          network,\n          block_number: *block_number,\n          block: *block,\n          signature: {\n            let mut arr = [0; 64];\n            arr.copy_from_slice(signature);\n            arr\n          },\n        };\n        cosign_channel.send(cosigned_block).unwrap();\n        let mut buf = vec![];\n        cosigned_block.serialize(&mut buf).unwrap();\n        P2p::broadcast(p2p, GossipMessageKind::CosignedBlock, buf).await;\n        None\n      }\n      // This causes an action on Substrate yet not on any Tributary\n      coordinator::ProcessorMessage::SignedSlashReport { session, signature } => {\n        let set = ExternalValidatorSet { network, session: *session };\n        let signature: &[u8] = signature.as_ref();\n        let signature = <[u8; 64]>::try_from(signature).unwrap();\n        let signature: serai_client::Signature = signature.into();\n\n  
      let slashes = crate::tributary::SlashReport::get(&txn, set)\n          .expect(\"signed slash report despite not having slash report locally\");\n        let slashes_pubs = slashes\n          .iter()\n          .map(|(address, points)| (Public::from(*address), *points))\n          .collect::<Vec<_>>();\n\n        let tx = serai_client::SeraiValidatorSets::report_slashes(\n          network,\n          slashes\n            .into_iter()\n            .map(|(address, points)| (serai_client::SeraiAddress(address), points))\n            .collect::<Vec<_>>()\n            .try_into()\n            .unwrap(),\n          signature,\n        );\n\n        loop {\n          if serai.publish(&tx).await.is_ok() {\n            break None;\n          }\n\n          // Check if the slashes shouldn't still be reported. If not, break.\n          let Ok(serai) = serai.as_of_latest_finalized_block().await else {\n            tokio::time::sleep(core::time::Duration::from_secs(5)).await;\n            continue;\n          };\n          let Ok(key) = serai.validator_sets().key_pending_slash_report(network).await else {\n            tokio::time::sleep(core::time::Duration::from_secs(5)).await;\n            continue;\n          };\n          let Some(key) = key else {\n            break None;\n          };\n          // If this is the key for this slash report, then this will verify\n          use sp_application_crypto::RuntimePublic;\n          if !key.verify(\n            &serai_client::validator_sets::primitives::report_slashes_message(&set, &slashes_pubs),\n            &signature,\n          ) {\n            break None;\n          }\n        }\n      }\n    },\n    // These don't return a relevant Tributary as there's no Tributary with action expected\n    ProcessorMessage::Substrate(inner_msg) => match inner_msg {\n      processor_messages::substrate::ProcessorMessage::Batch { batch } => {\n        assert_eq!(\n          batch.network, msg.network,\n          \"processor sent us a 
batch for a different network than it was for\",\n        );\n        ExpectedBatchDb::save_expected_batch(&mut txn, batch);\n        None\n      }\n      // If this is a new Batch, immediately publish it (if we can)\n      processor_messages::substrate::ProcessorMessage::SignedBatch { batch } => {\n        assert_eq!(\n          batch.batch.network, msg.network,\n          \"processor sent us a signed batch for a different network than it was for\",\n        );\n\n        log::debug!(\"received batch {:?} {}\", batch.batch.network, batch.batch.id);\n\n        // Save this batch to the disk\n        BatchDb::set(&mut txn, batch.batch.network, batch.batch.id, &batch.clone());\n\n        // Get the next-to-execute batch ID\n        let Ok(mut next) = substrate::expected_next_batch(serai, network).await else {\n          return false;\n        };\n\n        // Since we have a new batch, publish all batches yet to be published to Serai\n        // This handles the edge-case where batch n+1 is signed before batch n is\n        let mut batches = VecDeque::new();\n        while let Some(batch) = BatchDb::get(&txn, network, next) {\n          batches.push_back(batch);\n          next += 1;\n        }\n\n        while let Some(batch) = batches.pop_front() {\n          // If this Batch should no longer be published, continue\n          let Ok(expected_next_batch) = substrate::expected_next_batch(serai, network).await else {\n            return false;\n          };\n          if expected_next_batch > batch.batch.id {\n            continue;\n          }\n\n          let tx = SeraiInInstructions::execute_batch(batch.clone());\n          log::debug!(\"attempting to publish batch {:?} {}\", batch.batch.network, batch.batch.id,);\n          // This publish may fail if this transactions already exists in the mempool, which is\n          // possible, or if this batch was already executed on-chain\n          // Either case will have eventual resolution and be handled by the above 
check on if\n          // this batch should execute\n          let res = serai.publish(&tx).await;\n          if res.is_ok() {\n            log::info!(\n              \"published batch {network:?} {} (block {})\",\n              batch.batch.id,\n              hex::encode(batch.batch.block),\n            );\n          } else {\n            log::debug!(\n              \"couldn't publish batch {:?} {}: {:?}\",\n              batch.batch.network,\n              batch.batch.id,\n              res,\n            );\n            // If we failed to publish it, restore it\n            batches.push_front(batch);\n            // Sleep for a few seconds before retrying to prevent hammering the node\n            sleep(Duration::from_secs(5)).await;\n          }\n        }\n\n        None\n      }\n    },\n  };\n\n  // If we have a relevant Tributary, check it's actually still relevant and has yet to be retired\n  if let Some(relevant_tributary_value) = relevant_tributary {\n    if RetiredTributaryDb::get(\n      &txn,\n      ExternalValidatorSet { network: msg.network, session: relevant_tributary_value },\n    )\n    .is_some()\n    {\n      relevant_tributary = None;\n    }\n  }\n\n  // If there's a relevant Tributary...\n  if let Some(relevant_tributary) = relevant_tributary {\n    // Make sure we have it\n    // Per the reasoning above, we only return a Tributary as relevant if we're a participant\n    // Accordingly, we do *need* to have this Tributary now to handle it UNLESS the Tributary has\n    // already completed and this is simply an old message (which we prior checked)\n    let Some(ActiveTributary { spec, tributary }) = tributaries.get(&relevant_tributary) else {\n      // Since we don't, sleep for a fraction of a second and return false, signaling we didn't\n      // handle this message\n      // At the start of the loop which calls this function, we'll check for new tributaries,\n      // making this eventually resolve\n      
sleep(Duration::from_millis(100)).await;\n      return false;\n    };\n\n    let genesis = spec.genesis();\n    let pub_key = Ristretto::generator() * key.deref();\n\n    let txs = match msg.msg.clone() {\n      ProcessorMessage::KeyGen(inner_msg) => match inner_msg {\n        key_gen::ProcessorMessage::Commitments { id, commitments } => {\n          vec![Transaction::DkgCommitments {\n            attempt: id.attempt,\n            commitments,\n            signed: Transaction::empty_signed(),\n          }]\n        }\n        key_gen::ProcessorMessage::InvalidCommitments { id, faulty } => {\n          // This doesn't have guaranteed timing\n          //\n          // While the party *should* be fatally slashed and not included in future attempts,\n          // they'll actually be fatally slashed (assuming liveness before the Tributary retires)\n          // and not included in future attempts *which begin after the latency window completes*\n          let participant = spec\n            .reverse_lookup_i(\n              &crate::tributary::removed_as_of_dkg_attempt(&txn, spec.genesis(), id.attempt)\n                .expect(\"participating in DKG attempt yet we didn't save who was removed\"),\n              faulty,\n            )\n            .unwrap();\n          vec![Transaction::RemoveParticipantDueToDkg {\n            participant,\n            signed: Transaction::empty_signed(),\n          }]\n        }\n        key_gen::ProcessorMessage::Shares { id, mut shares } => {\n          // Create a MuSig-based machine to inform Substrate of this key generation\n          let nonces = crate::tributary::dkg_confirmation_nonces(key, spec, &mut txn, id.attempt);\n\n          let removed = crate::tributary::removed_as_of_dkg_attempt(&txn, genesis, id.attempt)\n            .expect(\"participating in a DKG attempt yet we didn't track who was removed yet?\");\n          let our_i = spec\n            .i(&removed, pub_key)\n            .expect(\"processor message to DKG for an 
attempt we aren't a validator in\");\n\n          // `tx_shares` needs to be done here as while it can be serialized from the HashMap\n          // without further context, it can't be deserialized without context\n          let mut tx_shares = Vec::with_capacity(shares.len());\n          for shares in &mut shares {\n            tx_shares.push(vec![]);\n            for i in 1 ..= spec.n(&removed) {\n              let i = Participant::new(i).unwrap();\n              if our_i.contains(&i) {\n                if shares.contains_key(&i) {\n                  panic!(\"processor sent us our own shares\");\n                }\n                continue;\n              }\n              tx_shares.last_mut().unwrap().push(\n                shares.remove(&i).expect(\"processor didn't send share for another validator\"),\n              );\n            }\n          }\n\n          vec![Transaction::DkgShares {\n            attempt: id.attempt,\n            shares: tx_shares,\n            confirmation_nonces: nonces,\n            signed: Transaction::empty_signed(),\n          }]\n        }\n        key_gen::ProcessorMessage::InvalidShare { id, accuser, faulty, blame } => {\n          vec![Transaction::InvalidDkgShare {\n            attempt: id.attempt,\n            accuser,\n            faulty,\n            blame,\n            signed: Transaction::empty_signed(),\n          }]\n        }\n        key_gen::ProcessorMessage::GeneratedKeyPair { id, substrate_key, network_key } => {\n          // TODO2: Check the KeyGenId fields\n\n          // Tell the Tributary the key pair, get back the share for the MuSig signature\n          let share = crate::tributary::generated_key_pair::<D>(\n            &mut txn,\n            key,\n            spec,\n            &KeyPair(Public::from(substrate_key), network_key.try_into().unwrap()),\n            id.attempt,\n          );\n\n          // TODO: Move this into generated_key_pair?\n          match share {\n            Ok(share) => {\n              
vec![Transaction::DkgConfirmed {\n                attempt: id.attempt,\n                confirmation_share: share,\n                signed: Transaction::empty_signed(),\n              }]\n            }\n            Err(p) => {\n              let participant = spec\n                .reverse_lookup_i(\n                  &crate::tributary::removed_as_of_dkg_attempt(&txn, spec.genesis(), id.attempt)\n                    .expect(\"participating in DKG attempt yet we didn't save who was removed\"),\n                  p,\n                )\n                .unwrap();\n              vec![Transaction::RemoveParticipantDueToDkg {\n                participant,\n                signed: Transaction::empty_signed(),\n              }]\n            }\n          }\n        }\n        key_gen::ProcessorMessage::Blame { id, participant } => {\n          let participant = spec\n            .reverse_lookup_i(\n              &crate::tributary::removed_as_of_dkg_attempt(&txn, spec.genesis(), id.attempt)\n                .expect(\"participating in DKG attempt yet we didn't save who was removed\"),\n              participant,\n            )\n            .unwrap();\n          vec![Transaction::RemoveParticipantDueToDkg {\n            participant,\n            signed: Transaction::empty_signed(),\n          }]\n        }\n      },\n      ProcessorMessage::Sign(msg) => match msg {\n        sign::ProcessorMessage::InvalidParticipant { .. 
} => {\n          // TODO: Locally increase slash points to maximum (distinct from an explicitly fatal\n          // slash) and censor transactions (yet don't explicitly ban)\n          vec![]\n        }\n        sign::ProcessorMessage::Preprocess { id, preprocesses } => {\n          if id.attempt == 0 {\n            FirstPreprocessDb::save_first_preprocess(\n              &mut txn,\n              network,\n              RecognizedIdType::Plan,\n              &id.id,\n              &preprocesses,\n            );\n\n            vec![]\n          } else {\n            vec![Transaction::Sign(SignData {\n              plan: id.id,\n              attempt: id.attempt,\n              label: Label::Preprocess,\n              data: preprocesses,\n              signed: Transaction::empty_signed(),\n            })]\n          }\n        }\n        sign::ProcessorMessage::Share { id, shares } => {\n          vec![Transaction::Sign(SignData {\n            plan: id.id,\n            attempt: id.attempt,\n            label: Label::Share,\n            data: shares,\n            signed: Transaction::empty_signed(),\n          })]\n        }\n        sign::ProcessorMessage::Completed { session: _, id, tx } => {\n          let r = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));\n          #[allow(non_snake_case)]\n          let R = <Ristretto as Ciphersuite>::generator() * r.deref();\n          let mut tx = Transaction::SignCompleted {\n            plan: id,\n            tx_hash: tx,\n            first_signer: pub_key,\n            signature: SchnorrSignature { R, s: <Ristretto as Ciphersuite>::F::ZERO },\n          };\n          let signed = SchnorrSignature::sign(key, r, tx.sign_completed_challenge());\n          match &mut tx {\n            Transaction::SignCompleted { signature, .. 
} => {\n              *signature = signed;\n            }\n            _ => unreachable!(),\n          }\n          vec![tx]\n        }\n      },\n      ProcessorMessage::Coordinator(inner_msg) => match inner_msg {\n        coordinator::ProcessorMessage::SubstrateBlockAck { .. } => unreachable!(),\n        coordinator::ProcessorMessage::InvalidParticipant { .. } => {\n          // TODO: Locally increase slash points to maximum (distinct from an explicitly fatal\n          // slash) and censor transactions (yet don't explicitly ban)\n          vec![]\n        }\n        coordinator::ProcessorMessage::CosignPreprocess { id, preprocesses } |\n        coordinator::ProcessorMessage::SlashReportPreprocess { id, preprocesses } => {\n          vec![Transaction::SubstrateSign(SignData {\n            plan: id.id,\n            attempt: id.attempt,\n            label: Label::Preprocess,\n            data: preprocesses.into_iter().map(Into::into).collect(),\n            signed: Transaction::empty_signed(),\n          })]\n        }\n        coordinator::ProcessorMessage::BatchPreprocess { id, block, preprocesses } => {\n          log::info!(\n            \"informed of batch (sign ID {}, attempt {}) for block {}\",\n            hex::encode(id.id.encode()),\n            id.attempt,\n            hex::encode(block),\n          );\n\n          // If this is the first attempt instance, wait until we synchronize around the batch\n          // first\n          if id.attempt == 0 {\n            FirstPreprocessDb::save_first_preprocess(\n              &mut txn,\n              spec.set().network,\n              RecognizedIdType::Batch,\n              &{\n                let SubstrateSignableId::Batch(id) = id.id else {\n                  panic!(\"BatchPreprocess SubstrateSignableId wasn't Batch\")\n                };\n                id.to_le_bytes()\n              },\n              &preprocesses.into_iter().map(Into::into).collect::<Vec<_>>(),\n            );\n\n            let intended 
= Transaction::Batch {\n              block: block.0,\n              batch: match id.id {\n                SubstrateSignableId::Batch(id) => id,\n                _ => panic!(\"BatchPreprocess did not contain Batch ID\"),\n              },\n            };\n\n            // If this is the new key's first Batch, only create this TX once we verify all\n            // all prior published `Batch`s\n            // TODO: This assumes BatchPreprocess is immediately after Batch\n            // Ensure that assumption\n            let last_received = LastReceivedBatchDb::get(&txn, msg.network).unwrap();\n            let handover_batch = HandoverBatchDb::get(&txn, spec.set());\n            let mut queue = false;\n            if let Some(handover_batch) = handover_batch {\n              // There is a race condition here. We may verify all `Batch`s from the prior set,\n              // start signing the handover `Batch` `n`, start signing `n+1`, have `n+1` signed\n              // before `n` (or at the same time), yet then the prior set forges a malicious\n              // `Batch` `n`.\n              //\n              // The malicious `Batch` `n` would be publishable to Serai, as Serai can't\n              // distinguish what's intended to be a handover `Batch`, yet then anyone could\n              // publish the new set's `n+1`, causing their acceptance of the handover.\n              //\n              // To fix this, if this is after the handover `Batch` and we have yet to verify\n              // publication of the handover `Batch`, don't yet yield the provided.\n              if last_received > handover_batch {\n                if let Some(last_verified) = LastVerifiedBatchDb::get(&txn, msg.network) {\n                  if last_verified < handover_batch {\n                    queue = true;\n                  }\n                } else {\n                  queue = true;\n                }\n              }\n            } else {\n              
HandoverBatchDb::set_handover_batch(&mut txn, spec.set(), last_received);\n              // If this isn't the first batch, meaning we do have to verify all prior batches, and\n              // the prior Batch hasn't been verified yet...\n              if (last_received != 0) &&\n                LastVerifiedBatchDb::get(&txn, msg.network)\n                  .map_or(true, |last_verified| last_verified < (last_received - 1))\n              {\n                // Withhold this TX until we verify all prior `Batch`s\n                queue = true;\n              }\n            }\n\n            if queue {\n              QueuedBatchesDb::queue(&mut txn, spec.set(), &intended);\n              vec![]\n            } else {\n              // Because this is post-verification of the handover batch, take all queued `Batch`s\n              // now to ensure we don't provide this before an already queued Batch\n              // This *may* be an unreachable case due to how last_verified_batch is set, yet it\n              // doesn't hurt to have as a defensive pattern\n              let mut res = QueuedBatchesDb::take(&mut txn, spec.set());\n              res.push(intended);\n              res\n            }\n          } else {\n            vec![Transaction::SubstrateSign(SignData {\n              plan: id.id,\n              attempt: id.attempt,\n              label: Label::Preprocess,\n              data: preprocesses.into_iter().map(Into::into).collect(),\n              signed: Transaction::empty_signed(),\n            })]\n          }\n        }\n        coordinator::ProcessorMessage::SubstrateShare { id, shares } => {\n          vec![Transaction::SubstrateSign(SignData {\n            plan: id.id,\n            attempt: id.attempt,\n            label: Label::Share,\n            data: shares.into_iter().map(|share| share.to_vec()).collect(),\n            signed: Transaction::empty_signed(),\n          })]\n        }\n        #[allow(clippy::match_same_arms)] // Allowed to preserve 
layout\n        coordinator::ProcessorMessage::CosignedBlock { .. } => unreachable!(),\n        #[allow(clippy::match_same_arms)]\n        coordinator::ProcessorMessage::SignedSlashReport { .. } => unreachable!(),\n      },\n      ProcessorMessage::Substrate(inner_msg) => match inner_msg {\n        processor_messages::substrate::ProcessorMessage::Batch { .. } |\n        processor_messages::substrate::ProcessorMessage::SignedBatch { .. } => unreachable!(),\n      },\n    };\n\n    // If this created transactions, publish them\n    for mut tx in txs {\n      log::trace!(\"processor message effected transaction {} {:?}\", hex::encode(tx.hash()), &tx);\n\n      match tx.kind() {\n        TransactionKind::Provided(_) => {\n          log::trace!(\"providing transaction {}\", hex::encode(tx.hash()));\n          let res = tributary.provide_transaction(tx.clone()).await;\n          if !(res.is_ok() || (res == Err(ProvidedError::AlreadyProvided))) {\n            if res == Err(ProvidedError::LocalMismatchesOnChain) {\n              // Spin, since this is a crit for this Tributary\n              loop {\n                log::error!(\n                  \"{}. 
tributary: {}, provided: {:?}\",\n                  \"tributary added distinct provided to delayed locally provided TX\",\n                  hex::encode(spec.genesis()),\n                  &tx,\n                );\n                sleep(Duration::from_secs(60)).await;\n              }\n            }\n            panic!(\"provided an invalid transaction: {res:?}\");\n          }\n        }\n        TransactionKind::Unsigned => {\n          log::trace!(\"publishing unsigned transaction {}\", hex::encode(tx.hash()));\n          match tributary.add_transaction(tx.clone()).await {\n            Ok(_) => {}\n            Err(e) => panic!(\"created an invalid unsigned transaction: {e:?}\"),\n          }\n        }\n        TransactionKind::Signed(_, _) => {\n          tx.sign(&mut OsRng, genesis, key);\n          tributary::publish_signed_transaction(&mut txn, tributary, tx).await;\n        }\n      }\n    }\n  }\n\n  HandledMessageDb::set(&mut txn, msg.network, &msg.id);\n  txn.commit();\n\n  true\n}\n\n#[allow(clippy::too_many_arguments)]\nasync fn handle_processor_messages<D: Db, Pro: Processors, P: P2p>(\n  mut db: D,\n  key: Zeroizing<<Ristretto as Ciphersuite>::F>,\n  serai: Arc<Serai>,\n  processors: Pro,\n  p2p: P,\n  cosign_channel: mpsc::UnboundedSender<CosignedBlock>,\n  network: ExternalNetworkId,\n  mut tributary_event: mpsc::UnboundedReceiver<TributaryEvent<D, P>>,\n) {\n  let mut tributaries = HashMap::new();\n  loop {\n    match tributary_event.try_recv() {\n      Ok(event) => match event {\n        TributaryEvent::NewTributary(tributary) => {\n          let set = tributary.spec.set();\n          assert_eq!(set.network, network);\n          tributaries.insert(set.session, tributary);\n        }\n        TributaryEvent::TributaryRetired(set) => {\n          tributaries.remove(&set.session);\n        }\n      },\n      Err(mpsc::error::TryRecvError::Empty) => {}\n      Err(mpsc::error::TryRecvError::Disconnected) => {\n        
panic!(\"handle_processor_messages tributary_event sender closed\")\n      }\n    }\n\n    // TODO: Check this ID is sane (last handled ID or expected next ID)\n    let Ok(msg) = tokio::time::timeout(Duration::from_secs(1), processors.recv(network)).await\n    else {\n      continue;\n    };\n    log::trace!(\"entering handle_processor_message for {:?}\", network);\n    if handle_processor_message(\n      &mut db,\n      &key,\n      &serai,\n      &p2p,\n      &cosign_channel,\n      &tributaries,\n      network,\n      &msg,\n    )\n    .await\n    {\n      processors.ack(msg).await;\n    }\n    log::trace!(\"exited handle_processor_message for {:?}\", network);\n  }\n}\n\n#[allow(clippy::too_many_arguments)]\nasync fn handle_cosigns_and_batch_publication<D: Db, P: P2p>(\n  mut db: D,\n  network: ExternalNetworkId,\n  mut tributary_event: mpsc::UnboundedReceiver<TributaryEvent<D, P>>,\n) {\n  let mut tributaries = HashMap::new();\n  'outer: loop {\n    // TODO: Create a better async flow for this\n    tokio::time::sleep(core::time::Duration::from_millis(100)).await;\n\n    match tributary_event.try_recv() {\n      Ok(event) => match event {\n        TributaryEvent::NewTributary(tributary) => {\n          let set = tributary.spec.set();\n          assert_eq!(set.network, network);\n          tributaries.insert(set.session, tributary);\n        }\n        TributaryEvent::TributaryRetired(set) => {\n          tributaries.remove(&set.session);\n        }\n      },\n      Err(mpsc::error::TryRecvError::Empty) => {}\n      Err(mpsc::error::TryRecvError::Disconnected) => {\n        panic!(\"handle_processor_messages tributary_event sender closed\")\n      }\n    }\n\n    // Handle pending cosigns\n    {\n      let mut txn = db.txn();\n      while let Some((session, block, hash)) = CosignTransactions::try_recv(&mut txn, network) {\n        let Some(ActiveTributary { spec, tributary }) = tributaries.get(&session) else {\n          log::warn!(\"didn't yet have tributary 
we're supposed to cosign with\");\n          break;\n        };\n        log::info!(\n          \"{network:?} {session:?} cosigning block #{block} (hash {}...)\",\n          hex::encode(&hash[.. 8])\n        );\n        let tx = Transaction::CosignSubstrateBlock(hash);\n        let res = tributary.provide_transaction(tx.clone()).await;\n        if !(res.is_ok() || (res == Err(ProvidedError::AlreadyProvided))) {\n          if res == Err(ProvidedError::LocalMismatchesOnChain) {\n            // Spin, since this is a crit for this Tributary\n            loop {\n              log::error!(\n                \"{}. tributary: {}, provided: {:?}\",\n                \"tributary added distinct CosignSubstrateBlock\",\n                hex::encode(spec.genesis()),\n                &tx,\n              );\n              sleep(Duration::from_secs(60)).await;\n            }\n          }\n          panic!(\"provided an invalid CosignSubstrateBlock: {res:?}\");\n        }\n      }\n      txn.commit();\n    }\n\n    // Verify any publifshed `Batch`s\n    {\n      let _hvq_lock = HANDOVER_VERIFY_QUEUE_LOCK.get_or_init(|| Mutex::new(())).lock().await;\n      let mut txn = db.txn();\n      let mut to_publish = vec![];\n      let start_id =\n        LastVerifiedBatchDb::get(&txn, network).map_or(0, |already_verified| already_verified + 1);\n      if let Some(last_id) =\n        substrate::verify_published_batches::<D>(&mut txn, network, u32::MAX).await\n      {\n        // Check if any of these `Batch`s were a handover `Batch` or the `Batch` before a handover\n        // `Batch`\n        // If so, we need to publish queued provided `Batch` transactions\n        for batch in start_id ..= last_id {\n          let is_pre_handover = LookupHandoverBatchDb::get(&txn, network, batch + 1);\n          if let Some(session) = is_pre_handover {\n            let set = ExternalValidatorSet { network, session };\n            let mut queued = QueuedBatchesDb::take(&mut txn, set);\n            // 
is_handover_batch is only set for handover `Batch`s we're participating in, making\n            // this safe\n            if queued.is_empty() {\n              panic!(\"knew the next Batch was a handover yet didn't queue it\");\n            }\n\n            // Only publish the handover Batch\n            to_publish.push((set.session, queued.remove(0)));\n            // Re-queue the remaining batches\n            for remaining in queued {\n              QueuedBatchesDb::queue(&mut txn, set, &remaining);\n            }\n          }\n\n          let is_handover = LookupHandoverBatchDb::get(&txn, network, batch);\n          if let Some(session) = is_handover {\n            for queued in QueuedBatchesDb::take(&mut txn, ExternalValidatorSet { network, session })\n            {\n              to_publish.push((session, queued));\n            }\n          }\n        }\n      }\n\n      for (session, tx) in to_publish {\n        let Some(ActiveTributary { spec, tributary }) = tributaries.get(&session) else {\n          log::warn!(\"didn't yet have tributary we're supposed to provide a queued Batch for\");\n          // Safe since this will drop the txn updating the most recently queued batch\n          continue 'outer;\n        };\n        log::debug!(\"providing Batch transaction {:?}\", &tx);\n        let res = tributary.provide_transaction(tx.clone()).await;\n        if !(res.is_ok() || (res == Err(ProvidedError::AlreadyProvided))) {\n          if res == Err(ProvidedError::LocalMismatchesOnChain) {\n            // Spin, since this is a crit for this Tributary\n            loop {\n              log::error!(\n                \"{}. 
tributary: {}, provided: {:?}\",\n                \"tributary added distinct Batch\",\n                hex::encode(spec.genesis()),\n                &tx,\n              );\n              sleep(Duration::from_secs(60)).await;\n            }\n          }\n          panic!(\"provided an invalid Batch: {res:?}\");\n        }\n      }\n\n      txn.commit();\n    }\n  }\n}\n\npub async fn handle_processors<D: Db, Pro: Processors, P: P2p>(\n  db: D,\n  key: Zeroizing<<Ristretto as Ciphersuite>::F>,\n  serai: Arc<Serai>,\n  processors: Pro,\n  p2p: P,\n  cosign_channel: mpsc::UnboundedSender<CosignedBlock>,\n  mut tributary_event: broadcast::Receiver<TributaryEvent<D, P>>,\n) {\n  let mut channels = HashMap::new();\n  for network in serai_client::primitives::EXTERNAL_NETWORKS {\n    let (processor_send, processor_recv) = mpsc::unbounded_channel();\n    tokio::spawn(handle_processor_messages(\n      db.clone(),\n      key.clone(),\n      serai.clone(),\n      processors.clone(),\n      p2p.clone(),\n      cosign_channel.clone(),\n      network,\n      processor_recv,\n    ));\n    let (cosign_send, cosign_recv) = mpsc::unbounded_channel();\n    tokio::spawn(handle_cosigns_and_batch_publication(db.clone(), network, cosign_recv));\n    channels.insert(network, (processor_send, cosign_send));\n  }\n\n  // Listen to new tributary events\n  loop {\n    match tributary_event.recv().await.unwrap() {\n      TributaryEvent::NewTributary(tributary) => {\n        let (c1, c2) = &channels[&tributary.spec.set().network];\n        c1.send(TributaryEvent::NewTributary(tributary.clone())).unwrap();\n        c2.send(TributaryEvent::NewTributary(tributary)).unwrap();\n      }\n      TributaryEvent::TributaryRetired(set) => {\n        let (c1, c2) = &channels[&set.network];\n        c1.send(TributaryEvent::TributaryRetired(set)).unwrap();\n        c2.send(TributaryEvent::TributaryRetired(set)).unwrap();\n      }\n    };\n  }\n}\n\npub async fn run<D: Db, Pro: Processors, P: P2p>(\n  raw_db: 
D,\n  key: Zeroizing<<Ristretto as Ciphersuite>::F>,\n  p2p: P,\n  processors: Pro,\n  serai: Arc<Serai>,\n) {\n  let (new_tributary_spec_send, mut new_tributary_spec_recv) = mpsc::unbounded_channel();\n  // Reload active tributaries from the database\n  for spec in ActiveTributaryDb::active_tributaries(&raw_db).1 {\n    new_tributary_spec_send.send(spec).unwrap();\n  }\n\n  let (perform_slash_report_send, mut perform_slash_report_recv) = mpsc::unbounded_channel();\n\n  let (tributary_retired_send, mut tributary_retired_recv) = mpsc::unbounded_channel();\n\n  // Handle new Substrate blocks\n  tokio::spawn(crate::substrate::scan_task(\n    raw_db.clone(),\n    key.clone(),\n    processors.clone(),\n    serai.clone(),\n    new_tributary_spec_send,\n    perform_slash_report_send,\n    tributary_retired_send,\n  ));\n\n  // Handle the Tributaries\n\n  // This should be large enough for an entire rotation of all tributaries\n  // If it's too small, the coordinator fail to boot, which is a decent sanity check\n  let (tributary_event, mut tributary_event_listener_1) = broadcast::channel(32);\n  let tributary_event_listener_2 = tributary_event.subscribe();\n  let tributary_event_listener_3 = tributary_event.subscribe();\n  let tributary_event_listener_4 = tributary_event.subscribe();\n  let tributary_event_listener_5 = tributary_event.subscribe();\n\n  // Emit TributaryEvent::TributaryRetired\n  tokio::spawn({\n    let tributary_event = tributary_event.clone();\n    async move {\n      loop {\n        let retired = tributary_retired_recv.recv().await.unwrap();\n        tributary_event.send(TributaryEvent::TributaryRetired(retired)).map_err(|_| ()).unwrap();\n      }\n    }\n  });\n\n  // Spawn a task to further add Tributaries as needed\n  tokio::spawn({\n    let raw_db = raw_db.clone();\n    let key = key.clone();\n    let processors = processors.clone();\n    let p2p = p2p.clone();\n    async move {\n      loop {\n        let spec = 
new_tributary_spec_recv.recv().await.unwrap();\n        // Uses an inner task as Tributary::new may take several seconds\n        tokio::spawn({\n          let raw_db = raw_db.clone();\n          let key = key.clone();\n          let processors = processors.clone();\n          let p2p = p2p.clone();\n          let tributary_event = tributary_event.clone();\n          async move {\n            add_tributary(raw_db, key, &processors, p2p, &tributary_event, spec).await;\n          }\n        });\n      }\n    }\n  });\n\n  // When we reach synchrony on an event requiring signing, send our preprocess for it\n  // TODO: Properly place this into the Tributary scanner, as it's a mess out here\n  let recognized_id = {\n    let raw_db = raw_db.clone();\n    let key = key.clone();\n\n    let specs = Arc::new(RwLock::new(HashMap::new()));\n    let tributaries = Arc::new(RwLock::new(HashMap::new()));\n    // Spawn a task to maintain a local view of the tributaries for whenever recognized_id is\n    // called\n    tokio::spawn({\n      let specs = specs.clone();\n      let tributaries = tributaries.clone();\n      let mut set_to_genesis = HashMap::new();\n      async move {\n        loop {\n          match tributary_event_listener_1.recv().await {\n            Ok(TributaryEvent::NewTributary(tributary)) => {\n              set_to_genesis.insert(tributary.spec.set(), tributary.spec.genesis());\n              tributaries.write().await.insert(tributary.spec.genesis(), tributary.tributary);\n              specs.write().await.insert(tributary.spec.set(), tributary.spec);\n            }\n            Ok(TributaryEvent::TributaryRetired(set)) => {\n              if let Some(genesis) = set_to_genesis.remove(&set) {\n                specs.write().await.remove(&set);\n                tributaries.write().await.remove(&genesis);\n              }\n            }\n            Err(broadcast::error::RecvError::Lagged(_)) => {\n              panic!(\"recognized_id lagged to handle 
tributary_event\")\n            }\n            Err(broadcast::error::RecvError::Closed) => panic!(\"tributary_event sender closed\"),\n          }\n        }\n      }\n    });\n\n    // Also spawn a task to handle slash reports, as this needs such a view of tributaries\n    tokio::spawn({\n      let mut raw_db = raw_db.clone();\n      let key = key.clone();\n      let tributaries = tributaries.clone();\n      async move {\n        'task_loop: loop {\n          match perform_slash_report_recv.recv().await {\n            Some(set) => {\n              let (genesis, validators) = loop {\n                let specs = specs.read().await;\n                let Some(spec) = specs.get(&set) else {\n                  // If we don't have this Tributary because it's retired, break and move on\n                  if RetiredTributaryDb::get(&raw_db, set).is_some() {\n                    continue 'task_loop;\n                  }\n\n                  // This may happen if the task above is simply slow\n                  log::warn!(\"tributary we don't have yet is supposed to perform a slash report\");\n                  continue;\n                };\n                break (spec.genesis(), spec.validators());\n              };\n\n              let mut slashes = vec![];\n              for (validator, _) in validators {\n                if validator == (<Ristretto as Ciphersuite>::generator() * key.deref()) {\n                  continue;\n                }\n                let validator = validator.to_bytes();\n\n                let fatally = tributary::FatallySlashed::get(&raw_db, genesis, validator).is_some();\n                // TODO: Properly type this\n                let points = if fatally {\n                  u32::MAX\n                } else {\n                  tributary::SlashPoints::get(&raw_db, genesis, validator).unwrap_or(0)\n                };\n                slashes.push(points);\n              }\n\n              let mut tx = Transaction::SlashReport(slashes, 
Transaction::empty_signed());\n              tx.sign(&mut OsRng, genesis, &key);\n\n              let mut first = true;\n              loop {\n                if !first {\n                  sleep(Duration::from_millis(100)).await;\n                }\n                first = false;\n\n                let tributaries = tributaries.read().await;\n                let Some(tributary) = tributaries.get(&genesis) else {\n                  // If we don't have this Tributary because it's retired, break and move on\n                  if RetiredTributaryDb::get(&raw_db, set).is_some() {\n                    break;\n                  }\n\n                  // This may happen if the task above is simply slow\n                  log::warn!(\"tributary we don't have yet is supposed to perform a slash report\");\n                  continue;\n                };\n                // This is safe to perform multiple times and solely needs atomicity with regards\n                // to itself\n                // TODO: Should this not take a txn accordingly? 
It's best practice to take a txn,\n                // yet taking a txn fails to declare its achieved independence\n                let mut txn = raw_db.txn();\n                tributary::publish_signed_transaction(&mut txn, tributary, tx).await;\n                txn.commit();\n                break;\n              }\n            }\n            None => panic!(\"perform slash report sender closed\"),\n          }\n        }\n      }\n    });\n\n    move |set: ExternalValidatorSet, genesis, id_type, id: Vec<u8>| {\n      log::debug!(\"recognized ID {:?} {}\", id_type, hex::encode(&id));\n      let mut raw_db = raw_db.clone();\n      let key = key.clone();\n      let tributaries = tributaries.clone();\n      async move {\n        // The transactions for these are fired before the preprocesses are actually\n        // received/saved, creating a race between Tributary ack and the availability of all\n        // Preprocesses\n        // This waits until the necessary preprocess is available 0,\n        let get_preprocess = |raw_db, id_type, id| async move {\n          loop {\n            let Some(preprocess) = FirstPreprocessDb::get(raw_db, set.network, id_type, id) else {\n              log::warn!(\"waiting for preprocess for recognized ID\");\n              sleep(Duration::from_millis(100)).await;\n              continue;\n            };\n            return preprocess;\n          }\n        };\n\n        let mut tx = match id_type {\n          RecognizedIdType::Batch => Transaction::SubstrateSign(SignData {\n            data: get_preprocess(&raw_db, id_type, &id).await,\n            plan: SubstrateSignableId::Batch(u32::from_le_bytes(id.try_into().unwrap())),\n            label: Label::Preprocess,\n            attempt: 0,\n            signed: Transaction::empty_signed(),\n          }),\n\n          RecognizedIdType::Plan => Transaction::Sign(SignData {\n            data: get_preprocess(&raw_db, id_type, &id).await,\n            plan: id.try_into().unwrap(),\n            
label: Label::Preprocess,\n            attempt: 0,\n            signed: Transaction::empty_signed(),\n          }),\n        };\n\n        tx.sign(&mut OsRng, genesis, &key);\n\n        let mut first = true;\n        loop {\n          if !first {\n            sleep(Duration::from_millis(100)).await;\n          }\n          first = false;\n\n          let tributaries = tributaries.read().await;\n          let Some(tributary) = tributaries.get(&genesis) else {\n            // If we don't have this Tributary because it's retired, break and move on\n            if RetiredTributaryDb::get(&raw_db, set).is_some() {\n              break;\n            }\n\n            // This may happen if the task above is simply slow\n            log::warn!(\"tributary we don't have yet came to consensus on an Batch\");\n            continue;\n          };\n          // This is safe to perform multiple times and solely needs atomicity with regards to\n          // itself\n          // TODO: Should this not take a txn accordingly? 
It's best practice to take a txn, yet\n          // taking a txn fails to declare its achieved independence\n          let mut txn = raw_db.txn();\n          tributary::publish_signed_transaction(&mut txn, tributary, tx).await;\n          txn.commit();\n          break;\n        }\n      }\n    }\n  };\n\n  // Handle new blocks for each Tributary\n  {\n    let raw_db = raw_db.clone();\n    tokio::spawn(tributary::scanner::scan_tributaries_task(\n      raw_db,\n      key.clone(),\n      recognized_id,\n      processors.clone(),\n      serai.clone(),\n      tributary_event_listener_2,\n    ));\n  }\n\n  // Spawn the heartbeat task, which will trigger syncing if there hasn't been a Tributary block\n  // in a while (presumably because we're behind)\n  tokio::spawn(p2p::heartbeat_tributaries_task(p2p.clone(), tributary_event_listener_3));\n\n  // Create the Cosign evaluator\n  let cosign_channel = CosignEvaluator::new(raw_db.clone(), p2p.clone(), serai.clone());\n\n  // Handle P2P messages\n  tokio::spawn(p2p::handle_p2p_task(\n    p2p.clone(),\n    cosign_channel.clone(),\n    tributary_event_listener_4,\n  ));\n\n  // Handle all messages from processors\n  handle_processors(\n    raw_db,\n    key,\n    serai,\n    processors,\n    p2p,\n    cosign_channel,\n    tributary_event_listener_5,\n  )\n  .await;\n}\n\n#[tokio::main]\nasync fn main() {\n  // Override the panic handler with one which will panic if any tokio task panics\n  {\n    let existing = std::panic::take_hook();\n    std::panic::set_hook(Box::new(move |panic| {\n      existing(panic);\n      const MSG: &str = \"exiting the process due to a task panicking\";\n      println!(\"{MSG}\");\n      log::error!(\"{MSG}\");\n      std::process::exit(1);\n    }));\n  }\n\n  if std::env::var(\"RUST_LOG\").is_err() {\n    std::env::set_var(\"RUST_LOG\", serai_env::var(\"RUST_LOG\").unwrap_or_else(|| \"info\".to_string()));\n  }\n  env_logger::init();\n\n  log::info!(\"starting coordinator service...\");\n\n  
#[allow(unused_variables, unreachable_code)]\n  let db = {\n    #[cfg(all(feature = \"parity-db\", feature = \"rocksdb\"))]\n    panic!(\"built with parity-db and rocksdb\");\n    #[cfg(all(feature = \"parity-db\", not(feature = \"rocksdb\")))]\n    let db =\n      serai_db::new_parity_db(&serai_env::var(\"DB_PATH\").expect(\"path to DB wasn't specified\"));\n    #[cfg(feature = \"rocksdb\")]\n    let db =\n      serai_db::new_rocksdb(&serai_env::var(\"DB_PATH\").expect(\"path to DB wasn't specified\"));\n    db\n  };\n\n  let key = {\n    let mut key_hex = serai_env::var(\"SERAI_KEY\").expect(\"Serai key wasn't provided\");\n    let mut key_vec = hex::decode(&key_hex).map_err(|_| ()).expect(\"Serai key wasn't hex-encoded\");\n    key_hex.zeroize();\n    if key_vec.len() != 32 {\n      key_vec.zeroize();\n      panic!(\"Serai key had an invalid length\");\n    }\n    let mut key_bytes = [0; 32];\n    key_bytes.copy_from_slice(&key_vec);\n    key_vec.zeroize();\n    let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::from_repr(key_bytes).unwrap());\n    key_bytes.zeroize();\n    key\n  };\n\n  let processors = Arc::new(MessageQueue::from_env(Service::Coordinator));\n\n  let serai = (async {\n    loop {\n      let Ok(serai) = Serai::new(format!(\n        \"http://{}:9944\",\n        serai_env::var(\"SERAI_HOSTNAME\").expect(\"Serai hostname wasn't provided\")\n      ))\n      .await\n      else {\n        log::error!(\"couldn't connect to the Serai node\");\n        sleep(Duration::from_secs(5)).await;\n        continue;\n      };\n      log::info!(\"made initial connection to Serai node\");\n      return Arc::new(serai);\n    }\n  })\n  .await;\n  let p2p = LibP2p::new(serai.clone());\n  run(db, key, p2p, processors, serai).await\n}\n"
  },
  {
    "path": "coordinator/src/p2p.rs",
    "content": "use core::{time::Duration, fmt};\nuse std::{\n  sync::Arc,\n  io::{self, Read},\n  collections::{HashSet, HashMap},\n  time::{SystemTime, Instant},\n};\n\nuse async_trait::async_trait;\nuse rand_core::{RngCore, OsRng};\n\nuse scale::{Decode, Encode};\nuse borsh::{BorshSerialize, BorshDeserialize};\nuse serai_client::{\n  primitives::ExternalNetworkId, validator_sets::primitives::ExternalValidatorSet, Serai,\n};\n\nuse serai_db::Db;\n\nuse futures_util::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, StreamExt};\nuse tokio::{\n  sync::{Mutex, RwLock, mpsc, broadcast},\n  time::sleep,\n};\n\nuse libp2p::{\n  core::multiaddr::{Protocol, Multiaddr},\n  identity::Keypair,\n  PeerId,\n  tcp::Config as TcpConfig,\n  noise, yamux,\n  request_response::{\n    Codec as RrCodecTrait, Message as RrMessage, Event as RrEvent, Config as RrConfig,\n    Behaviour as RrBehavior, ProtocolSupport,\n  },\n  gossipsub::{\n    IdentTopic, FastMessageId, MessageId, MessageAuthenticity, ValidationMode, ConfigBuilder,\n    IdentityTransform, AllowAllSubscriptionFilter, Event as GsEvent, PublishError,\n    Behaviour as GsBehavior,\n  },\n  swarm::{NetworkBehaviour, SwarmEvent},\n  SwarmBuilder,\n};\n\npub(crate) use tributary::{ReadWrite, P2p as TributaryP2p};\n\nuse crate::{Transaction, Block, Tributary, ActiveTributary, TributaryEvent};\n\n// Block size limit + 1 KB of space for signatures/metadata\nconst MAX_LIBP2P_GOSSIP_MESSAGE_SIZE: usize = tributary::BLOCK_SIZE_LIMIT + 1024;\n\nconst MAX_LIBP2P_REQRES_MESSAGE_SIZE: usize =\n  (tributary::BLOCK_SIZE_LIMIT * BLOCKS_PER_BATCH) + 1024;\n\nconst MAX_LIBP2P_MESSAGE_SIZE: usize = {\n  // Manual `max` since `max` isn't a const fn\n  if MAX_LIBP2P_GOSSIP_MESSAGE_SIZE > MAX_LIBP2P_REQRES_MESSAGE_SIZE {\n    MAX_LIBP2P_GOSSIP_MESSAGE_SIZE\n  } else {\n    MAX_LIBP2P_REQRES_MESSAGE_SIZE\n  }\n};\n\nconst LIBP2P_TOPIC: &str = \"serai-coordinator\";\n\n// Amount of blocks in a minute\nconst BLOCKS_PER_MINUTE: usize = (60 / 
(tributary::tendermint::TARGET_BLOCK_TIME / 1000)) as usize;\n\n// Maximum amount of blocks to send in a batch\nconst BLOCKS_PER_BATCH: usize = BLOCKS_PER_MINUTE + 1;\n\n#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, BorshSerialize, BorshDeserialize)]\npub struct CosignedBlock {\n  pub network: ExternalNetworkId,\n  pub block_number: u64,\n  pub block: [u8; 32],\n  pub signature: [u8; 64],\n}\n\n#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]\npub enum ReqResMessageKind {\n  KeepAlive,\n  Heartbeat([u8; 32]),\n  Block([u8; 32]),\n}\n\nimpl ReqResMessageKind {\n  pub fn read<R: Read>(reader: &mut R) -> Option<ReqResMessageKind> {\n    let mut kind = [0; 1];\n    reader.read_exact(&mut kind).ok()?;\n    match kind[0] {\n      0 => Some(ReqResMessageKind::KeepAlive),\n      1 => Some({\n        let mut genesis = [0; 32];\n        reader.read_exact(&mut genesis).ok()?;\n        ReqResMessageKind::Heartbeat(genesis)\n      }),\n      2 => Some({\n        let mut genesis = [0; 32];\n        reader.read_exact(&mut genesis).ok()?;\n        ReqResMessageKind::Block(genesis)\n      }),\n      _ => None,\n    }\n  }\n\n  pub fn serialize(&self) -> Vec<u8> {\n    match self {\n      ReqResMessageKind::KeepAlive => vec![0],\n      ReqResMessageKind::Heartbeat(genesis) => {\n        let mut res = vec![1];\n        res.extend(genesis);\n        res\n      }\n      ReqResMessageKind::Block(genesis) => {\n        let mut res = vec![2];\n        res.extend(genesis);\n        res\n      }\n    }\n  }\n}\n\n#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]\npub enum GossipMessageKind {\n  Tributary([u8; 32]),\n  CosignedBlock,\n}\n\nimpl GossipMessageKind {\n  pub fn read<R: Read>(reader: &mut R) -> Option<GossipMessageKind> {\n    let mut kind = [0; 1];\n    reader.read_exact(&mut kind).ok()?;\n    match kind[0] {\n      0 => Some({\n        let mut genesis = [0; 32];\n        reader.read_exact(&mut genesis).ok()?;\n        GossipMessageKind::Tributary(genesis)\n      }),\n    
  1 => Some(GossipMessageKind::CosignedBlock),\n      _ => None,\n    }\n  }\n\n  pub fn serialize(&self) -> Vec<u8> {\n    match self {\n      GossipMessageKind::Tributary(genesis) => {\n        let mut res = vec![0];\n        res.extend(genesis);\n        res\n      }\n      GossipMessageKind::CosignedBlock => {\n        vec![1]\n      }\n    }\n  }\n}\n\n#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]\npub enum P2pMessageKind {\n  ReqRes(ReqResMessageKind),\n  Gossip(GossipMessageKind),\n}\n\nimpl P2pMessageKind {\n  fn genesis(&self) -> Option<[u8; 32]> {\n    match self {\n      P2pMessageKind::ReqRes(ReqResMessageKind::KeepAlive) |\n      P2pMessageKind::Gossip(GossipMessageKind::CosignedBlock) => None,\n      P2pMessageKind::ReqRes(\n        ReqResMessageKind::Heartbeat(genesis) | ReqResMessageKind::Block(genesis),\n      ) |\n      P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => Some(*genesis),\n    }\n  }\n}\n\nimpl From<ReqResMessageKind> for P2pMessageKind {\n  fn from(kind: ReqResMessageKind) -> P2pMessageKind {\n    P2pMessageKind::ReqRes(kind)\n  }\n}\n\nimpl From<GossipMessageKind> for P2pMessageKind {\n  fn from(kind: GossipMessageKind) -> P2pMessageKind {\n    P2pMessageKind::Gossip(kind)\n  }\n}\n\n#[derive(Clone, Debug)]\npub struct Message<P: P2p> {\n  pub sender: P::Id,\n  pub kind: P2pMessageKind,\n  pub msg: Vec<u8>,\n}\n\n#[derive(Clone, Debug, Encode, Decode)]\npub struct BlockCommit {\n  pub block: Vec<u8>,\n  pub commit: Vec<u8>,\n}\n\n#[derive(Clone, Debug, Encode, Decode)]\npub struct HeartbeatBatch {\n  pub blocks: Vec<BlockCommit>,\n  pub timestamp: u64,\n}\n\n#[async_trait]\npub trait P2p: Send + Sync + Clone + fmt::Debug + TributaryP2p {\n  type Id: Send + Sync + Clone + Copy + fmt::Debug;\n\n  async fn subscribe(&self, set: ExternalValidatorSet, genesis: [u8; 32]);\n  async fn unsubscribe(&self, set: ExternalValidatorSet, genesis: [u8; 32]);\n\n  async fn send_raw(&self, to: Self::Id, msg: Vec<u8>);\n  async fn 
broadcast_raw(&self, kind: P2pMessageKind, msg: Vec<u8>);\n  async fn receive(&self) -> Message<Self>;\n\n  async fn send(&self, to: Self::Id, kind: ReqResMessageKind, msg: Vec<u8>) {\n    let mut actual_msg = kind.serialize();\n    actual_msg.extend(msg);\n    self.send_raw(to, actual_msg).await;\n  }\n  async fn broadcast(&self, kind: impl Send + Into<P2pMessageKind>, msg: Vec<u8>) {\n    let kind = kind.into();\n    let mut actual_msg = match kind {\n      P2pMessageKind::ReqRes(kind) => kind.serialize(),\n      P2pMessageKind::Gossip(kind) => kind.serialize(),\n    };\n    actual_msg.extend(msg);\n    /*\n    log::trace!(\n      \"broadcasting p2p message (kind {})\",\n      match kind {\n        P2pMessageKind::KeepAlive => \"KeepAlive\".to_string(),\n        P2pMessageKind::Tributary(genesis) => format!(\"Tributary({})\", hex::encode(genesis)),\n        P2pMessageKind::Heartbeat(genesis) => format!(\"Heartbeat({})\", hex::encode(genesis)),\n        P2pMessageKind::Block(genesis) => format!(\"Block({})\", hex::encode(genesis)),\n        P2pMessageKind::CosignedBlock => \"CosignedBlock\".to_string(),\n      }\n    );\n    */\n    self.broadcast_raw(kind, actual_msg).await;\n  }\n}\n\n#[derive(Default, Clone, Copy, PartialEq, Eq, Debug)]\nstruct RrCodec;\n#[async_trait]\nimpl RrCodecTrait for RrCodec {\n  type Protocol = &'static str;\n  type Request = Vec<u8>;\n  type Response = Vec<u8>;\n\n  async fn read_request<R: Send + Unpin + AsyncRead>(\n    &mut self,\n    _: &Self::Protocol,\n    io: &mut R,\n  ) -> io::Result<Vec<u8>> {\n    let mut len = [0; 4];\n    io.read_exact(&mut len).await?;\n    let len = usize::try_from(u32::from_le_bytes(len)).expect(\"not at least a 32-bit platform?\");\n    if len > MAX_LIBP2P_REQRES_MESSAGE_SIZE {\n      Err(io::Error::other(\"request length exceeded MAX_LIBP2P_REQRES_MESSAGE_SIZE\"))?;\n    }\n    // This may be a non-trivial allocation easily causable\n    // While we could chunk the read, meaning we only perform the 
allocation as bandwidth is used,\n    // the max message size should be sufficiently sane\n    let mut buf = vec![0; len];\n    io.read_exact(&mut buf).await?;\n    Ok(buf)\n  }\n  async fn read_response<R: Send + Unpin + AsyncRead>(\n    &mut self,\n    proto: &Self::Protocol,\n    io: &mut R,\n  ) -> io::Result<Vec<u8>> {\n    self.read_request(proto, io).await\n  }\n  async fn write_request<W: Send + Unpin + AsyncWrite>(\n    &mut self,\n    _: &Self::Protocol,\n    io: &mut W,\n    req: Vec<u8>,\n  ) -> io::Result<()> {\n    io.write_all(\n      &u32::try_from(req.len())\n        .map_err(|_| io::Error::other(\"request length exceeded 2**32\"))?\n        .to_le_bytes(),\n    )\n    .await?;\n    io.write_all(&req).await\n  }\n  async fn write_response<W: Send + Unpin + AsyncWrite>(\n    &mut self,\n    proto: &Self::Protocol,\n    io: &mut W,\n    res: Vec<u8>,\n  ) -> io::Result<()> {\n    self.write_request(proto, io, res).await\n  }\n}\n\n#[derive(NetworkBehaviour)]\nstruct Behavior {\n  reqres: RrBehavior<RrCodec>,\n  gossipsub: GsBehavior,\n}\n\n#[allow(clippy::type_complexity)]\n#[derive(Clone)]\npub struct LibP2p {\n  subscribe: Arc<Mutex<mpsc::UnboundedSender<(bool, ExternalValidatorSet, [u8; 32])>>>,\n  send: Arc<Mutex<mpsc::UnboundedSender<(PeerId, Vec<u8>)>>>,\n  broadcast: Arc<Mutex<mpsc::UnboundedSender<(P2pMessageKind, Vec<u8>)>>>,\n  receive: Arc<Mutex<mpsc::UnboundedReceiver<Message<Self>>>>,\n}\nimpl fmt::Debug for LibP2p {\n  fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {\n    fmt.debug_struct(\"LibP2p\").finish_non_exhaustive()\n  }\n}\n\nimpl LibP2p {\n  #[allow(clippy::new_without_default)]\n  pub fn new(serai: Arc<Serai>) -> Self {\n    log::info!(\"creating a libp2p instance\");\n\n    let throwaway_key_pair = Keypair::generate_ed25519();\n\n    let behavior = Behavior {\n      reqres: { RrBehavior::new([(\"/coordinator\", ProtocolSupport::Full)], RrConfig::default()) },\n      gossipsub: {\n        let heartbeat_interval = 
tributary::tendermint::LATENCY_TIME / 2;\n        let heartbeats_per_block =\n          usize::try_from(tributary::tendermint::TARGET_BLOCK_TIME / heartbeat_interval).unwrap();\n\n        use blake2::{Digest, Blake2s256};\n        let config = ConfigBuilder::default()\n          .heartbeat_interval(Duration::from_millis(heartbeat_interval.into()))\n          .history_length(heartbeats_per_block * 2)\n          .history_gossip(heartbeats_per_block)\n          .max_transmit_size(MAX_LIBP2P_GOSSIP_MESSAGE_SIZE)\n          // We send KeepAlive after 80s\n          .idle_timeout(Duration::from_secs(85))\n          .validation_mode(ValidationMode::Strict)\n          // Uses a content based message ID to avoid duplicates as much as possible\n          .message_id_fn(|msg| {\n            MessageId::new(&Blake2s256::digest([msg.topic.as_str().as_bytes(), &msg.data].concat()))\n          })\n          // Re-defines for fast ID to prevent needing to convert into a Message to run\n          // message_id_fn\n          // This function is valid for both\n          .fast_message_id_fn(|msg| {\n            FastMessageId::new(&Blake2s256::digest(\n              [msg.topic.as_str().as_bytes(), &msg.data].concat(),\n            ))\n          })\n          .build();\n        let mut gossipsub = GsBehavior::<IdentityTransform, AllowAllSubscriptionFilter>::new(\n          MessageAuthenticity::Signed(throwaway_key_pair.clone()),\n          config.unwrap(),\n        )\n        .unwrap();\n\n        // Subscribe to the base topic\n        let topic = IdentTopic::new(LIBP2P_TOPIC);\n        gossipsub.subscribe(&topic).unwrap();\n\n        gossipsub\n      },\n    };\n\n    // Uses noise for authentication, yamux for multiplexing\n    // TODO: Do we want to add a custom authentication protocol to only accept connections from\n    // fellow validators? 
Doing so would reduce the potential for spam\n    // TODO: Relay client?\n    let mut swarm = SwarmBuilder::with_existing_identity(throwaway_key_pair)\n      .with_tokio()\n      .with_tcp(TcpConfig::default().nodelay(true), noise::Config::new, || {\n        let mut config = yamux::Config::default();\n        // 1 MiB default + max message size\n        config.set_max_buffer_size((1024 * 1024) + MAX_LIBP2P_MESSAGE_SIZE);\n        // 256 KiB default + max message size\n        config\n          .set_receive_window_size(((256 * 1024) + MAX_LIBP2P_MESSAGE_SIZE).try_into().unwrap());\n        config\n      })\n      .unwrap()\n      .with_behaviour(|_| behavior)\n      .unwrap()\n      .build();\n    const PORT: u16 = 30563; // 5132 ^ (('c' << 8) | 'o')\n    swarm.listen_on(format!(\"/ip4/0.0.0.0/tcp/{PORT}\").parse().unwrap()).unwrap();\n\n    let (send_send, mut send_recv) = mpsc::unbounded_channel();\n    let (broadcast_send, mut broadcast_recv) = mpsc::unbounded_channel();\n    let (receive_send, receive_recv) = mpsc::unbounded_channel();\n    let (subscribe_send, mut subscribe_recv) = mpsc::unbounded_channel();\n\n    fn topic_for_set(set: ExternalValidatorSet) -> IdentTopic {\n      IdentTopic::new(format!(\"{LIBP2P_TOPIC}-{}\", hex::encode(set.encode())))\n    }\n\n    // TODO: If a network has less than TARGET_PEERS, this will cause retries ad infinitum\n    const TARGET_PEERS: usize = 5;\n\n    // The addrs we're currently dialing, and the networks associated with them\n    let dialing_peers = Arc::new(RwLock::new(HashMap::new()));\n    // The peers we're currently connected to, and the networks associated with them\n    let connected_peers =\n      Arc::new(RwLock::new(HashMap::<Multiaddr, HashSet<ExternalNetworkId>>::new()));\n\n    // Find and connect to peers\n    let (connect_to_network_send, mut connect_to_network_recv) =\n      tokio::sync::mpsc::unbounded_channel();\n    let (to_dial_send, mut to_dial_recv) = tokio::sync::mpsc::unbounded_channel();\n   
 tokio::spawn({\n      let dialing_peers = dialing_peers.clone();\n      let connected_peers = connected_peers.clone();\n\n      let connect_to_network_send = connect_to_network_send.clone();\n      async move {\n        loop {\n          let connect = |network: ExternalNetworkId, addr: Multiaddr| {\n            let dialing_peers = dialing_peers.clone();\n            let connected_peers = connected_peers.clone();\n            let to_dial_send = to_dial_send.clone();\n            let connect_to_network_send = connect_to_network_send.clone();\n            async move {\n              log::info!(\"found peer from substrate: {addr}\");\n\n              let protocols = addr.iter().filter_map(|piece| match piece {\n                // Drop PeerIds from the Substrate P2p network\n                Protocol::P2p(_) => None,\n                // Use our own TCP port\n                Protocol::Tcp(_) => Some(Protocol::Tcp(PORT)),\n                other => Some(other),\n              });\n\n              let mut new_addr = Multiaddr::empty();\n              for protocol in protocols {\n                new_addr.push(protocol);\n              }\n              let addr = new_addr;\n              log::debug!(\"transformed found peer: {addr}\");\n\n              let (is_fresh_dial, nets) = {\n                let mut dialing_peers = dialing_peers.write().await;\n                let is_fresh_dial = !dialing_peers.contains_key(&addr);\n                if is_fresh_dial {\n                  dialing_peers.insert(addr.clone(), HashSet::new());\n                }\n                // Associate this network with this peer\n                dialing_peers.get_mut(&addr).unwrap().insert(network);\n\n                let nets = dialing_peers.get(&addr).unwrap().clone();\n                (is_fresh_dial, nets)\n              };\n\n              // Spawn a task to remove this peer from 'dialing' in sixty seconds, in case dialing\n              // fails\n              // This performs cleanup and bounds 
the size of the map to whatever growth occurs\n              // within a temporal window\n              tokio::spawn({\n                let dialing_peers = dialing_peers.clone();\n                let connected_peers = connected_peers.clone();\n                let connect_to_network_send = connect_to_network_send.clone();\n                let addr = addr.clone();\n                async move {\n                  tokio::time::sleep(core::time::Duration::from_secs(60)).await;\n                  let mut dialing_peers = dialing_peers.write().await;\n                  if let Some(expected_nets) = dialing_peers.remove(&addr) {\n                    log::debug!(\"removed addr from dialing upon timeout: {addr}\");\n\n                    // TODO: De-duplicate this below instance\n                    // If we failed to dial and haven't gotten enough actual connections, retry\n                    let connected_peers = connected_peers.read().await;\n                    for net in expected_nets {\n                      let mut remaining_peers = 0;\n                      for nets in connected_peers.values() {\n                        if nets.contains(&net) {\n                          remaining_peers += 1;\n                        }\n                      }\n                      // If we do not, start connecting to this network again\n                      if remaining_peers < TARGET_PEERS {\n                        connect_to_network_send.send(net).expect(\n                          \"couldn't send net to connect to due to disconnects (receiver dropped?)\",\n                        );\n                      }\n                    }\n                  }\n                }\n              });\n\n              if is_fresh_dial {\n                to_dial_send.send((addr, nets)).unwrap();\n              }\n            }\n          };\n\n          // TODO: We should also connect to random peers from random nets as needed for\n          // cosigning\n\n          // Drain the chainnel, 
de-duplicating any networks in it\n          let mut connect_to_network_networks = HashSet::new();\n          while let Ok(network) = connect_to_network_recv.try_recv() {\n            connect_to_network_networks.insert(network);\n          }\n          for network in connect_to_network_networks {\n            if let Ok(mut nodes) = serai.p2p_validators(network.into()).await {\n              // If there's an insufficient amount of nodes known, connect to all yet add it\n              // back and break\n              if nodes.len() < TARGET_PEERS {\n                log::warn!(\n                  \"insufficient amount of P2P nodes known for {:?}: {}\",\n                  network,\n                  nodes.len()\n                );\n                // Retry this later\n                connect_to_network_send.send(network).unwrap();\n                for node in nodes {\n                  connect(network, node).await;\n                }\n                continue;\n              }\n\n              // Randomly select up to 150% of the TARGET_PEERS\n              for _ in 0 .. 
((3 * TARGET_PEERS) / 2) {\n                if !nodes.is_empty() {\n                  let to_connect = nodes.swap_remove(\n                    usize::try_from(OsRng.next_u64() % u64::try_from(nodes.len()).unwrap())\n                      .unwrap(),\n                  );\n                  connect(network, to_connect).await;\n                }\n              }\n            }\n          }\n          // Sleep 60 seconds before moving to the next iteration\n          tokio::time::sleep(core::time::Duration::from_secs(60)).await;\n        }\n      }\n    });\n\n    // Manage the actual swarm\n    tokio::spawn({\n      let mut time_of_last_p2p_message = Instant::now();\n\n      async move {\n        let connected_peers = connected_peers.clone();\n\n        let mut set_for_genesis = HashMap::new();\n        loop {\n          let time_since_last = Instant::now().duration_since(time_of_last_p2p_message);\n          tokio::select! {\n            biased;\n\n            // Subscribe to any new topics\n            set = subscribe_recv.recv() => {\n              let (subscribe, set, genesis): (_, ExternalValidatorSet, [u8; 32]) =\n                set.expect(\"subscribe_recv closed. are we shutting down?\");\n              let topic = topic_for_set(set);\n              if subscribe {\n                log::info!(\"subscribing to p2p messages for {set:?}\");\n                connect_to_network_send.send(set.network).unwrap();\n                set_for_genesis.insert(genesis, set);\n                swarm.behaviour_mut().gossipsub.subscribe(&topic).unwrap();\n              } else {\n                log::info!(\"unsubscribing to p2p messages for {set:?}\");\n                set_for_genesis.remove(&genesis);\n                swarm.behaviour_mut().gossipsub.unsubscribe(&topic).unwrap();\n              }\n            }\n\n            msg = send_recv.recv() => {\n              let (peer, msg): (PeerId, Vec<u8>) =\n                msg.expect(\"send_recv closed. 
are we shutting down?\");\n              swarm.behaviour_mut().reqres.send_request(&peer, msg);\n            },\n\n            // Handle any queued outbound messages\n            msg = broadcast_recv.recv() => {\n              // Update the time of last message\n              time_of_last_p2p_message = Instant::now();\n\n              let (kind, msg): (P2pMessageKind, Vec<u8>) =\n                msg.expect(\"broadcast_recv closed. are we shutting down?\");\n\n              if matches!(kind, P2pMessageKind::ReqRes(_)) {\n                // Use request/response, yet send to all connected peers\n                for peer_id in swarm.connected_peers().copied().collect::<Vec<_>>() {\n                  swarm.behaviour_mut().reqres.send_request(&peer_id, msg.clone());\n                }\n              } else {\n                // Use gossipsub\n\n                let set =\n                  kind.genesis().and_then(|genesis| set_for_genesis.get(&genesis).copied());\n                let topic = if let Some(set) = set {\n                  topic_for_set(set)\n                } else {\n                  IdentTopic::new(LIBP2P_TOPIC)\n                };\n\n                match swarm.behaviour_mut().gossipsub.publish(topic, msg.clone()) {\n                  Err(PublishError::SigningError(e)) => {\n                    panic!(\"signing error when broadcasting: {e}\")\n                  },\n                  Err(PublishError::InsufficientPeers) => {\n                    log::warn!(\"failed to send p2p message due to insufficient peers\")\n                  }\n                  Err(PublishError::MessageTooLarge) => {\n                    panic!(\"tried to send a too large message: {}\", hex::encode(msg))\n                  }\n                  Err(PublishError::TransformFailed(e)) => panic!(\"IdentityTransform failed: {e}\"),\n                  Err(PublishError::Duplicate) | Ok(_) => {}\n                }\n              }\n            }\n\n            // Handle new incoming 
messages\n            event = swarm.next() => {\n              match event {\n                Some(SwarmEvent::Dialing { connection_id, .. }) => {\n                  log::debug!(\"dialing to peer in connection ID {}\", &connection_id);\n                }\n                Some(SwarmEvent::ConnectionEstablished {\n                  peer_id,\n                  connection_id,\n                  endpoint,\n                  ..\n                }) => {\n                  if &peer_id == swarm.local_peer_id() {\n                    log::warn!(\"established a libp2p connection to ourselves\");\n                    swarm.close_connection(connection_id);\n                    continue;\n                  }\n\n                  let addr = endpoint.get_remote_address();\n                  let nets = {\n                    let mut dialing_peers = dialing_peers.write().await;\n                    if let Some(nets) = dialing_peers.remove(addr) {\n                      nets\n                    } else {\n                      log::debug!(\"connected to a peer who we didn't have within dialing\");\n                      HashSet::new()\n                    }\n                  };\n                  {\n                    let mut connected_peers = connected_peers.write().await;\n                    connected_peers.insert(addr.clone(), nets);\n\n                    log::debug!(\n                      \"connection established to peer {} in connection ID {}, connected peers: {}\",\n                      &peer_id,\n                      &connection_id,\n                      connected_peers.len(),\n                    );\n                  }\n                }\n                Some(SwarmEvent::ConnectionClosed { peer_id, endpoint, .. 
}) => {\n                  let mut connected_peers = connected_peers.write().await;\n                  let Some(nets) = connected_peers.remove(endpoint.get_remote_address()) else {\n                    log::debug!(\"closed connection to peer which wasn't in connected_peers\");\n                    continue;\n                  };\n                  // Downgrade to a read lock\n                  let connected_peers = connected_peers.downgrade();\n\n                  // For each net we lost a peer for, check if we still have sufficient peers\n                  // overall\n                  for net in nets {\n                    let mut remaining_peers = 0;\n                    for nets in connected_peers.values() {\n                      if nets.contains(&net) {\n                        remaining_peers += 1;\n                      }\n                    }\n                    // If we do not, start connecting to this network again\n                    if remaining_peers < TARGET_PEERS {\n                      connect_to_network_send\n                        .send(net)\n                        .expect(\n                          \"couldn't send net to connect to due to disconnects (receiver dropped?)\"\n                        );\n                    }\n                  }\n\n                  log::debug!(\n                    \"connection with peer {peer_id} closed, connected peers: {}\",\n                    connected_peers.len(),\n                  );\n                }\n                Some(SwarmEvent::Behaviour(BehaviorEvent::Reqres(\n                  RrEvent::Message { peer, message },\n                ))) => {\n                  let message = match message {\n                    RrMessage::Request { request, .. } => request,\n                    RrMessage::Response { response, .. 
} => response,\n                  };\n\n                  let mut msg_ref = message.as_slice();\n                  let Some(kind) = ReqResMessageKind::read(&mut msg_ref) else { continue };\n                  let message = Message {\n                    sender: peer,\n                    kind: P2pMessageKind::ReqRes(kind),\n                    msg: msg_ref.to_vec(),\n                  };\n                  receive_send.send(message).expect(\"receive_send closed. are we shutting down?\");\n                }\n                Some(SwarmEvent::Behaviour(BehaviorEvent::Gossipsub(\n                  GsEvent::Message { propagation_source, message, .. },\n                ))) => {\n                  let mut msg_ref = message.data.as_slice();\n                  let Some(kind) = GossipMessageKind::read(&mut msg_ref) else { continue };\n                  let message = Message {\n                    sender: propagation_source,\n                    kind: P2pMessageKind::Gossip(kind),\n                    msg: msg_ref.to_vec(),\n                  };\n                  receive_send.send(message).expect(\"receive_send closed. 
are we shutting down?\");\n                }\n                _ => {}\n              }\n            }\n\n            // Handle peers to dial\n            addr_and_nets = to_dial_recv.recv() => {\n              let (addr, nets) =\n                addr_and_nets.expect(\"received address was None (sender dropped?)\");\n              // If we've already dialed and connected to this address, don't further dial them\n              // Just associate these networks with them\n              if let Some(existing_nets) = connected_peers.write().await.get_mut(&addr) {\n                for net in nets {\n                  existing_nets.insert(net);\n                }\n                continue;\n              }\n\n              if let Err(e) = swarm.dial(addr) {\n                log::warn!(\"dialing peer failed: {e:?}\");\n              }\n            }\n\n            // If it's been >80s since we've published a message, publish a KeepAlive since we're\n            // still an active service\n            // This is useful when we have no active tributaries and accordingly aren't sending\n            // heartbeats\n            // If we are sending heartbeats, we should've sent one after 60s of no finalized blocks\n            // (where a finalized block only occurs due to network activity), meaning this won't be\n            // run\n            () = tokio::time::sleep(Duration::from_secs(80).saturating_sub(time_since_last)) => {\n              time_of_last_p2p_message = Instant::now();\n              for peer_id in swarm.connected_peers().copied().collect::<Vec<_>>() {\n                swarm\n                  .behaviour_mut()\n                  .reqres\n                  .send_request(&peer_id, ReqResMessageKind::KeepAlive.serialize());\n              }\n            }\n          }\n        }\n      }\n    });\n\n    LibP2p {\n      subscribe: Arc::new(Mutex::new(subscribe_send)),\n      send: Arc::new(Mutex::new(send_send)),\n      broadcast: 
Arc::new(Mutex::new(broadcast_send)),\n      receive: Arc::new(Mutex::new(receive_recv)),\n    }\n  }\n}\n\n#[async_trait]\nimpl P2p for LibP2p {\n  type Id = PeerId;\n\n  async fn subscribe(&self, set: ExternalValidatorSet, genesis: [u8; 32]) {\n    self\n      .subscribe\n      .lock()\n      .await\n      .send((true, set, genesis))\n      .expect(\"subscribe_send closed. are we shutting down?\");\n  }\n\n  async fn unsubscribe(&self, set: ExternalValidatorSet, genesis: [u8; 32]) {\n    self\n      .subscribe\n      .lock()\n      .await\n      .send((false, set, genesis))\n      .expect(\"subscribe_send closed. are we shutting down?\");\n  }\n\n  async fn send_raw(&self, peer: Self::Id, msg: Vec<u8>) {\n    self.send.lock().await.send((peer, msg)).expect(\"send_send closed. are we shutting down?\");\n  }\n\n  async fn broadcast_raw(&self, kind: P2pMessageKind, msg: Vec<u8>) {\n    self\n      .broadcast\n      .lock()\n      .await\n      .send((kind, msg))\n      .expect(\"broadcast_send closed. are we shutting down?\");\n  }\n\n  // TODO: We only have a single handle call this. Differentiate Send/Recv to remove this constant\n  // lock acquisition?\n  async fn receive(&self) -> Message<Self> {\n    self.receive.lock().await.recv().await.expect(\"receive_recv closed. 
are we shutting down?\")\n  }\n}\n\n#[async_trait]\nimpl TributaryP2p for LibP2p {\n  async fn broadcast(&self, genesis: [u8; 32], msg: Vec<u8>) {\n    <Self as P2p>::broadcast(self, GossipMessageKind::Tributary(genesis), msg).await\n  }\n}\n\npub async fn heartbeat_tributaries_task<D: Db, P: P2p>(\n  p2p: P,\n  mut tributary_event: broadcast::Receiver<TributaryEvent<D, P>>,\n) {\n  let ten_blocks_of_time =\n    Duration::from_secs((10 * Tributary::<D, Transaction, P>::block_time()).into());\n\n  let mut readers = HashMap::new();\n  loop {\n    loop {\n      match tributary_event.try_recv() {\n        Ok(TributaryEvent::NewTributary(ActiveTributary { spec, tributary })) => {\n          readers.insert(spec.set(), tributary.reader());\n        }\n        Ok(TributaryEvent::TributaryRetired(set)) => {\n          readers.remove(&set);\n        }\n        Err(broadcast::error::TryRecvError::Empty) => break,\n        Err(broadcast::error::TryRecvError::Lagged(_)) => {\n          panic!(\"heartbeat_tributaries lagged to handle tributary_event\")\n        }\n        Err(broadcast::error::TryRecvError::Closed) => panic!(\"tributary_event sender closed\"),\n      }\n    }\n\n    for tributary in readers.values() {\n      let tip = tributary.tip();\n      let block_time =\n        SystemTime::UNIX_EPOCH + Duration::from_secs(tributary.time_of_block(&tip).unwrap_or(0));\n\n      // Only trigger syncing if the block is more than a minute behind\n      if SystemTime::now() > (block_time + Duration::from_secs(60)) {\n        log::warn!(\"last known tributary block was over a minute ago\");\n        let mut msg = tip.to_vec();\n        let time: u64 = SystemTime::now()\n          .duration_since(SystemTime::UNIX_EPOCH)\n          .expect(\"system clock is wrong\")\n          .as_secs();\n        msg.extend(time.to_le_bytes());\n        P2p::broadcast(&p2p, ReqResMessageKind::Heartbeat(tributary.genesis()), msg).await;\n      }\n    }\n\n    // Only check once every 10 blocks of 
time\n    sleep(ten_blocks_of_time).await;\n  }\n}\n\npub async fn handle_p2p_task<D: Db, P: P2p>(\n  p2p: P,\n  cosign_channel: mpsc::UnboundedSender<CosignedBlock>,\n  mut tributary_event: broadcast::Receiver<TributaryEvent<D, P>>,\n) {\n  let channels = Arc::new(RwLock::new(HashMap::<_, mpsc::UnboundedSender<Message<P>>>::new()));\n  tokio::spawn({\n    let p2p = p2p.clone();\n    let channels = channels.clone();\n    let mut set_to_genesis = HashMap::new();\n    async move {\n      loop {\n        match tributary_event.recv().await.unwrap() {\n          TributaryEvent::NewTributary(tributary) => {\n            let genesis = tributary.spec.genesis();\n            set_to_genesis.insert(tributary.spec.set(), genesis);\n\n            let (send, mut recv) = mpsc::unbounded_channel();\n            channels.write().await.insert(genesis, send);\n\n            // Subscribe to the topic for this tributary\n            p2p.subscribe(tributary.spec.set(), genesis).await;\n\n            let spec_set = tributary.spec.set();\n\n            // Per-Tributary P2P message handler\n            tokio::spawn({\n              let p2p = p2p.clone();\n              async move {\n                loop {\n                  let Some(msg) = recv.recv().await else {\n                    // Channel closure happens when the tributary retires\n                    break;\n                  };\n                  match msg.kind {\n                    P2pMessageKind::ReqRes(ReqResMessageKind::KeepAlive) => {}\n\n                    // TODO: Slash on Heartbeat which justifies a response, since the node\n                    // obviously was offline and we must now use our bandwidth to compensate for\n                    // them?\n                    P2pMessageKind::ReqRes(ReqResMessageKind::Heartbeat(msg_genesis)) => {\n                      assert_eq!(msg_genesis, genesis);\n                      if msg.msg.len() != 40 {\n                        log::error!(\"validator sent invalid heartbeat\");\n   
                     continue;\n                      }\n                      // Only respond to recent heartbeats\n                      let msg_time = u64::from_le_bytes(msg.msg[32 .. 40].try_into().expect(\n                        \"length-checked heartbeat message didn't have 8 bytes for the u64\",\n                      ));\n                      if SystemTime::now()\n                        .duration_since(SystemTime::UNIX_EPOCH)\n                        .expect(\"system clock is wrong\")\n                        .as_secs()\n                        .saturating_sub(msg_time) >\n                        10\n                      {\n                        continue;\n                      }\n\n                      log::debug!(\"received heartbeat with a recent timestamp\");\n\n                      let reader = tributary.tributary.reader();\n\n                      let p2p = p2p.clone();\n                      // Spawn a dedicated task as this may require loading large amounts of data\n                      // from disk and take a notable amount of time\n                      tokio::spawn(async move {\n                        let mut latest = msg.msg[.. 
32].try_into().unwrap();\n                        let mut to_send = vec![];\n                        while let Some(next) = reader.block_after(&latest) {\n                          to_send.push(next);\n                          latest = next;\n                        }\n                        if to_send.len() > 3 {\n                          // prepare the batch to sends\n                          let mut blocks = vec![];\n                          for (i, next) in to_send.iter().enumerate() {\n                            if i >= BLOCKS_PER_BATCH {\n                              break;\n                            }\n\n                            blocks.push(BlockCommit {\n                              block: reader.block(next).unwrap().serialize(),\n                              commit: reader.commit(next).unwrap(),\n                            });\n                          }\n                          let batch = HeartbeatBatch { blocks, timestamp: msg_time };\n\n                          p2p\n                            .send(msg.sender, ReqResMessageKind::Block(genesis), batch.encode())\n                            .await;\n                        }\n                      });\n                    }\n\n                    P2pMessageKind::ReqRes(ReqResMessageKind::Block(msg_genesis)) => {\n                      assert_eq!(msg_genesis, genesis);\n                      // decode the batch\n                      let Ok(batch) = HeartbeatBatch::decode(&mut msg.msg.as_ref()) else {\n                        log::error!(\n                          \"received HeartBeatBatch message with an invalidly serialized batch\"\n                        );\n                        continue;\n                      };\n\n                      // sync blocks\n                      for bc in batch.blocks {\n                        // TODO: why do we use ReadWrite instead of Encode/Decode for blocks?\n                        // Should we use the same for batches so we can read both at 
the same time?\n                        let Ok(block) = Block::<Transaction>::read(&mut bc.block.as_slice()) else {\n                          log::error!(\"received block message with an invalidly serialized block\");\n                          continue;\n                        };\n\n                        let res = tributary.tributary.sync_block(block, bc.commit).await;\n                        log::debug!(\n                          \"received block from {:?}, sync_block returned {}\",\n                          msg.sender,\n                          res\n                        );\n                      }\n                    }\n\n                    P2pMessageKind::Gossip(GossipMessageKind::Tributary(msg_genesis)) => {\n                      assert_eq!(msg_genesis, genesis);\n                      log::trace!(\"handling message for tributary {:?}\", spec_set);\n                      if tributary.tributary.handle_message(&msg.msg).await {\n                        P2p::broadcast(&p2p, msg.kind, msg.msg).await;\n                      }\n                    }\n\n                    P2pMessageKind::Gossip(GossipMessageKind::CosignedBlock) => unreachable!(),\n                  }\n                }\n              }\n            });\n          }\n          TributaryEvent::TributaryRetired(set) => {\n            if let Some(genesis) = set_to_genesis.remove(&set) {\n              p2p.unsubscribe(set, genesis).await;\n              channels.write().await.remove(&genesis);\n            }\n          }\n        }\n      }\n    }\n  });\n\n  loop {\n    let msg = p2p.receive().await;\n    match msg.kind {\n      P2pMessageKind::ReqRes(ReqResMessageKind::KeepAlive) => {}\n      P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) |\n      P2pMessageKind::ReqRes(\n        ReqResMessageKind::Heartbeat(genesis) | ReqResMessageKind::Block(genesis),\n      ) => {\n        if let Some(channel) = channels.read().await.get(&genesis) {\n          
channel.send(msg).unwrap();\n        }\n      }\n      P2pMessageKind::Gossip(GossipMessageKind::CosignedBlock) => {\n        let Ok(msg) = CosignedBlock::deserialize_reader(&mut msg.msg.as_slice()) else {\n          log::error!(\"received CosignedBlock message with invalidly serialized contents\");\n          continue;\n        };\n        cosign_channel.send(msg).unwrap();\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "coordinator/src/processors.rs",
    "content": "use std::sync::Arc;\n\nuse serai_client::primitives::ExternalNetworkId;\nuse processor_messages::{ProcessorMessage, CoordinatorMessage};\n\nuse message_queue::{Service, Metadata, client::MessageQueue};\n\n#[derive(Clone, PartialEq, Eq, Debug)]\npub struct Message {\n  pub id: u64,\n  pub network: ExternalNetworkId,\n  pub msg: ProcessorMessage,\n}\n\n#[async_trait::async_trait]\npub trait Processors: 'static + Send + Sync + Clone {\n  async fn send(&self, network: ExternalNetworkId, msg: impl Send + Into<CoordinatorMessage>);\n  async fn recv(&self, network: ExternalNetworkId) -> Message;\n  async fn ack(&self, msg: Message);\n}\n\n#[async_trait::async_trait]\nimpl Processors for Arc<MessageQueue> {\n  async fn send(&self, network: ExternalNetworkId, msg: impl Send + Into<CoordinatorMessage>) {\n    let msg: CoordinatorMessage = msg.into();\n    let metadata =\n      Metadata { from: self.service, to: Service::Processor(network), intent: msg.intent() };\n    let msg = borsh::to_vec(&msg).unwrap();\n    self.queue(metadata, msg).await;\n  }\n  async fn recv(&self, network: ExternalNetworkId) -> Message {\n    let msg = self.next(Service::Processor(network)).await;\n    assert_eq!(msg.from, Service::Processor(network));\n\n    let id = msg.id;\n\n    // Deserialize it into a ProcessorMessage\n    let msg: ProcessorMessage =\n      borsh::from_slice(&msg.msg).expect(\"message wasn't a borsh-encoded ProcessorMessage\");\n\n    return Message { id, network, msg };\n  }\n  async fn ack(&self, msg: Message) {\n    MessageQueue::ack(self, Service::Processor(msg.network), msg.id).await\n  }\n}\n"
  },
  {
    "path": "coordinator/src/substrate/cosign.rs",
    "content": "/*\n  If:\n    A) This block has events and it's been at least X blocks since the last cosign or\n    B) This block doesn't have events but it's been X blocks since a skipped block which did\n       have events or\n    C) This block key gens (which changes who the cosigners are)\n  cosign this block.\n\n  This creates both a minimum and maximum delay of X blocks before a block's cosigning begins,\n  barring key gens which are exceptional. The minimum delay is there to ensure we don't constantly\n  spawn new protocols every 6 seconds, overwriting the old ones. The maximum delay is there to\n  ensure any block needing cosigned is consigned within a reasonable amount of time.\n*/\n\nuse zeroize::Zeroizing;\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::Ciphersuite;\n\nuse borsh::{BorshSerialize, BorshDeserialize};\n\nuse serai_client::{\n  primitives::ExternalNetworkId,\n  validator_sets::primitives::{ExternalValidatorSet, Session},\n  Serai, SeraiError,\n};\n\nuse serai_db::*;\n\nuse crate::{Db, substrate::in_set, tributary::SeraiBlockNumber};\n\n// 5 minutes, expressed in blocks\n// TODO: Pull a constant for block time\nconst COSIGN_DISTANCE: u64 = 5 * 60 / 6;\n\n#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]\nenum HasEvents {\n  KeyGen,\n  Yes,\n  No,\n}\n\ncreate_db!(\n  SubstrateCosignDb {\n    ScanCosignFrom: () -> u64,\n    IntendedCosign: () -> (u64, Option<u64>),\n    BlockHasEventsCache: (block: u64) -> HasEvents,\n    LatestCosignedBlock: () -> u64,\n  }\n);\n\nimpl IntendedCosign {\n  // Sets the intended to cosign block, clearing the prior value entirely.\n  pub fn set_intended_cosign(txn: &mut impl DbTxn, intended: u64) {\n    Self::set(txn, &(intended, None::<u64>));\n  }\n\n  // Sets the cosign skipped since the last intended to cosign block.\n  pub fn set_skipped_cosign(txn: &mut impl DbTxn, skipped: u64) {\n    let (intended, prior_skipped) = Self::get(txn).unwrap();\n    
assert!(prior_skipped.is_none());\n    Self::set(txn, &(intended, Some(skipped)));\n  }\n}\n\nimpl LatestCosignedBlock {\n  pub fn latest_cosigned_block(getter: &impl Get) -> u64 {\n    Self::get(getter).unwrap_or_default().max(1)\n  }\n}\n\ndb_channel! {\n  SubstrateDbChannels {\n    CosignTransactions: (network: ExternalNetworkId) -> (Session, u64, [u8; 32]),\n  }\n}\n\nimpl CosignTransactions {\n  // Append a cosign transaction.\n  pub fn append_cosign(\n    txn: &mut impl DbTxn,\n    set: ExternalValidatorSet,\n    number: u64,\n    hash: [u8; 32],\n  ) {\n    CosignTransactions::send(txn, set.network, &(set.session, number, hash))\n  }\n}\n\nasync fn block_has_events(\n  txn: &mut impl DbTxn,\n  serai: &Serai,\n  block: u64,\n) -> Result<HasEvents, SeraiError> {\n  let cached = BlockHasEventsCache::get(txn, block);\n  match cached {\n    None => {\n      let serai = serai.as_of(\n        serai\n          .finalized_block_by_number(block)\n          .await?\n          .expect(\"couldn't get block which should've been finalized\")\n          .hash(),\n      );\n\n      if !serai.validator_sets().key_gen_events().await?.is_empty() {\n        return Ok(HasEvents::KeyGen);\n      }\n\n      let has_no_events = serai.coins().burn_with_instruction_events().await?.is_empty() &&\n        serai.in_instructions().batch_events().await?.is_empty() &&\n        serai.validator_sets().new_set_events().await?.is_empty() &&\n        serai.validator_sets().set_retired_events().await?.is_empty();\n\n      let has_events = if has_no_events { HasEvents::No } else { HasEvents::Yes };\n\n      BlockHasEventsCache::set(txn, block, &has_events);\n      Ok(has_events)\n    }\n    Some(code) => Ok(code),\n  }\n}\n\nasync fn potentially_cosign_block(\n  txn: &mut impl DbTxn,\n  serai: &Serai,\n  block: u64,\n  skipped_block: Option<u64>,\n  window_end_exclusive: u64,\n) -> Result<bool, SeraiError> {\n  // The following code regarding marking cosigned if prior block is cosigned expects 
this block to\n  // not be zero\n  // While we could perform this check there, there's no reason not to optimize the entire function\n  // as such\n  if block == 0 {\n    return Ok(false);\n  }\n\n  let block_has_events = block_has_events(txn, serai, block).await?;\n\n  // If this block had no events and immediately follows a cosigned block, mark it as cosigned\n  if (block_has_events == HasEvents::No) &&\n    (LatestCosignedBlock::latest_cosigned_block(txn) == (block - 1))\n  {\n    log::debug!(\"automatically co-signing next block ({block}) since it has no events\");\n    LatestCosignedBlock::set(txn, &block);\n  }\n\n  // If we skipped a block, we're supposed to sign it plus the COSIGN_DISTANCE if no other blocks\n  // trigger a cosigning protocol covering it\n  // This means there will be the maximum delay allowed from a block needing cosigning occurring\n  // and a cosign for it triggering\n  let maximally_latent_cosign_block =\n    skipped_block.map(|skipped_block| skipped_block + COSIGN_DISTANCE);\n\n  // If this block is within the window,\n  if block < window_end_exclusive {\n    // and set a key, cosign it\n    if block_has_events == HasEvents::KeyGen {\n      IntendedCosign::set_intended_cosign(txn, block);\n      // Carry skipped if it isn't included by cosigning this block\n      if let Some(skipped) = skipped_block {\n        if skipped > block {\n          IntendedCosign::set_skipped_cosign(txn, block);\n        }\n      }\n      return Ok(true);\n    }\n  } else if (Some(block) == maximally_latent_cosign_block) || (block_has_events != HasEvents::No) {\n    // Since this block was outside the window and had events/was maximally latent, cosign it\n    IntendedCosign::set_intended_cosign(txn, block);\n    return Ok(true);\n  }\n  Ok(false)\n}\n\n/*\n  Advances the cosign protocol as should be done per the latest block.\n\n  A block is considered cosigned if:\n    A) It was cosigned\n    B) It's the parent of a cosigned block\n    C) It immediately 
follows a cosigned block and has no events requiring cosigning\n\n  This only actually performs advancement within a limited bound (generally until it finds a block\n  which should be cosigned). Accordingly, it is necessary to call multiple times even if\n  `latest_number` doesn't change.\n*/\nasync fn advance_cosign_protocol_inner(\n  db: &mut impl Db,\n  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,\n  serai: &Serai,\n  latest_number: u64,\n) -> Result<(), SeraiError> {\n  let mut txn = db.txn();\n\n  const INITIAL_INTENDED_COSIGN: u64 = 1;\n  let (last_intended_to_cosign_block, mut skipped_block) = {\n    let intended_cosign = IntendedCosign::get(&txn);\n    // If we haven't prior intended to cosign a block, set the intended cosign to 1\n    if let Some(intended_cosign) = intended_cosign {\n      intended_cosign\n    } else {\n      IntendedCosign::set_intended_cosign(&mut txn, INITIAL_INTENDED_COSIGN);\n      IntendedCosign::get(&txn).unwrap()\n    }\n  };\n\n  // \"windows\" refers to the window of blocks where even if there's a block which should be\n  // cosigned, it won't be due to proximity due to the prior cosign\n  let mut window_end_exclusive = last_intended_to_cosign_block + COSIGN_DISTANCE;\n  // If we've never triggered a cosign, don't skip any cosigns based on proximity\n  if last_intended_to_cosign_block == INITIAL_INTENDED_COSIGN {\n    window_end_exclusive = 1;\n  }\n\n  // The consensus rules for this are `last_intended_to_cosign_block + 1`\n  let scan_start_block = last_intended_to_cosign_block + 1;\n  // As a practical optimization, we don't re-scan old blocks since old blocks are independent to\n  // new state\n  let scan_start_block = scan_start_block.max(ScanCosignFrom::get(&txn).unwrap_or(1));\n\n  // Check all blocks within the window to see if they should be cosigned\n  // If so, we're skipping them and need to flag them as skipped so that once the window closes, we\n  // do cosign them\n  // We only perform this check if we haven't 
already marked a block as skipped since the cosign\n  // the skipped block will cause will cosign all other blocks within this window\n  if skipped_block.is_none() {\n    let window_end_inclusive = window_end_exclusive - 1;\n    for b in scan_start_block ..= window_end_inclusive.min(latest_number) {\n      if block_has_events(&mut txn, serai, b).await? == HasEvents::Yes {\n        skipped_block = Some(b);\n        log::debug!(\"skipping cosigning {b} due to proximity to prior cosign\");\n        IntendedCosign::set_skipped_cosign(&mut txn, b);\n        break;\n      }\n    }\n  }\n\n  // A block which should be cosigned\n  let mut to_cosign = None;\n  // A list of sets which are cosigning, along with a boolean of if we're in the set\n  let mut cosigning = vec![];\n\n  for block in scan_start_block ..= latest_number {\n    let actual_block = serai\n      .finalized_block_by_number(block)\n      .await?\n      .expect(\"couldn't get block which should've been finalized\");\n\n    // Save the block number for this block, as needed by the cosigner to perform cosigning\n    SeraiBlockNumber::set(&mut txn, actual_block.hash(), &block);\n\n    if potentially_cosign_block(&mut txn, serai, block, skipped_block, window_end_exclusive).await?\n    {\n      to_cosign = Some((block, actual_block.hash()));\n\n      // Get the keys as of the prior block\n      // If this key sets new keys, the coordinator won't acknowledge so until we process this\n      // block\n      // We won't process this block until its co-signed\n      // Using the keys of the prior block ensures this deadlock isn't reached\n      let serai = serai.as_of(actual_block.header.parent_hash.into());\n\n      for network in serai_client::primitives::EXTERNAL_NETWORKS {\n        // Get the latest session to have set keys\n        let set_with_keys = {\n          let Some(latest_session) = serai.validator_sets().session(network.into()).await? 
else {\n            continue;\n          };\n          let prior_session = Session(latest_session.0.saturating_sub(1));\n          if serai\n            .validator_sets()\n            .keys(ExternalValidatorSet { network, session: prior_session })\n            .await?\n            .is_some()\n          {\n            ExternalValidatorSet { network, session: prior_session }\n          } else {\n            let set = ExternalValidatorSet { network, session: latest_session };\n            if serai.validator_sets().keys(set).await?.is_none() {\n              continue;\n            }\n            set\n          }\n        };\n\n        log::debug!(\"{:?} will be cosigning {block}\", set_with_keys.network);\n        cosigning.push((set_with_keys, in_set(key, &serai, set_with_keys.into()).await?.unwrap()));\n      }\n\n      break;\n    }\n\n    // If this TX is committed, always start future scanning from the next block\n    ScanCosignFrom::set(&mut txn, &(block + 1));\n    // Since we're scanning *from* the next block, tidy the cache\n    BlockHasEventsCache::del(&mut txn, block);\n  }\n\n  if let Some((number, hash)) = to_cosign {\n    // If this block doesn't have cosigners, yet does have events, automatically mark it as\n    // cosigned\n    if cosigning.is_empty() {\n      log::debug!(\"{} had no cosigners available, marking as cosigned\", number);\n      LatestCosignedBlock::set(&mut txn, &number);\n    } else {\n      for (set, in_set) in cosigning {\n        if in_set {\n          log::debug!(\"cosigning {number} with {:?} {:?}\", set.network, set.session);\n          CosignTransactions::append_cosign(&mut txn, set, number, hash);\n        }\n      }\n    }\n  }\n  txn.commit();\n\n  Ok(())\n}\n\npub async fn advance_cosign_protocol(\n  db: &mut impl Db,\n  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,\n  serai: &Serai,\n  latest_number: u64,\n) -> Result<(), SeraiError> {\n  loop {\n    let scan_from = ScanCosignFrom::get(db).unwrap_or(1);\n    // Only scan 
1000 blocks at a time to limit a massive txn from forming\n    let scan_to = latest_number.min(scan_from + 1000);\n    advance_cosign_protocol_inner(db, key, serai, scan_to).await?;\n    // If we didn't limit the scan_to, break\n    if scan_to == latest_number {\n      break;\n    }\n  }\n  Ok(())\n}\n"
  },
  {
    "path": "coordinator/src/substrate/db.rs",
    "content": "use serai_client::primitives::ExternalNetworkId;\n\npub use serai_db::*;\n\nmod inner_db {\n  use super::*;\n\n  create_db!(\n    SubstrateDb {\n      NextBlock: () -> u64,\n      HandledEvent: (block: [u8; 32]) -> u32,\n      BatchInstructionsHashDb: (network: ExternalNetworkId, id: u32) -> [u8; 32]\n    }\n  );\n}\npub(crate) use inner_db::{NextBlock, BatchInstructionsHashDb};\n\npub struct HandledEvent;\nimpl HandledEvent {\n  fn next_to_handle_event(getter: &impl Get, block: [u8; 32]) -> u32 {\n    inner_db::HandledEvent::get(getter, block).map_or(0, |last| last + 1)\n  }\n  pub fn is_unhandled(getter: &impl Get, block: [u8; 32], event_id: u32) -> bool {\n    let next = Self::next_to_handle_event(getter, block);\n    assert!(next >= event_id);\n    next == event_id\n  }\n  pub fn handle_event(txn: &mut impl DbTxn, block: [u8; 32], index: u32) {\n    assert!(Self::next_to_handle_event(txn, block) == index);\n    inner_db::HandledEvent::set(txn, block, &index);\n  }\n}\n"
  },
  {
    "path": "coordinator/src/substrate/mod.rs",
    "content": "use core::{ops::Deref, time::Duration};\nuse std::{\n  sync::Arc,\n  collections::{HashSet, HashMap},\n};\n\nuse zeroize::Zeroizing;\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::{group::GroupEncoding, Ciphersuite};\n\nuse serai_client::{\n  coins::CoinsEvent,\n  in_instructions::InInstructionsEvent,\n  primitives::{BlockHash, ExternalNetworkId},\n  validator_sets::{\n    primitives::{ExternalValidatorSet, ValidatorSet},\n    ValidatorSetsEvent,\n  },\n  Block, Serai, SeraiError, TemporalSerai,\n};\n\nuse serai_db::DbTxn;\n\nuse processor_messages::SubstrateContext;\n\nuse tokio::{sync::mpsc, time::sleep};\n\nuse crate::{\n  Db,\n  processors::Processors,\n  tributary::{TributarySpec, SeraiDkgCompleted},\n};\n\nmod db;\npub use db::*;\n\nmod cosign;\npub use cosign::*;\n\nasync fn in_set(\n  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,\n  serai: &TemporalSerai<'_>,\n  set: ValidatorSet,\n) -> Result<Option<bool>, SeraiError> {\n  let Some(participants) = serai.validator_sets().participants(set.network).await? 
else {\n    return Ok(None);\n  };\n  let key = (Ristretto::generator() * key.deref()).to_bytes();\n  Ok(Some(participants.iter().any(|(participant, _)| participant.0 == key)))\n}\n\nasync fn handle_new_set<D: Db>(\n  txn: &mut D::Transaction<'_>,\n  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,\n  new_tributary_spec: &mpsc::UnboundedSender<TributarySpec>,\n  serai: &Serai,\n  block: &Block,\n  set: ExternalValidatorSet,\n) -> Result<(), SeraiError> {\n  if in_set(key, &serai.as_of(block.hash()), set.into())\n    .await?\n    .expect(\"NewSet for set which doesn't exist\")\n  {\n    log::info!(\"present in set {:?}\", set);\n\n    let set_data = {\n      let serai = serai.as_of(block.hash());\n      let serai = serai.validator_sets();\n      let set_participants =\n        serai.participants(set.network.into()).await?.expect(\"NewSet for set which doesn't exist\");\n\n      set_participants.into_iter().map(|(k, w)| (k, u16::try_from(w).unwrap())).collect::<Vec<_>>()\n    };\n\n    let time = if let Ok(time) = block.time() {\n      time\n    } else {\n      assert_eq!(block.number(), 0);\n      // Use the next block's time\n      loop {\n        let Ok(Some(res)) = serai.finalized_block_by_number(1).await else {\n          sleep(Duration::from_secs(5)).await;\n          continue;\n        };\n        break res.time().unwrap();\n      }\n    };\n    // The block time is in milliseconds yet the Tributary is in seconds\n    let time = time / 1000;\n    // Since this block is in the past, and Tendermint doesn't play nice with starting chains after\n    // their start time (though it does eventually work), delay the start time by 120 seconds\n    // This is meant to handle ~20 blocks of lack of finalization for this first block\n    const SUBSTRATE_TO_TRIBUTARY_TIME_DELAY: u64 = 120;\n    let time = time + SUBSTRATE_TO_TRIBUTARY_TIME_DELAY;\n\n    let spec = TributarySpec::new(block.hash(), time, set, set_data);\n\n    log::info!(\"creating new tributary for {:?}\", 
spec.set());\n\n    // Save it to the database now, not on the channel receiver's side, so this is safe against\n    // reboots\n    // If this txn finishes, and we reboot, then this'll be reloaded from active Tributaries\n    // If this txn doesn't finish, this will be re-fired\n    // If we waited to save to the DB, this txn may be finished, preventing re-firing, yet the\n    // prior fired event may have not been received yet\n    crate::ActiveTributaryDb::add_participating_in_tributary(txn, &spec);\n\n    new_tributary_spec.send(spec).unwrap();\n  } else {\n    log::info!(\"not present in new set {:?}\", set);\n  }\n\n  Ok(())\n}\n\nasync fn handle_batch_and_burns<Pro: Processors>(\n  txn: &mut impl DbTxn,\n  processors: &Pro,\n  serai: &Serai,\n  block: &Block,\n) -> Result<(), SeraiError> {\n  // Track which networks had events with a Vec in ordr to preserve the insertion order\n  // While that shouldn't be needed, ensuring order never hurts, and may enable design choices\n  // with regards to Processor <-> Coordinator message passing\n  let mut networks_with_event = vec![];\n  let mut network_had_event = |burns: &mut HashMap<_, _>, batches: &mut HashMap<_, _>, network| {\n    // Don't insert this network multiple times\n    // A Vec is still used in order to maintain the insertion order\n    if !networks_with_event.contains(&network) {\n      networks_with_event.push(network);\n      burns.insert(network, vec![]);\n      batches.insert(network, vec![]);\n    }\n  };\n\n  let mut batch_block = HashMap::new();\n  let mut batches = HashMap::<ExternalNetworkId, Vec<u32>>::new();\n  let mut burns = HashMap::new();\n\n  let serai = serai.as_of(block.hash());\n  for batch in serai.in_instructions().batch_events().await? 
{\n    if let InInstructionsEvent::Batch { network, id, block: network_block, instructions_hash } =\n      batch\n    {\n      network_had_event(&mut burns, &mut batches, network);\n\n      BatchInstructionsHashDb::set(txn, network, id, &instructions_hash);\n\n      // Make sure this is the only Batch event for this network in this Block\n      assert!(batch_block.insert(network, network_block).is_none());\n\n      // Add the batch included by this block\n      batches.get_mut(&network).unwrap().push(id);\n    } else {\n      panic!(\"Batch event wasn't Batch: {batch:?}\");\n    }\n  }\n\n  for burn in serai.coins().burn_with_instruction_events().await? {\n    if let CoinsEvent::BurnWithInstruction { from: _, instruction } = burn {\n      let network = instruction.balance.coin.network();\n      network_had_event(&mut burns, &mut batches, network);\n\n      // network_had_event should register an entry in burns\n      burns.get_mut(&network).unwrap().push(instruction);\n    } else {\n      panic!(\"Burn event wasn't Burn: {burn:?}\");\n    }\n  }\n\n  assert_eq!(HashSet::<&_>::from_iter(networks_with_event.iter()).len(), networks_with_event.len());\n\n  for network in networks_with_event {\n    let network_latest_finalized_block = if let Some(block) = batch_block.remove(&network) {\n      block\n    } else {\n      // If it's had a batch or a burn, it must have had a block acknowledged\n      serai\n        .in_instructions()\n        .latest_block_for_network(network)\n        .await?\n        .expect(\"network had a batch/burn yet never set a latest block\")\n    };\n\n    processors\n      .send(\n        network,\n        processor_messages::substrate::CoordinatorMessage::SubstrateBlock {\n          context: SubstrateContext {\n            serai_time: block.time().unwrap() / 1000,\n            network_latest_finalized_block,\n          },\n          block: block.number(),\n          burns: burns.remove(&network).unwrap(),\n          batches: 
batches.remove(&network).unwrap(),\n        },\n      )\n      .await;\n  }\n\n  Ok(())\n}\n\n// Handle a specific Substrate block, returning an error when it fails to get data\n// (not blocking / holding)\n#[allow(clippy::too_many_arguments)]\nasync fn handle_block<D: Db, Pro: Processors>(\n  db: &mut D,\n  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,\n  new_tributary_spec: &mpsc::UnboundedSender<TributarySpec>,\n  perform_slash_report: &mpsc::UnboundedSender<ExternalValidatorSet>,\n  tributary_retired: &mpsc::UnboundedSender<ExternalValidatorSet>,\n  processors: &Pro,\n  serai: &Serai,\n  block: Block,\n) -> Result<(), SeraiError> {\n  let hash = block.hash();\n\n  // Define an indexed event ID.\n  let mut event_id = 0;\n\n  // If a new validator set was activated, create tributary/inform processor to do a DKG\n  for new_set in serai.as_of(hash).validator_sets().new_set_events().await? {\n    // Individually mark each event as handled so on reboot, we minimize duplicates\n    // Additionally, if the Serai connection also fails 1/100 times, this means a block with 1000\n    // events will successfully be incrementally handled\n    // (though the Serai connection should be stable, making this unnecessary)\n    let ValidatorSetsEvent::NewSet { set } = new_set else {\n      panic!(\"NewSet event wasn't NewSet: {new_set:?}\");\n    };\n\n    // We only coordinate/process external networks\n    let Ok(set) = ExternalValidatorSet::try_from(set) else { continue };\n    if HandledEvent::is_unhandled(db, hash, event_id) {\n      log::info!(\"found fresh new set event {:?}\", new_set);\n      let mut txn = db.txn();\n      handle_new_set::<D>(&mut txn, key, new_tributary_spec, serai, &block, set).await?;\n      HandledEvent::handle_event(&mut txn, hash, event_id);\n      txn.commit();\n    }\n    event_id += 1;\n  }\n\n  // If a key pair was confirmed, inform the processor\n  for key_gen in serai.as_of(hash).validator_sets().key_gen_events().await? 
{\n    if HandledEvent::is_unhandled(db, hash, event_id) {\n      log::info!(\"found fresh key gen event {:?}\", key_gen);\n      let ValidatorSetsEvent::KeyGen { set, key_pair } = key_gen else {\n        panic!(\"KeyGen event wasn't KeyGen: {key_gen:?}\");\n      };\n      let substrate_key = key_pair.0 .0;\n      processors\n        .send(\n          set.network,\n          processor_messages::substrate::CoordinatorMessage::ConfirmKeyPair {\n            context: SubstrateContext {\n              serai_time: block.time().unwrap() / 1000,\n              network_latest_finalized_block: serai\n                .as_of(block.hash())\n                .in_instructions()\n                .latest_block_for_network(set.network)\n                .await?\n                // The processor treats this as a magic value which will cause it to find a network\n                // block which has a time greater than or equal to the Serai time\n                .unwrap_or(BlockHash([0; 32])),\n            },\n            session: set.session,\n            key_pair,\n          },\n        )\n        .await;\n\n      // TODO: If we were in the set, yet were removed, drop the tributary\n\n      let mut txn = db.txn();\n      SeraiDkgCompleted::set(&mut txn, set, &substrate_key);\n      HandledEvent::handle_event(&mut txn, hash, event_id);\n      txn.commit();\n    }\n    event_id += 1;\n  }\n\n  for accepted_handover in serai.as_of(hash).validator_sets().accepted_handover_events().await? 
{\n    let ValidatorSetsEvent::AcceptedHandover { set } = accepted_handover else {\n      panic!(\"AcceptedHandover event wasn't AcceptedHandover: {accepted_handover:?}\");\n    };\n\n    let Ok(set) = ExternalValidatorSet::try_from(set) else { continue };\n    if HandledEvent::is_unhandled(db, hash, event_id) {\n      log::info!(\"found fresh accepted handover event {:?}\", accepted_handover);\n      // TODO: This isn't atomic with the event handling\n      // Send a oneshot receiver so we can await the response?\n      perform_slash_report.send(set).unwrap();\n      let mut txn = db.txn();\n      HandledEvent::handle_event(&mut txn, hash, event_id);\n      txn.commit();\n    }\n    event_id += 1;\n  }\n\n  for retired_set in serai.as_of(hash).validator_sets().set_retired_events().await? {\n    let ValidatorSetsEvent::SetRetired { set } = retired_set else {\n      panic!(\"SetRetired event wasn't SetRetired: {retired_set:?}\");\n    };\n\n    let Ok(set) = ExternalValidatorSet::try_from(set) else { continue };\n    if HandledEvent::is_unhandled(db, hash, event_id) {\n      log::info!(\"found fresh set retired event {:?}\", retired_set);\n      let mut txn = db.txn();\n      crate::ActiveTributaryDb::retire_tributary(&mut txn, set);\n      tributary_retired.send(set).unwrap();\n      HandledEvent::handle_event(&mut txn, hash, event_id);\n      txn.commit();\n    }\n    event_id += 1;\n  }\n\n  // Finally, tell the processor of acknowledged blocks/burns\n  // This uses a single event as unlike prior events which individually executed code, all\n  // following events share data collection\n  if HandledEvent::is_unhandled(db, hash, event_id) {\n    let mut txn = db.txn();\n    handle_batch_and_burns(&mut txn, processors, serai, &block).await?;\n    HandledEvent::handle_event(&mut txn, hash, event_id);\n    txn.commit();\n  }\n\n  Ok(())\n}\n\n#[allow(clippy::too_many_arguments)]\nasync fn handle_new_blocks<D: Db, Pro: Processors>(\n  db: &mut D,\n  key: 
&Zeroizing<<Ristretto as Ciphersuite>::F>,\n  new_tributary_spec: &mpsc::UnboundedSender<TributarySpec>,\n  perform_slash_report: &mpsc::UnboundedSender<ExternalValidatorSet>,\n  tributary_retired: &mpsc::UnboundedSender<ExternalValidatorSet>,\n  processors: &Pro,\n  serai: &Serai,\n  next_block: &mut u64,\n) -> Result<(), SeraiError> {\n  // Check if there's been a new Substrate block\n  let latest_number = serai.latest_finalized_block().await?.number();\n\n  // Advance the cosigning protocol\n  advance_cosign_protocol(db, key, serai, latest_number).await?;\n\n  // Reduce to the latest cosigned block\n  let latest_number = latest_number.min(LatestCosignedBlock::latest_cosigned_block(db));\n\n  if latest_number < *next_block {\n    return Ok(());\n  }\n\n  for b in *next_block ..= latest_number {\n    let block = serai\n      .finalized_block_by_number(b)\n      .await?\n      .expect(\"couldn't get block before the latest finalized block\");\n\n    log::info!(\"handling substrate block {b}\");\n    handle_block(\n      db,\n      key,\n      new_tributary_spec,\n      perform_slash_report,\n      tributary_retired,\n      processors,\n      serai,\n      block,\n    )\n    .await?;\n    *next_block += 1;\n\n    let mut txn = db.txn();\n    NextBlock::set(&mut txn, next_block);\n    txn.commit();\n\n    log::info!(\"handled substrate block {b}\");\n  }\n\n  Ok(())\n}\n\npub async fn scan_task<D: Db, Pro: Processors>(\n  mut db: D,\n  key: Zeroizing<<Ristretto as Ciphersuite>::F>,\n  processors: Pro,\n  serai: Arc<Serai>,\n  new_tributary_spec: mpsc::UnboundedSender<TributarySpec>,\n  perform_slash_report: mpsc::UnboundedSender<ExternalValidatorSet>,\n  tributary_retired: mpsc::UnboundedSender<ExternalValidatorSet>,\n) {\n  log::info!(\"scanning substrate\");\n  let mut next_substrate_block = NextBlock::get(&db).unwrap_or_default();\n\n  /*\n  let new_substrate_block_notifier = {\n    let serai = &serai;\n    move || async move {\n      loop {\n        match 
serai.newly_finalized_block().await {\n          Ok(sub) => return sub,\n          Err(e) => {\n            log::error!(\"couldn't communicate with serai node: {e}\");\n            sleep(Duration::from_secs(5)).await;\n          }\n        }\n      }\n    }\n  };\n  */\n  // TODO: Restore the above subscription-based system\n  // That would require moving serai-client from HTTP to websockets\n  let new_substrate_block_notifier = {\n    let serai = &serai;\n    move |next_substrate_block| async move {\n      loop {\n        match serai.latest_finalized_block().await {\n          Ok(latest) => {\n            if latest.header.number >= next_substrate_block {\n              return latest;\n            }\n            sleep(Duration::from_secs(3)).await;\n          }\n          Err(e) => {\n            log::error!(\"couldn't communicate with serai node: {e}\");\n            sleep(Duration::from_secs(5)).await;\n          }\n        }\n      }\n    }\n  };\n\n  loop {\n    // await the next block, yet if our notifier had an error, re-create it\n    {\n      let Ok(_) = tokio::time::timeout(\n        Duration::from_secs(60),\n        new_substrate_block_notifier(next_substrate_block),\n      )\n      .await\n      else {\n        // Timed out, which may be because Serai isn't finalizing or may be some issue with the\n        // notifier\n        if serai.latest_finalized_block().await.map(|block| block.number()).ok() ==\n          Some(next_substrate_block.saturating_sub(1))\n        {\n          log::info!(\"serai hasn't finalized a block in the last 60s...\");\n        }\n        continue;\n      };\n\n      /*\n      // next_block is a Option<Result>\n      if next_block.and_then(Result::ok).is_none() {\n        substrate_block_notifier = new_substrate_block_notifier(next_substrate_block);\n        continue;\n      }\n      */\n    }\n\n    match handle_new_blocks(\n      &mut db,\n      &key,\n      &new_tributary_spec,\n      &perform_slash_report,\n      
&tributary_retired,\n      &processors,\n      &serai,\n      &mut next_substrate_block,\n    )\n    .await\n    {\n      Ok(()) => {}\n      Err(e) => {\n        log::error!(\"couldn't communicate with serai node: {e}\");\n        sleep(Duration::from_secs(5)).await;\n      }\n    }\n  }\n}\n\n/// Gets the expected ID for the next Batch.\n///\n/// Will log an error and apply a slight sleep on error, letting the caller simply immediately\n/// retry.\npub(crate) async fn expected_next_batch(\n  serai: &Serai,\n  network: ExternalNetworkId,\n) -> Result<u32, SeraiError> {\n  async fn expected_next_batch_inner(\n    serai: &Serai,\n    network: ExternalNetworkId,\n  ) -> Result<u32, SeraiError> {\n    let serai = serai.as_of_latest_finalized_block().await?;\n    let last = serai.in_instructions().last_batch_for_network(network).await?;\n    Ok(if let Some(last) = last { last + 1 } else { 0 })\n  }\n  match expected_next_batch_inner(serai, network).await {\n    Ok(next) => Ok(next),\n    Err(e) => {\n      log::error!(\"couldn't get the expected next batch from substrate: {e:?}\");\n      sleep(Duration::from_millis(100)).await;\n      Err(e)\n    }\n  }\n}\n\n/// Verifies `Batch`s which have already been indexed from Substrate.\n///\n/// Spins if a distinct `Batch` is detected on-chain.\n///\n/// This has a slight malleability in that doesn't verify *who* published a `Batch` is as expected.\n/// This is deemed fine.\npub(crate) async fn verify_published_batches<D: Db>(\n  txn: &mut D::Transaction<'_>,\n  network: ExternalNetworkId,\n  optimistic_up_to: u32,\n) -> Option<u32> {\n  // TODO: Localize from MainDb to SubstrateDb\n  let last = crate::LastVerifiedBatchDb::get(txn, network);\n  for id in last.map_or(0, |last| last + 1) ..= optimistic_up_to {\n    let Some(on_chain) = BatchInstructionsHashDb::get(txn, network, id) else {\n      break;\n    };\n    let off_chain = crate::ExpectedBatchDb::get(txn, network, id).unwrap();\n    if on_chain != off_chain {\n      // 
Halt operations on this network and spin, as this is a critical fault\n      loop {\n        log::error!(\n          \"{}! network: {:?} id: {} off-chain: {} on-chain: {}\",\n          \"on-chain batch doesn't match off-chain\",\n          network,\n          id,\n          hex::encode(off_chain),\n          hex::encode(on_chain),\n        );\n        sleep(Duration::from_secs(60)).await;\n      }\n    }\n    crate::LastVerifiedBatchDb::set(txn, network, &id);\n  }\n\n  crate::LastVerifiedBatchDb::get(txn, network)\n}\n"
  },
  {
    "path": "coordinator/src/tests/mod.rs",
    "content": "use core::fmt::Debug;\nuse std::{\n  sync::Arc,\n  collections::{VecDeque, HashSet, HashMap},\n};\n\nuse serai_client::{primitives::ExternalNetworkId, validator_sets::primitives::ExternalValidatorSet};\n\nuse processor_messages::CoordinatorMessage;\n\nuse async_trait::async_trait;\n\nuse tokio::sync::RwLock;\n\nuse crate::{\n  processors::{Message, Processors},\n  TributaryP2p, ReqResMessageKind, GossipMessageKind, P2pMessageKind, Message as P2pMessage, P2p,\n};\n\npub mod tributary;\n\n#[derive(Clone)]\npub struct MemProcessors(pub Arc<RwLock<HashMap<ExternalNetworkId, VecDeque<CoordinatorMessage>>>>);\nimpl MemProcessors {\n  #[allow(clippy::new_without_default)]\n  pub fn new() -> MemProcessors {\n    MemProcessors(Arc::new(RwLock::new(HashMap::new())))\n  }\n}\n\n#[async_trait::async_trait]\nimpl Processors for MemProcessors {\n  async fn send(&self, network: ExternalNetworkId, msg: impl Send + Into<CoordinatorMessage>) {\n    let mut processors = self.0.write().await;\n    let processor = processors.entry(network).or_insert_with(VecDeque::new);\n    processor.push_back(msg.into());\n  }\n  async fn recv(&self, _: ExternalNetworkId) -> Message {\n    todo!()\n  }\n  async fn ack(&self, _: Message) {\n    todo!()\n  }\n}\n\n#[allow(clippy::type_complexity)]\n#[derive(Clone, Debug)]\npub struct LocalP2p(\n  usize,\n  pub Arc<RwLock<(HashSet<Vec<u8>>, Vec<VecDeque<(usize, P2pMessageKind, Vec<u8>)>>)>>,\n);\n\nimpl LocalP2p {\n  pub fn new(validators: usize) -> Vec<LocalP2p> {\n    let shared = Arc::new(RwLock::new((HashSet::new(), vec![VecDeque::new(); validators])));\n    let mut res = vec![];\n    for i in 0 .. 
validators {\n      res.push(LocalP2p(i, shared.clone()));\n    }\n    res\n  }\n}\n\n#[async_trait]\nimpl P2p for LocalP2p {\n  type Id = usize;\n\n  async fn subscribe(&self, _set: ExternalValidatorSet, _genesis: [u8; 32]) {}\n  async fn unsubscribe(&self, _set: ExternalValidatorSet, _genesis: [u8; 32]) {}\n\n  async fn send_raw(&self, to: Self::Id, msg: Vec<u8>) {\n    let mut msg_ref = msg.as_slice();\n    let kind = ReqResMessageKind::read(&mut msg_ref).unwrap();\n    self.1.write().await.1[to].push_back((self.0, P2pMessageKind::ReqRes(kind), msg_ref.to_vec()));\n  }\n\n  async fn broadcast_raw(&self, kind: P2pMessageKind, msg: Vec<u8>) {\n    // Content-based deduplication\n    let mut lock = self.1.write().await;\n    {\n      let already_sent = &mut lock.0;\n      if already_sent.contains(&msg) {\n        return;\n      }\n      already_sent.insert(msg.clone());\n    }\n    let queues = &mut lock.1;\n\n    let kind_len = (match kind {\n      P2pMessageKind::ReqRes(kind) => kind.serialize(),\n      P2pMessageKind::Gossip(kind) => kind.serialize(),\n    })\n    .len();\n    let msg = msg[kind_len ..].to_vec();\n\n    for (i, msg_queue) in queues.iter_mut().enumerate() {\n      if i == self.0 {\n        continue;\n      }\n      msg_queue.push_back((self.0, kind, msg.clone()));\n    }\n  }\n\n  async fn receive(&self) -> P2pMessage<Self> {\n    // This is a cursed way to implement an async read from a Vec\n    loop {\n      if let Some((sender, kind, msg)) = self.1.write().await.1[self.0].pop_front() {\n        return P2pMessage { sender, kind, msg };\n      }\n      tokio::time::sleep(std::time::Duration::from_millis(100)).await;\n    }\n  }\n}\n\n#[async_trait]\nimpl TributaryP2p for LocalP2p {\n  async fn broadcast(&self, genesis: [u8; 32], msg: Vec<u8>) {\n    <Self as P2p>::broadcast(\n      self,\n      P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)),\n      msg,\n    )\n    .await\n  }\n}\n"
  },
  {
    "path": "coordinator/src/tests/tributary/chain.rs",
    "content": "use std::{\n  time::{Duration, SystemTime},\n  collections::HashSet,\n};\n\nuse zeroize::Zeroizing;\nuse rand_core::{RngCore, CryptoRng, OsRng};\nuse futures_util::{task::Poll, poll};\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::{\n  group::{ff::Field, GroupEncoding},\n  Ciphersuite,\n};\n\nuse sp_application_crypto::sr25519;\nuse borsh::BorshDeserialize;\nuse serai_client::{\n  primitives::ExternalNetworkId,\n  validator_sets::primitives::{ExternalValidatorSet, Session},\n};\n\nuse tokio::time::sleep;\n\nuse serai_db::MemDb;\n\nuse tributary::Tributary;\n\nuse crate::{\n  GossipMessageKind, P2pMessageKind, P2p,\n  tributary::{Transaction, TributarySpec},\n  tests::LocalP2p,\n};\n\npub fn new_keys<R: RngCore + CryptoRng>(\n  rng: &mut R,\n) -> Vec<Zeroizing<<Ristretto as Ciphersuite>::F>> {\n  let mut keys = vec![];\n  for _ in 0 .. 5 {\n    keys.push(Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut *rng)));\n  }\n  keys\n}\n\npub fn new_spec<R: RngCore + CryptoRng>(\n  rng: &mut R,\n  keys: &[Zeroizing<<Ristretto as Ciphersuite>::F>],\n) -> TributarySpec {\n  let mut serai_block = [0; 32];\n  rng.fill_bytes(&mut serai_block);\n\n  let start_time = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs();\n\n  let set = ExternalValidatorSet { session: Session(0), network: ExternalNetworkId::Bitcoin };\n\n  let set_participants = keys\n    .iter()\n    .map(|key| {\n      (sr25519::Public::from((<Ristretto as Ciphersuite>::generator() * **key).to_bytes()), 1)\n    })\n    .collect::<Vec<_>>();\n\n  let res = TributarySpec::new(serai_block, start_time, set, set_participants);\n  assert_eq!(\n    TributarySpec::deserialize_reader(&mut borsh::to_vec(&res).unwrap().as_slice()).unwrap(),\n    res,\n  );\n  res\n}\n\npub async fn new_tributaries(\n  keys: &[Zeroizing<<Ristretto as Ciphersuite>::F>],\n  spec: &TributarySpec,\n) -> Vec<(MemDb, LocalP2p, Tributary<MemDb, Transaction, LocalP2p>)> {\n  let p2p = 
LocalP2p::new(keys.len());\n  let mut res = vec![];\n  for (i, key) in keys.iter().enumerate() {\n    let db = MemDb::new();\n    res.push((\n      db.clone(),\n      p2p[i].clone(),\n      Tributary::<_, Transaction, _>::new(\n        db,\n        spec.genesis(),\n        spec.start_time(),\n        key.clone(),\n        spec.validators(),\n        p2p[i].clone(),\n      )\n      .await\n      .unwrap(),\n    ));\n  }\n  res\n}\n\npub async fn run_tributaries(\n  mut tributaries: Vec<(LocalP2p, Tributary<MemDb, Transaction, LocalP2p>)>,\n) {\n  loop {\n    for (p2p, tributary) in &mut tributaries {\n      while let Poll::Ready(msg) = poll!(p2p.receive()) {\n        match msg.kind {\n          P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => {\n            assert_eq!(genesis, tributary.genesis());\n            if tributary.handle_message(&msg.msg).await {\n              p2p.broadcast(msg.kind, msg.msg).await;\n            }\n          }\n          _ => panic!(\"unexpected p2p message found\"),\n        }\n      }\n    }\n\n    sleep(Duration::from_millis(100)).await;\n  }\n}\n\npub async fn wait_for_tx_inclusion(\n  tributary: &Tributary<MemDb, Transaction, LocalP2p>,\n  mut last_checked: [u8; 32],\n  hash: [u8; 32],\n) -> [u8; 32] {\n  let reader = tributary.reader();\n  loop {\n    let tip = tributary.tip().await;\n    if tip == last_checked {\n      sleep(Duration::from_secs(1)).await;\n      continue;\n    }\n\n    let mut queue = vec![reader.block(&tip).unwrap()];\n    let mut block = None;\n    while {\n      let parent = queue.last().unwrap().parent();\n      if parent == tributary.genesis() {\n        false\n      } else {\n        block = Some(reader.block(&parent).unwrap());\n        block.as_ref().unwrap().hash() != last_checked\n      }\n    } {\n      queue.push(block.take().unwrap());\n    }\n\n    while let Some(block) = queue.pop() {\n      for tx in &block.transactions {\n        if tx.hash() == hash {\n          return 
block.hash();\n        }\n      }\n    }\n\n    last_checked = tip;\n  }\n}\n\n#[tokio::test]\nasync fn tributary_test() {\n  let keys = new_keys(&mut OsRng);\n  let spec = new_spec(&mut OsRng, &keys);\n\n  let mut tributaries = new_tributaries(&keys, &spec)\n    .await\n    .into_iter()\n    .map(|(_, p2p, tributary)| (p2p, tributary))\n    .collect::<Vec<_>>();\n\n  let mut blocks = 0;\n  let mut last_block = spec.genesis();\n\n  // Doesn't use run_tributaries as we want to wind these down at a certain point\n  // run_tributaries will run them ad infinitum\n  let timeout = SystemTime::now() + Duration::from_secs(65);\n  while (blocks < 10) && (SystemTime::now().duration_since(timeout).is_err()) {\n    for (p2p, tributary) in &mut tributaries {\n      while let Poll::Ready(msg) = poll!(p2p.receive()) {\n        match msg.kind {\n          P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => {\n            assert_eq!(genesis, tributary.genesis());\n            tributary.handle_message(&msg.msg).await;\n          }\n          _ => panic!(\"unexpected p2p message found\"),\n        }\n      }\n    }\n\n    let tip = tributaries[0].1.tip().await;\n    if tip != last_block {\n      last_block = tip;\n      blocks += 1;\n    }\n\n    sleep(Duration::from_millis(100)).await;\n  }\n\n  if blocks != 10 {\n    panic!(\"tributary chain test hit timeout\");\n  }\n\n  // Handle all existing messages\n  for (p2p, tributary) in &mut tributaries {\n    while let Poll::Ready(msg) = poll!(p2p.receive()) {\n      match msg.kind {\n        P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => {\n          assert_eq!(genesis, tributary.genesis());\n          tributary.handle_message(&msg.msg).await;\n        }\n        _ => panic!(\"unexpected p2p message found\"),\n      }\n    }\n  }\n\n  // handle_message informed the Tendermint machine, yet it still has to process it\n  // Sleep for a second accordingly\n  // TODO: Is there a better way to handle this?\n  
sleep(Duration::from_secs(1)).await;\n\n  // All tributaries should agree on the tip, within a block\n  let mut tips = HashSet::new();\n  for (_, tributary) in &tributaries {\n    tips.insert(tributary.tip().await);\n  }\n  assert!(tips.len() <= 2);\n  if tips.len() == 2 {\n    for tip in &tips {\n      // Find a Tributary where this isn't the tip\n      for (_, tributary) in &tributaries {\n        let Some(after) = tributary.reader().block_after(tip) else { continue };\n        // Make sure the block after is the other tip\n        assert!(tips.contains(&after));\n        return;\n      }\n    }\n  } else {\n    assert_eq!(tips.len(), 1);\n    return;\n  }\n  panic!(\"tributary had different tip with a variance exceeding one block\");\n}\n"
  },
  {
    "path": "coordinator/src/tests/tributary/dkg.rs",
    "content": "use core::time::Duration;\nuse std::collections::HashMap;\n\nuse zeroize::Zeroizing;\nuse rand_core::{RngCore, OsRng};\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::{group::GroupEncoding, Ciphersuite};\nuse frost::Participant;\n\nuse sp_runtime::traits::Verify;\nuse serai_client::{\n  primitives::{SeraiAddress, Signature},\n  validator_sets::primitives::{ExternalValidatorSet, KeyPair},\n};\n\nuse tokio::time::sleep;\n\nuse serai_db::{Get, DbTxn, Db, MemDb};\n\nuse processor_messages::{\n  key_gen::{self, KeyGenId},\n  CoordinatorMessage,\n};\n\nuse tributary::{TransactionTrait, Tributary};\n\nuse crate::{\n  tributary::{\n    Transaction, TributarySpec,\n    scanner::{PublishSeraiTransaction, handle_new_blocks},\n  },\n  tests::{\n    MemProcessors, LocalP2p,\n    tributary::{new_keys, new_spec, new_tributaries, run_tributaries, wait_for_tx_inclusion},\n  },\n};\n\n#[tokio::test]\nasync fn dkg_test() {\n  env_logger::init();\n\n  let keys = new_keys(&mut OsRng);\n  let spec = new_spec(&mut OsRng, &keys);\n\n  let full_tributaries = new_tributaries(&keys, &spec).await;\n  let mut dbs = vec![];\n  let mut tributaries = vec![];\n  for (db, p2p, tributary) in full_tributaries {\n    dbs.push(db);\n    tributaries.push((p2p, tributary));\n  }\n\n  // Run the tributaries in the background\n  tokio::spawn(run_tributaries(tributaries.clone()));\n\n  let mut txs = vec![];\n  // Create DKG commitments for each key\n  for key in &keys {\n    let attempt = 0;\n    let mut commitments = vec![0; 256];\n    OsRng.fill_bytes(&mut commitments);\n\n    let mut tx = Transaction::DkgCommitments {\n      attempt,\n      commitments: vec![commitments],\n      signed: Transaction::empty_signed(),\n    };\n    tx.sign(&mut OsRng, spec.genesis(), key);\n    txs.push(tx);\n  }\n\n  let block_before_tx = tributaries[0].1.tip().await;\n\n  // Publish all commitments but one\n  for (i, tx) in txs.iter().enumerate().skip(1) {\n    
assert_eq!(tributaries[i].1.add_transaction(tx.clone()).await, Ok(true));\n  }\n\n  // Wait until these are included\n  for tx in txs.iter().skip(1) {\n    wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;\n  }\n\n  let expected_commitments: HashMap<_, _> = txs\n    .iter()\n    .enumerate()\n    .map(|(i, tx)| {\n      if let Transaction::DkgCommitments { commitments, .. } = tx {\n        (Participant::new((i + 1).try_into().unwrap()).unwrap(), commitments[0].clone())\n      } else {\n        panic!(\"txs had non-commitments\");\n      }\n    })\n    .collect();\n\n  async fn new_processors(\n    db: &mut MemDb,\n    key: &Zeroizing<<Ristretto as Ciphersuite>::F>,\n    spec: &TributarySpec,\n    tributary: &Tributary<MemDb, Transaction, LocalP2p>,\n  ) -> MemProcessors {\n    let processors = MemProcessors::new();\n    handle_new_blocks::<_, _, _, _, _, LocalP2p>(\n      db,\n      key,\n      &|_, _, _, _| async {\n        panic!(\"provided TX caused recognized_id to be called in new_processors\")\n      },\n      &processors,\n      &(),\n      &|_| async {\n        panic!(\n          \"test tried to publish a new Tributary TX from handle_application_tx in new_processors\"\n        )\n      },\n      spec,\n      &tributary.reader(),\n    )\n    .await;\n    processors\n  }\n\n  // Instantiate a scanner and verify it has nothing to report\n  let processors = new_processors(&mut dbs[0], &keys[0], &spec, &tributaries[0].1).await;\n  assert!(processors.0.read().await.is_empty());\n\n  // Publish the last commitment\n  let block_before_tx = tributaries[0].1.tip().await;\n  assert_eq!(tributaries[0].1.add_transaction(txs[0].clone()).await, Ok(true));\n  wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, txs[0].hash()).await;\n  sleep(Duration::from_secs(Tributary::<MemDb, Transaction, LocalP2p>::block_time().into())).await;\n\n  // Verify the scanner emits a KeyGen::Commitments message\n  handle_new_blocks::<_, _, _, _, _, 
LocalP2p>(\n    &mut dbs[0],\n    &keys[0],\n    &|_, _, _, _| async {\n      panic!(\"provided TX caused recognized_id to be called after Commitments\")\n    },\n    &processors,\n    &(),\n    &|_| async {\n      panic!(\n        \"test tried to publish a new Tributary TX from handle_application_tx after Commitments\"\n      )\n    },\n    &spec,\n    &tributaries[0].1.reader(),\n  )\n  .await;\n  {\n    let mut msgs = processors.0.write().await;\n    assert_eq!(msgs.len(), 1);\n    let msgs = msgs.get_mut(&spec.set().network).unwrap();\n    let mut expected_commitments = expected_commitments.clone();\n    expected_commitments.remove(&Participant::new((1).try_into().unwrap()).unwrap());\n    assert_eq!(\n      msgs.pop_front().unwrap(),\n      CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {\n        id: KeyGenId { session: spec.set().session, attempt: 0 },\n        commitments: expected_commitments\n      })\n    );\n    assert!(msgs.is_empty());\n  }\n\n  // Verify all keys exhibit this scanner behavior\n  for (i, key) in keys.iter().enumerate().skip(1) {\n    let processors = new_processors(&mut dbs[i], key, &spec, &tributaries[i].1).await;\n    let mut msgs = processors.0.write().await;\n    assert_eq!(msgs.len(), 1);\n    let msgs = msgs.get_mut(&spec.set().network).unwrap();\n    let mut expected_commitments = expected_commitments.clone();\n    expected_commitments.remove(&Participant::new((i + 1).try_into().unwrap()).unwrap());\n    assert_eq!(\n      msgs.pop_front().unwrap(),\n      CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {\n        id: KeyGenId { session: spec.set().session, attempt: 0 },\n        commitments: expected_commitments\n      })\n    );\n    assert!(msgs.is_empty());\n  }\n\n  // Now do shares\n  let mut txs = vec![];\n  for (k, key) in keys.iter().enumerate() {\n    let attempt = 0;\n\n    let mut shares = vec![vec![]];\n    for i in 0 .. 
keys.len() {\n      if i != k {\n        let mut share = vec![0; 256];\n        OsRng.fill_bytes(&mut share);\n        shares.last_mut().unwrap().push(share);\n      }\n    }\n\n    let mut txn = dbs[k].txn();\n    let mut tx = Transaction::DkgShares {\n      attempt,\n      shares,\n      confirmation_nonces: crate::tributary::dkg_confirmation_nonces(key, &spec, &mut txn, 0),\n      signed: Transaction::empty_signed(),\n    };\n    txn.commit();\n    tx.sign(&mut OsRng, spec.genesis(), key);\n    txs.push(tx);\n  }\n\n  let block_before_tx = tributaries[0].1.tip().await;\n  for (i, tx) in txs.iter().enumerate().skip(1) {\n    assert_eq!(tributaries[i].1.add_transaction(tx.clone()).await, Ok(true));\n  }\n  for tx in txs.iter().skip(1) {\n    wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;\n  }\n\n  // With just 4 sets of shares, nothing should happen yet\n  handle_new_blocks::<_, _, _, _, _, LocalP2p>(\n    &mut dbs[0],\n    &keys[0],\n    &|_, _, _, _| async {\n      panic!(\"provided TX caused recognized_id to be called after some shares\")\n    },\n    &processors,\n    &(),\n    &|_| async {\n      panic!(\n        \"test tried to publish a new Tributary TX from handle_application_tx after some shares\"\n      )\n    },\n    &spec,\n    &tributaries[0].1.reader(),\n  )\n  .await;\n  assert_eq!(processors.0.read().await.len(), 1);\n  assert!(processors.0.read().await[&spec.set().network].is_empty());\n\n  // Publish the final set of shares\n  let block_before_tx = tributaries[0].1.tip().await;\n  assert_eq!(tributaries[0].1.add_transaction(txs[0].clone()).await, Ok(true));\n  wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, txs[0].hash()).await;\n  sleep(Duration::from_secs(Tributary::<MemDb, Transaction, LocalP2p>::block_time().into())).await;\n\n  // Each scanner should emit a distinct shares message\n  let shares_for = |i: usize| {\n    CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Shares {\n      id: KeyGenId { 
session: spec.set().session, attempt: 0 },\n      shares: vec![txs\n        .iter()\n        .enumerate()\n        .filter_map(|(l, tx)| {\n          if let Transaction::DkgShares { shares, .. } = tx {\n            if i == l {\n              None\n            } else {\n              let relative_i = i - (if i > l { 1 } else { 0 });\n              Some((\n                Participant::new((l + 1).try_into().unwrap()).unwrap(),\n                shares[0][relative_i].clone(),\n              ))\n            }\n          } else {\n            panic!(\"txs had non-shares\");\n          }\n        })\n        .collect::<HashMap<_, _>>()],\n    })\n  };\n\n  // Any scanner which has handled the prior blocks should only emit the new event\n  for (i, key) in keys.iter().enumerate() {\n    handle_new_blocks::<_, _, _, _, _, LocalP2p>(\n      &mut dbs[i],\n      key,\n      &|_, _, _, _| async { panic!(\"provided TX caused recognized_id to be called after shares\") },\n      &processors,\n      &(),\n      &|_| async { panic!(\"test tried to publish a new Tributary TX from handle_application_tx\") },\n      &spec,\n      &tributaries[i].1.reader(),\n    )\n    .await;\n    {\n      let mut msgs = processors.0.write().await;\n      assert_eq!(msgs.len(), 1);\n      let msgs = msgs.get_mut(&spec.set().network).unwrap();\n      assert_eq!(msgs.pop_front().unwrap(), shares_for(i));\n      assert!(msgs.is_empty());\n    }\n  }\n\n  // Yet new scanners should emit all events\n  for (i, key) in keys.iter().enumerate() {\n    let processors = new_processors(&mut MemDb::new(), key, &spec, &tributaries[i].1).await;\n    let mut msgs = processors.0.write().await;\n    assert_eq!(msgs.len(), 1);\n    let msgs = msgs.get_mut(&spec.set().network).unwrap();\n    let mut expected_commitments = expected_commitments.clone();\n    expected_commitments.remove(&Participant::new((i + 1).try_into().unwrap()).unwrap());\n    assert_eq!(\n      msgs.pop_front().unwrap(),\n      
CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {\n        id: KeyGenId { session: spec.set().session, attempt: 0 },\n        commitments: expected_commitments\n      })\n    );\n    assert_eq!(msgs.pop_front().unwrap(), shares_for(i));\n    assert!(msgs.is_empty());\n  }\n\n  // Send DkgConfirmed\n  let mut substrate_key = [0; 32];\n  OsRng.fill_bytes(&mut substrate_key);\n  let mut network_key = vec![0; usize::try_from((OsRng.next_u64() % 32) + 32).unwrap()];\n  OsRng.fill_bytes(&mut network_key);\n  let key_pair =\n    KeyPair(serai_client::Public::from(substrate_key), network_key.try_into().unwrap());\n\n  let mut txs = vec![];\n  for (i, key) in keys.iter().enumerate() {\n    let attempt = 0;\n    let mut txn = dbs[i].txn();\n    let share =\n      crate::tributary::generated_key_pair::<MemDb>(&mut txn, key, &spec, &key_pair, 0).unwrap();\n    txn.commit();\n\n    let mut tx = Transaction::DkgConfirmed {\n      attempt,\n      confirmation_share: share,\n      signed: Transaction::empty_signed(),\n    };\n    tx.sign(&mut OsRng, spec.genesis(), key);\n    txs.push(tx);\n  }\n  let block_before_tx = tributaries[0].1.tip().await;\n  for (i, tx) in txs.iter().enumerate() {\n    assert_eq!(tributaries[i].1.add_transaction(tx.clone()).await, Ok(true));\n  }\n  for tx in &txs {\n    wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;\n  }\n\n  struct CheckPublishSetKeys {\n    spec: TributarySpec,\n    key_pair: KeyPair,\n  }\n  #[async_trait::async_trait]\n  impl PublishSeraiTransaction for CheckPublishSetKeys {\n    async fn publish_set_keys(\n      &self,\n      _db: &(impl Sync + Get),\n      set: ExternalValidatorSet,\n      removed: Vec<SeraiAddress>,\n      key_pair: KeyPair,\n      signature: Signature,\n    ) {\n      assert_eq!(set, self.spec.set());\n      assert!(removed.is_empty());\n      assert_eq!(self.key_pair, key_pair);\n      assert!(signature.verify(\n        
&*serai_client::validator_sets::primitives::set_keys_message(&set, &[], &key_pair),\n        &serai_client::Public::from(\n          dkg_musig::musig_key_vartime::<Ristretto>(\n            serai_client::validator_sets::primitives::musig_context(set.into()),\n            &self.spec.validators().into_iter().map(|(validator, _)| validator).collect::<Vec<_>>()\n          )\n          .unwrap()\n          .to_bytes()\n        ),\n      ));\n    }\n  }\n\n  // The scanner should successfully try to publish a transaction with a validly signed signature\n  handle_new_blocks::<_, _, _, _, _, LocalP2p>(\n    &mut dbs[0],\n    &keys[0],\n    &|_, _, _, _| async {\n      panic!(\"provided TX caused recognized_id to be called after DKG confirmation\")\n    },\n    &processors,\n    &CheckPublishSetKeys { spec: spec.clone(), key_pair: key_pair.clone() },\n    &|_| async { panic!(\"test tried to publish a new Tributary TX from handle_application_tx\") },\n    &spec,\n    &tributaries[0].1.reader(),\n  )\n  .await;\n  {\n    assert!(processors.0.read().await.get(&spec.set().network).unwrap().is_empty());\n  }\n}\n"
  },
  {
    "path": "coordinator/src/tests/tributary/handle_p2p.rs",
    "content": "use core::time::Duration;\nuse std::sync::Arc;\n\nuse rand_core::OsRng;\n\nuse tokio::{\n  sync::{mpsc, broadcast},\n  time::sleep,\n};\n\nuse serai_db::MemDb;\n\nuse tributary::Tributary;\n\nuse crate::{\n  tributary::Transaction,\n  ActiveTributary, TributaryEvent,\n  p2p::handle_p2p_task,\n  tests::{\n    LocalP2p,\n    tributary::{new_keys, new_spec, new_tributaries},\n  },\n};\n\n#[tokio::test]\nasync fn handle_p2p_test() {\n  let keys = new_keys(&mut OsRng);\n  let spec = new_spec(&mut OsRng, &keys);\n\n  let mut tributaries = new_tributaries(&keys, &spec)\n    .await\n    .into_iter()\n    .map(|(_, p2p, tributary)| (p2p, tributary))\n    .collect::<Vec<_>>();\n\n  let mut tributary_senders = vec![];\n  let mut tributary_arcs = vec![];\n  for (p2p, tributary) in tributaries.drain(..) {\n    let tributary = Arc::new(tributary);\n    tributary_arcs.push(tributary.clone());\n    let (new_tributary_send, new_tributary_recv) = broadcast::channel(5);\n    let (cosign_send, _) = mpsc::unbounded_channel();\n    tokio::spawn(handle_p2p_task(p2p, cosign_send, new_tributary_recv));\n    new_tributary_send\n      .send(TributaryEvent::NewTributary(ActiveTributary { spec: spec.clone(), tributary }))\n      .map_err(|_| \"failed to send ActiveTributary\")\n      .unwrap();\n    tributary_senders.push(new_tributary_send);\n  }\n  let tributaries = tributary_arcs;\n\n  // After two blocks of time, we should have a new block\n  // We don't wait one block of time as we may have missed the chance for this block\n  sleep(Duration::from_secs((2 * Tributary::<MemDb, Transaction, LocalP2p>::block_time()).into()))\n    .await;\n  let tip = tributaries[0].tip().await;\n  assert!(tip != spec.genesis());\n\n  // Sleep one second to make sure this block propagates\n  sleep(Duration::from_secs(1)).await;\n  // Make sure every tributary has it\n  for tributary in &tributaries {\n    assert!(tributary.reader().block(&tip).is_some());\n  }\n\n  // Then after another block 
of time, we should have yet another new block\n  sleep(Duration::from_secs(Tributary::<MemDb, Transaction, LocalP2p>::block_time().into())).await;\n  let new_tip = tributaries[0].tip().await;\n  assert!(new_tip != tip);\n  sleep(Duration::from_secs(1)).await;\n  for tributary in tributaries {\n    assert!(tributary.reader().block(&new_tip).is_some());\n  }\n}\n"
  },
  {
    "path": "coordinator/src/tests/tributary/mod.rs",
    "content": "use core::fmt::Debug;\n\nuse rand_core::{RngCore, OsRng};\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::{group::Group, Ciphersuite};\n\nuse scale::{Encode, Decode};\nuse serai_client::{\n  primitives::{SeraiAddress, Signature},\n  validator_sets::primitives::{ExternalValidatorSet, KeyPair, MAX_KEY_SHARES_PER_SET},\n};\nuse processor_messages::coordinator::SubstrateSignableId;\n\nuse tributary::{ReadWrite, tests::random_signed_with_nonce};\n\nuse crate::tributary::{Label, SignData, Transaction, scanner::PublishSeraiTransaction};\n\nmod chain;\npub use chain::*;\n\nmod tx;\n\nmod dkg;\n// TODO: Test the other transactions\n\nmod handle_p2p;\nmod sync;\n\n#[async_trait::async_trait]\nimpl PublishSeraiTransaction for () {\n  async fn publish_set_keys(\n    &self,\n    _db: &(impl Sync + serai_db::Get),\n    _set: ExternalValidatorSet,\n    _removed: Vec<SeraiAddress>,\n    _key_pair: KeyPair,\n    _signature: Signature,\n  ) {\n    panic!(\"publish_set_keys was called in test\")\n  }\n}\n\nfn random_u32<R: RngCore>(rng: &mut R) -> u32 {\n  u32::try_from(rng.next_u64() >> 32).unwrap()\n}\n\nfn random_vec<R: RngCore>(rng: &mut R, limit: usize) -> Vec<u8> {\n  let len = usize::try_from(rng.next_u64() % u64::try_from(limit).unwrap()).unwrap();\n  let mut res = vec![0; len];\n  rng.fill_bytes(&mut res);\n  res\n}\n\nfn random_sign_data<R: RngCore, Id: Clone + PartialEq + Eq + Debug + Encode + Decode>(\n  rng: &mut R,\n  plan: Id,\n  label: Label,\n) -> SignData<Id> {\n  SignData {\n    plan,\n    attempt: random_u32(&mut OsRng),\n    label,\n\n    data: {\n      let mut res = vec![];\n      for _ in 0 ..= (rng.next_u64() % 255) {\n        res.push(random_vec(&mut OsRng, 512));\n      }\n      res\n    },\n\n    signed: random_signed_with_nonce(&mut OsRng, label.nonce()),\n  }\n}\n\nfn test_read_write<RW: Eq + Debug + ReadWrite>(value: &RW) {\n  assert_eq!(value, &RW::read::<&[u8]>(&mut value.serialize().as_ref()).unwrap());\n}\n\n#[test]\nfn 
tx_size_limit() {\n  use serai_client::validator_sets::primitives::MAX_KEY_LEN;\n\n  use tributary::TRANSACTION_SIZE_LIMIT;\n\n  let max_dkg_coefficients = (MAX_KEY_SHARES_PER_SET * 2).div_ceil(3) + 1;\n  let max_key_shares_per_individual = MAX_KEY_SHARES_PER_SET - max_dkg_coefficients;\n  // Handwave the DKG Commitments size as the size of the commitments to the coefficients and\n  // 1024 bytes for all overhead\n  let handwaved_dkg_commitments_size = (max_dkg_coefficients * MAX_KEY_LEN) + 1024;\n  assert!(\n    u32::try_from(TRANSACTION_SIZE_LIMIT).unwrap() >=\n      (handwaved_dkg_commitments_size * max_key_shares_per_individual)\n  );\n\n  // Encryption key, PoP (2 elements), message\n  let elements_per_share = 4;\n  let handwaved_dkg_shares_size =\n    (elements_per_share * MAX_KEY_LEN * MAX_KEY_SHARES_PER_SET) + 1024;\n  assert!(\n    u32::try_from(TRANSACTION_SIZE_LIMIT).unwrap() >=\n      (handwaved_dkg_shares_size * max_key_shares_per_individual)\n  );\n}\n\n#[test]\nfn serialize_sign_data() {\n  fn test_read_write<Id: Clone + PartialEq + Eq + Debug + Encode + Decode>(value: &SignData<Id>) {\n    let mut buf = vec![];\n    value.write(&mut buf).unwrap();\n    assert_eq!(value, &SignData::read(&mut buf.as_slice()).unwrap())\n  }\n\n  let mut plan = [0; 3];\n  OsRng.fill_bytes(&mut plan);\n  test_read_write(&random_sign_data::<_, _>(\n    &mut OsRng,\n    plan,\n    if (OsRng.next_u64() % 2) == 0 { Label::Preprocess } else { Label::Share },\n  ));\n  let mut plan = [0; 5];\n  OsRng.fill_bytes(&mut plan);\n  test_read_write(&random_sign_data::<_, _>(\n    &mut OsRng,\n    plan,\n    if (OsRng.next_u64() % 2) == 0 { Label::Preprocess } else { Label::Share },\n  ));\n  let mut plan = [0; 8];\n  OsRng.fill_bytes(&mut plan);\n  test_read_write(&random_sign_data::<_, _>(\n    &mut OsRng,\n    plan,\n    if (OsRng.next_u64() % 2) == 0 { Label::Preprocess } else { Label::Share },\n  ));\n  let mut plan = [0; 24];\n  OsRng.fill_bytes(&mut plan);\n  
test_read_write(&random_sign_data::<_, _>(\n    &mut OsRng,\n    plan,\n    if (OsRng.next_u64() % 2) == 0 { Label::Preprocess } else { Label::Share },\n  ));\n}\n\n#[test]\nfn serialize_transaction() {\n  test_read_write(&Transaction::RemoveParticipantDueToDkg {\n    participant: <Ristretto as Ciphersuite>::G::random(&mut OsRng),\n    signed: random_signed_with_nonce(&mut OsRng, 0),\n  });\n\n  {\n    let mut commitments = vec![random_vec(&mut OsRng, 512)];\n    for _ in 0 .. (OsRng.next_u64() % 100) {\n      let mut temp = commitments[0].clone();\n      OsRng.fill_bytes(&mut temp);\n      commitments.push(temp);\n    }\n    test_read_write(&Transaction::DkgCommitments {\n      attempt: random_u32(&mut OsRng),\n      commitments,\n      signed: random_signed_with_nonce(&mut OsRng, 0),\n    });\n  }\n\n  {\n    // This supports a variable share length, and variable amount of sent shares, yet share length\n    // and sent shares is expected to be constant among recipients\n    let share_len = usize::try_from((OsRng.next_u64() % 512) + 1).unwrap();\n    let amount_of_shares = usize::try_from((OsRng.next_u64() % 3) + 1).unwrap();\n    // Create a valid vec of shares\n    let mut shares = vec![];\n    // Create up to 150 participants\n    for _ in 0 ..= (OsRng.next_u64() % 150) {\n      // Give each sender multiple shares\n      let mut sender_shares = vec![];\n      for _ in 0 .. amount_of_shares {\n        let mut share = vec![0; share_len];\n        OsRng.fill_bytes(&mut share);\n        sender_shares.push(share);\n      }\n      shares.push(sender_shares);\n    }\n\n    test_read_write(&Transaction::DkgShares {\n      attempt: random_u32(&mut OsRng),\n      shares,\n      confirmation_nonces: {\n        let mut nonces = [0; 64];\n        OsRng.fill_bytes(&mut nonces);\n        nonces\n      },\n      signed: random_signed_with_nonce(&mut OsRng, 1),\n    });\n  }\n\n  for i in 0 .. 
2 {\n    test_read_write(&Transaction::InvalidDkgShare {\n      attempt: random_u32(&mut OsRng),\n      accuser: frost::Participant::new(\n        u16::try_from(OsRng.next_u64() >> 48).unwrap().saturating_add(1),\n      )\n      .unwrap(),\n      faulty: frost::Participant::new(\n        u16::try_from(OsRng.next_u64() >> 48).unwrap().saturating_add(1),\n      )\n      .unwrap(),\n      blame: if i == 0 {\n        None\n      } else {\n        Some(random_vec(&mut OsRng, 500)).filter(|blame| !blame.is_empty())\n      },\n      signed: random_signed_with_nonce(&mut OsRng, 2),\n    });\n  }\n\n  test_read_write(&Transaction::DkgConfirmed {\n    attempt: random_u32(&mut OsRng),\n    confirmation_share: {\n      let mut share = [0; 32];\n      OsRng.fill_bytes(&mut share);\n      share\n    },\n    signed: random_signed_with_nonce(&mut OsRng, 2),\n  });\n\n  {\n    let mut block = [0; 32];\n    OsRng.fill_bytes(&mut block);\n    test_read_write(&Transaction::CosignSubstrateBlock(block));\n  }\n\n  {\n    let mut block = [0; 32];\n    OsRng.fill_bytes(&mut block);\n    let batch = u32::try_from(OsRng.next_u64() >> 32).unwrap();\n    test_read_write(&Transaction::Batch { block, batch });\n  }\n  test_read_write(&Transaction::SubstrateBlock(OsRng.next_u64()));\n\n  {\n    let batch = u32::try_from(OsRng.next_u64() >> 32).unwrap();\n    test_read_write(&Transaction::SubstrateSign(random_sign_data(\n      &mut OsRng,\n      SubstrateSignableId::Batch(batch),\n      Label::Preprocess,\n    )));\n  }\n  {\n    let batch = u32::try_from(OsRng.next_u64() >> 32).unwrap();\n    test_read_write(&Transaction::SubstrateSign(random_sign_data(\n      &mut OsRng,\n      SubstrateSignableId::Batch(batch),\n      Label::Share,\n    )));\n  }\n\n  {\n    let mut plan = [0; 32];\n    OsRng.fill_bytes(&mut plan);\n    test_read_write(&Transaction::Sign(random_sign_data(&mut OsRng, plan, Label::Preprocess)));\n  }\n  {\n    let mut plan = [0; 32];\n    OsRng.fill_bytes(&mut plan);\n    
test_read_write(&Transaction::Sign(random_sign_data(&mut OsRng, plan, Label::Share)));\n  }\n\n  {\n    let mut plan = [0; 32];\n    OsRng.fill_bytes(&mut plan);\n    let mut tx_hash = vec![0; (OsRng.next_u64() % 64).try_into().unwrap()];\n    OsRng.fill_bytes(&mut tx_hash);\n    test_read_write(&Transaction::SignCompleted {\n      plan,\n      tx_hash,\n      first_signer: random_signed_with_nonce(&mut OsRng, 2).signer,\n      signature: random_signed_with_nonce(&mut OsRng, 2).signature,\n    });\n  }\n\n  test_read_write(&Transaction::SlashReport(\n    {\n      let amount =\n        usize::try_from(OsRng.next_u64() % u64::from(MAX_KEY_SHARES_PER_SET - 1)).unwrap();\n      let mut points = vec![];\n      for _ in 0 .. amount {\n        points.push((OsRng.next_u64() >> 32).try_into().unwrap());\n      }\n      points\n    },\n    random_signed_with_nonce(&mut OsRng, 0),\n  ));\n}\n"
  },
  {
    "path": "coordinator/src/tests/tributary/sync.rs",
    "content": "use core::time::Duration;\nuse std::{sync::Arc, collections::HashSet};\n\nuse rand_core::OsRng;\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::{group::GroupEncoding, Ciphersuite};\n\nuse tokio::{\n  sync::{mpsc, broadcast},\n  time::sleep,\n};\n\nuse serai_db::MemDb;\n\nuse tributary::Tributary;\n\nuse crate::{\n  tributary::Transaction,\n  ActiveTributary, TributaryEvent,\n  p2p::{heartbeat_tributaries_task, handle_p2p_task},\n  tests::{\n    LocalP2p,\n    tributary::{new_keys, new_spec, new_tributaries},\n  },\n};\n\n#[tokio::test]\nasync fn sync_test() {\n  let mut keys = new_keys(&mut OsRng);\n  let spec = new_spec(&mut OsRng, &keys);\n  // Ensure this can have a node fail\n  assert!(spec.n(&[]) > spec.t());\n\n  let mut tributaries = new_tributaries(&keys, &spec)\n    .await\n    .into_iter()\n    .map(|(_, p2p, tributary)| (p2p, tributary))\n    .collect::<Vec<_>>();\n\n  // Keep a Tributary back, effectively having it offline\n  let syncer_key = keys.pop().unwrap();\n  let (syncer_p2p, syncer_tributary) = tributaries.pop().unwrap();\n\n  // Have the rest form a P2P net\n  let mut tributary_senders = vec![];\n  let mut tributary_arcs = vec![];\n  let mut p2p_threads = vec![];\n  for (p2p, tributary) in tributaries.drain(..) 
{\n    let tributary = Arc::new(tributary);\n    tributary_arcs.push(tributary.clone());\n    let (new_tributary_send, new_tributary_recv) = broadcast::channel(5);\n    let (cosign_send, _) = mpsc::unbounded_channel();\n    let thread = tokio::spawn(handle_p2p_task(p2p, cosign_send, new_tributary_recv));\n    new_tributary_send\n      .send(TributaryEvent::NewTributary(ActiveTributary { spec: spec.clone(), tributary }))\n      .map_err(|_| \"failed to send ActiveTributary\")\n      .unwrap();\n    tributary_senders.push(new_tributary_send);\n    p2p_threads.push(thread);\n  }\n  let tributaries = tributary_arcs;\n\n  // After four blocks of time, we should have a new block\n  // We don't wait one block of time as we may have missed the chance for the first block\n  // We don't wait two blocks because we may have missed the chance, and then had a failure to\n  // propose by our 'offline' validator, which would cause the Tendermint round time to increase,\n  // requiring a longer delay\n  let block_time = u64::from(Tributary::<MemDb, Transaction, LocalP2p>::block_time());\n  sleep(Duration::from_secs(4 * block_time)).await;\n  let tip = tributaries[0].tip().await;\n  assert!(tip != spec.genesis());\n\n  // Sleep one second to make sure this block propagates\n  sleep(Duration::from_secs(1)).await;\n  // Make sure every tributary has it\n  for tributary in &tributaries {\n    assert!(tributary.reader().block(&tip).is_some());\n  }\n\n  // Now that we've confirmed the other tributaries formed a net without issue, drop the syncer's\n  // pending P2P messages\n  syncer_p2p.1.write().await.1.last_mut().unwrap().clear();\n\n  // Have it join the net\n  let syncer_key = Ristretto::generator() * *syncer_key;\n  let syncer_tributary = Arc::new(syncer_tributary);\n  let (syncer_tributary_send, syncer_tributary_recv) = broadcast::channel(5);\n  let (cosign_send, _) = mpsc::unbounded_channel();\n  tokio::spawn(handle_p2p_task(syncer_p2p.clone(), cosign_send, 
syncer_tributary_recv));\n  syncer_tributary_send\n    .send(TributaryEvent::NewTributary(ActiveTributary {\n      spec: spec.clone(),\n      tributary: syncer_tributary.clone(),\n    }))\n    .map_err(|_| \"failed to send ActiveTributary to syncer\")\n    .unwrap();\n\n  // It shouldn't automatically catch up. If it somehow was, our test would be broken\n  // Sanity check this\n  let tip = tributaries[0].tip().await;\n  // Wait until a new block occurs\n  sleep(Duration::from_secs(3 * block_time)).await;\n  // Make sure a new block actually occurred\n  assert!(tributaries[0].tip().await != tip);\n  // Make sure the new block alone didn't trigger catching up\n  assert_eq!(syncer_tributary.tip().await, spec.genesis());\n\n  // Start the heartbeat protocol\n  let (syncer_heartbeat_tributary_send, syncer_heartbeat_tributary_recv) = broadcast::channel(5);\n  tokio::spawn(heartbeat_tributaries_task(syncer_p2p, syncer_heartbeat_tributary_recv));\n  syncer_heartbeat_tributary_send\n    .send(TributaryEvent::NewTributary(ActiveTributary {\n      spec: spec.clone(),\n      tributary: syncer_tributary.clone(),\n    }))\n    .map_err(|_| \"failed to send ActiveTributary to heartbeat\")\n    .unwrap();\n\n  // The heartbeat is once every 10 blocks, with some limitations\n  sleep(Duration::from_secs(20 * block_time)).await;\n  assert!(syncer_tributary.tip().await != spec.genesis());\n\n  // Verify it synced to the tip\n  let syncer_tip = {\n    let tributary = &tributaries[0];\n\n    let tip = tributary.tip().await;\n    let syncer_tip = syncer_tributary.tip().await;\n    // Allow a one block tolerance in case of race conditions\n    assert!(\n      HashSet::from([tip, tributary.reader().block(&tip).unwrap().parent()]).contains(&syncer_tip)\n    );\n    syncer_tip\n  };\n\n  sleep(Duration::from_secs(block_time)).await;\n\n  // Verify it's now keeping up\n  assert!(syncer_tributary.tip().await != syncer_tip);\n\n  // Verify it's now participating in consensus\n  // Because only 
`t` validators are used in a commit, take n - t nodes offline\n  // leaving only `t` nodes. Which should force it to participate in the consensus\n  // of next blocks.\n  let spares = usize::from(spec.n(&[]) - spec.t());\n  for thread in p2p_threads.iter().take(spares) {\n    thread.abort();\n  }\n\n  // wait for a block\n  sleep(Duration::from_secs(block_time)).await;\n\n  if syncer_tributary\n    .reader()\n    .parsed_commit(&syncer_tributary.tip().await)\n    .unwrap()\n    .validators\n    .iter()\n    .any(|signer| signer == &syncer_key.to_bytes())\n  {\n    return;\n  }\n\n  panic!(\"synced tributary didn't start participating in consensus\");\n}\n"
  },
  {
    "path": "coordinator/src/tests/tributary/tx.rs",
    "content": "use core::time::Duration;\n\nuse rand_core::{RngCore, OsRng};\n\nuse tokio::time::sleep;\n\nuse serai_db::MemDb;\n\nuse tributary::{\n  transaction::Transaction as TransactionTrait, Transaction as TributaryTransaction, Tributary,\n};\n\nuse crate::{\n  tributary::Transaction,\n  tests::{\n    LocalP2p,\n    tributary::{new_keys, new_spec, new_tributaries, run_tributaries, wait_for_tx_inclusion},\n  },\n};\n\n#[tokio::test]\nasync fn tx_test() {\n  let keys = new_keys(&mut OsRng);\n  let spec = new_spec(&mut OsRng, &keys);\n\n  let tributaries = new_tributaries(&keys, &spec)\n    .await\n    .into_iter()\n    .map(|(_, p2p, tributary)| (p2p, tributary))\n    .collect::<Vec<_>>();\n\n  // Run the tributaries in the background\n  tokio::spawn(run_tributaries(tributaries.clone()));\n\n  // Send a TX from a random Tributary\n  let sender =\n    usize::try_from(OsRng.next_u64() % u64::try_from(tributaries.len()).unwrap()).unwrap();\n  let key = keys[sender].clone();\n\n  let attempt = 0;\n  let mut commitments = vec![0; 256];\n  OsRng.fill_bytes(&mut commitments);\n\n  // Create the TX with a null signature so we can get its sig hash\n  let block_before_tx = tributaries[sender].1.tip().await;\n  let mut tx = Transaction::DkgCommitments {\n    attempt,\n    commitments: vec![commitments.clone()],\n    signed: Transaction::empty_signed(),\n  };\n  tx.sign(&mut OsRng, spec.genesis(), &key);\n\n  assert_eq!(tributaries[sender].1.add_transaction(tx.clone()).await, Ok(true));\n  let included_in = wait_for_tx_inclusion(&tributaries[sender].1, block_before_tx, tx.hash()).await;\n  // Also sleep for the block time to ensure the block is synced around before we run checks on it\n  sleep(Duration::from_secs(Tributary::<MemDb, Transaction, LocalP2p>::block_time().into())).await;\n\n  // All tributaries should have acknowledged this transaction in a block\n  for (_, tributary) in tributaries {\n    let block = tributary.reader().block(&included_in).unwrap();\n    
assert_eq!(block.transactions, vec![TributaryTransaction::Application(tx.clone())]);\n  }\n}\n"
  },
  {
    "path": "coordinator/src/tributary/db.rs",
    "content": "use std::collections::HashMap;\n\nuse scale::Encode;\nuse borsh::{BorshSerialize, BorshDeserialize};\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::{group::GroupEncoding, Ciphersuite};\nuse frost::Participant;\n\nuse serai_client::validator_sets::primitives::{KeyPair, ExternalValidatorSet};\n\nuse processor_messages::coordinator::SubstrateSignableId;\n\npub use serai_db::*;\n\nuse tributary::ReadWrite;\n\nuse crate::tributary::{Label, Transaction};\n\n#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, BorshSerialize, BorshDeserialize)]\npub enum Topic {\n  Dkg,\n  DkgConfirmation,\n  SubstrateSign(SubstrateSignableId),\n  Sign([u8; 32]),\n}\n\n// A struct to refer to a piece of data all validators will presumably provide a value for.\n#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode)]\npub struct DataSpecification {\n  pub topic: Topic,\n  pub label: Label,\n  pub attempt: u32,\n}\n\npub enum DataSet {\n  Participating(HashMap<Participant, Vec<u8>>),\n  NotParticipating,\n}\n\npub enum Accumulation {\n  Ready(DataSet),\n  NotReady,\n}\n\n// TODO: Move from genesis to set for indexing\ncreate_db!(\n  Tributary {\n    SeraiBlockNumber: (hash: [u8; 32]) -> u64,\n    SeraiDkgCompleted: (spec: ExternalValidatorSet) -> [u8; 32],\n\n    TributaryBlockNumber: (block: [u8; 32]) -> u32,\n    LastHandledBlock: (genesis: [u8; 32]) -> [u8; 32],\n\n    // TODO: Revisit the point of this\n    FatalSlashes: (genesis: [u8; 32]) -> Vec<[u8; 32]>,\n    RemovedAsOfDkgAttempt: (genesis: [u8; 32], attempt: u32) -> Vec<[u8; 32]>,\n    OfflineDuringDkg: (genesis: [u8; 32]) -> Vec<[u8; 32]>,\n    // TODO: Combine these two\n    FatallySlashed: (genesis: [u8; 32], account: [u8; 32]) -> (),\n    SlashPoints: (genesis: [u8; 32], account: [u8; 32]) -> u32,\n\n    VotedToRemove: (genesis: [u8; 32], voter: [u8; 32], to_remove: [u8; 32]) -> (),\n    VotesToRemove: (genesis: [u8; 32], to_remove: [u8; 32]) -> u16,\n\n    AttemptDb: (genesis: [u8; 32], topic: &Topic) -> 
u32,\n    ReattemptDb: (genesis: [u8; 32], block: u32) -> Vec<Topic>,\n    DataReceived: (genesis: [u8; 32], data_spec: &DataSpecification) -> u16,\n    DataDb: (genesis: [u8; 32], data_spec: &DataSpecification, signer_bytes: &[u8; 32]) -> Vec<u8>,\n\n    DkgShare: (genesis: [u8; 32], from: u16, to: u16) -> Vec<u8>,\n    ConfirmationNonces: (genesis: [u8; 32], attempt: u32) -> HashMap<Participant, Vec<u8>>,\n    DkgKeyPair: (genesis: [u8; 32], attempt: u32) -> KeyPair,\n    KeyToDkgAttempt: (key: [u8; 32]) -> u32,\n    DkgLocallyCompleted: (genesis: [u8; 32]) -> (),\n\n    PlanIds: (genesis: &[u8], block: u64) -> Vec<[u8; 32]>,\n\n    SignedTransactionDb: (order: &[u8], nonce: u32) -> Vec<u8>,\n\n    SlashReports: (genesis: [u8; 32], signer: [u8; 32]) -> Vec<u32>,\n    SlashReported: (genesis: [u8; 32]) -> u16,\n    SlashReportCutOff: (genesis: [u8; 32]) -> u64,\n    SlashReport: (set: ExternalValidatorSet) -> Vec<([u8; 32], u32)>,\n  }\n);\n\nimpl FatalSlashes {\n  pub fn get_as_keys(getter: &impl Get, genesis: [u8; 32]) -> Vec<<Ristretto as Ciphersuite>::G> {\n    FatalSlashes::get(getter, genesis)\n      .unwrap_or(vec![])\n      .iter()\n      .map(|key| <Ristretto as Ciphersuite>::G::from_bytes(key).unwrap())\n      .collect::<Vec<_>>()\n  }\n}\n\nimpl FatallySlashed {\n  pub fn set_fatally_slashed(txn: &mut impl DbTxn, genesis: [u8; 32], account: [u8; 32]) {\n    Self::set(txn, genesis, account, &());\n    let mut existing = FatalSlashes::get(txn, genesis).unwrap_or_default();\n\n    // Don't append if we already have it, which can occur upon multiple faults\n    if existing.iter().any(|existing| existing == &account) {\n      return;\n    }\n\n    existing.push(account);\n    FatalSlashes::set(txn, genesis, &existing);\n  }\n}\n\nimpl AttemptDb {\n  pub fn recognize_topic(txn: &mut impl DbTxn, genesis: [u8; 32], topic: Topic) {\n    Self::set(txn, genesis, &topic, &0u32);\n  }\n\n  pub fn start_next_attempt(txn: &mut impl DbTxn, genesis: [u8; 32], topic: 
Topic) -> u32 {\n    let next =\n      Self::attempt(txn, genesis, topic).expect(\"starting next attempt for unknown topic\") + 1;\n    Self::set(txn, genesis, &topic, &next);\n    next\n  }\n\n  pub fn attempt(getter: &impl Get, genesis: [u8; 32], topic: Topic) -> Option<u32> {\n    let attempt = Self::get(getter, genesis, &topic);\n    // Don't require explicit recognition of the Dkg topic as it starts when the chain does\n    // Don't require explicit recognition of the SlashReport topic as it isn't a DoS risk and it\n    // should always happen (eventually)\n    if attempt.is_none() &&\n      ((topic == Topic::Dkg) ||\n        (topic == Topic::DkgConfirmation) ||\n        (topic == Topic::SubstrateSign(SubstrateSignableId::SlashReport)))\n    {\n      return Some(0);\n    }\n    attempt\n  }\n}\n\nimpl ReattemptDb {\n  pub fn schedule_reattempt(\n    txn: &mut impl DbTxn,\n    genesis: [u8; 32],\n    current_block_number: u32,\n    topic: Topic,\n  ) {\n    // 5 minutes\n    #[cfg(not(feature = \"longer-reattempts\"))]\n    const BASE_REATTEMPT_DELAY: u32 = (5 * 60 * 1000) / tributary::tendermint::TARGET_BLOCK_TIME;\n\n    // 10 minutes, intended for latent environments like the GitHub CI\n    #[cfg(feature = \"longer-reattempts\")]\n    const BASE_REATTEMPT_DELAY: u32 = (10 * 60 * 1000) / tributary::tendermint::TARGET_BLOCK_TIME;\n\n    // 5 minutes for attempts 0 ..= 2, 10 minutes for attempts 3 ..= 5, 15 minutes for attempts > 5\n    // Assumes no event will take longer than 15 minutes, yet grows the time in case there are\n    // network bandwidth issues\n    let mut reattempt_delay = BASE_REATTEMPT_DELAY *\n      ((AttemptDb::attempt(txn, genesis, topic)\n        .expect(\"scheduling re-attempt for unknown topic\") /\n        3) +\n        1)\n      .min(3);\n    // Allow more time for DKGs since they have an extra round and much more data\n    if matches!(topic, Topic::Dkg) {\n      reattempt_delay *= 4;\n    }\n    let upon_block = current_block_number + 
reattempt_delay;\n\n    let mut reattempts = Self::get(txn, genesis, upon_block).unwrap_or(vec![]);\n    reattempts.push(topic);\n    Self::set(txn, genesis, upon_block, &reattempts);\n  }\n\n  pub fn take(txn: &mut impl DbTxn, genesis: [u8; 32], block_number: u32) -> Vec<Topic> {\n    let res = Self::get(txn, genesis, block_number).unwrap_or(vec![]);\n    if !res.is_empty() {\n      Self::del(txn, genesis, block_number);\n    }\n    res\n  }\n}\n\nimpl SignedTransactionDb {\n  pub fn take_signed_transaction(\n    txn: &mut impl DbTxn,\n    order: &[u8],\n    nonce: u32,\n  ) -> Option<Transaction> {\n    let res = SignedTransactionDb::get(txn, order, nonce)\n      .map(|bytes| Transaction::read(&mut bytes.as_slice()).unwrap());\n    if res.is_some() {\n      Self::del(txn, order, nonce);\n    }\n    res\n  }\n}\n"
  },
  {
    "path": "coordinator/src/tributary/handle.rs",
    "content": "use core::ops::Deref;\nuse std::collections::HashMap;\n\nuse zeroize::Zeroizing;\nuse rand_core::OsRng;\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::{group::GroupEncoding, Ciphersuite};\nuse frost::dkg::Participant;\n\nuse scale::{Encode, Decode};\nuse serai_client::validator_sets::primitives::KeyPair;\n\nuse tributary::{Signed, TransactionKind, TransactionTrait};\n\nuse processor_messages::{\n  key_gen::{self, KeyGenId},\n  coordinator::{self, SubstrateSignableId, SubstrateSignId},\n  sign::{self, SignId},\n};\n\nuse serai_db::*;\n\nuse crate::{\n  processors::Processors,\n  tributary::{\n    *,\n    signing_protocol::DkgConfirmer,\n    scanner::{\n      RecognizedIdType, RIDTrait, PublishSeraiTransaction, PTTTrait, TributaryBlockHandler,\n    },\n  },\n  P2p,\n};\n\npub fn dkg_confirmation_nonces(\n  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,\n  spec: &TributarySpec,\n  txn: &mut impl DbTxn,\n  attempt: u32,\n) -> [u8; 64] {\n  DkgConfirmer::new(key, spec, txn, attempt)\n    .expect(\"getting DKG confirmation nonces for unknown attempt\")\n    .preprocess()\n}\n\npub fn generated_key_pair<D: Db>(\n  txn: &mut D::Transaction<'_>,\n  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,\n  spec: &TributarySpec,\n  key_pair: &KeyPair,\n  attempt: u32,\n) -> Result<[u8; 32], Participant> {\n  DkgKeyPair::set(txn, spec.genesis(), attempt, key_pair);\n  KeyToDkgAttempt::set(txn, key_pair.0 .0, &attempt);\n  let preprocesses = ConfirmationNonces::get(txn, spec.genesis(), attempt).unwrap();\n  DkgConfirmer::new(key, spec, txn, attempt)\n    .expect(\"claiming to have generated a key pair for an unrecognized attempt\")\n    .share(preprocesses, key_pair)\n}\n\nfn unflatten(\n  spec: &TributarySpec,\n  removed: &[<Ristretto as Ciphersuite>::G],\n  data: &mut HashMap<Participant, Vec<u8>>,\n) {\n  for (validator, _) in spec.validators() {\n    let Some(range) = spec.i(removed, validator) else { continue };\n    let Some(all_segments) = 
data.remove(&range.start) else {\n      continue;\n    };\n    let mut data_vec = Vec::<_>::decode(&mut all_segments.as_slice()).unwrap();\n    for i in u16::from(range.start) .. u16::from(range.end) {\n      let i = Participant::new(i).unwrap();\n      data.insert(i, data_vec.remove(0));\n    }\n  }\n}\n\nimpl<\n    D: Db,\n    T: DbTxn,\n    Pro: Processors,\n    PST: PublishSeraiTransaction,\n    PTT: PTTTrait,\n    RID: RIDTrait,\n    P: P2p,\n  > TributaryBlockHandler<'_, D, T, Pro, PST, PTT, RID, P>\n{\n  fn accumulate(\n    &mut self,\n    removed: &[<Ristretto as Ciphersuite>::G],\n    data_spec: &DataSpecification,\n    signer: <Ristretto as Ciphersuite>::G,\n    data: &Vec<u8>,\n  ) -> Accumulation {\n    log::debug!(\"accumulating entry for {:?} attempt #{}\", &data_spec.topic, &data_spec.attempt);\n    let genesis = self.spec.genesis();\n    if DataDb::get(self.txn, genesis, data_spec, &signer.to_bytes()).is_some() {\n      panic!(\"accumulating data for a participant multiple times\");\n    }\n    let signer_shares = {\n      let Some(signer_i) = self.spec.i(removed, signer) else {\n        log::warn!(\"accumulating data from {} who was removed\", hex::encode(signer.to_bytes()));\n        return Accumulation::NotReady;\n      };\n      u16::from(signer_i.end) - u16::from(signer_i.start)\n    };\n\n    let prior_received = DataReceived::get(self.txn, genesis, data_spec).unwrap_or_default();\n    let now_received = prior_received + signer_shares;\n    DataReceived::set(self.txn, genesis, data_spec, &now_received);\n    DataDb::set(self.txn, genesis, data_spec, &signer.to_bytes(), data);\n\n    let received_range = (prior_received + 1) ..= now_received;\n\n    // If 2/3rds of the network participated in this preprocess, queue it for an automatic\n    // re-attempt\n    // DkgConfirmation doesn't have a re-attempt as it's just an extension for Dkg\n    if (data_spec.label == Label::Preprocess) &&\n      received_range.contains(&self.spec.t()) &&\n      
(data_spec.topic != Topic::DkgConfirmation)\n    {\n      // Double check the attempt on this entry, as we don't want to schedule a re-attempt if this\n      // is an old entry\n      // This is an assert, not part of the if check, as old data shouldn't be here in the first\n      // place\n      assert_eq!(AttemptDb::attempt(self.txn, genesis, data_spec.topic), Some(data_spec.attempt));\n      ReattemptDb::schedule_reattempt(self.txn, genesis, self.block_number, data_spec.topic);\n    }\n\n    // If we have all the needed commitments/preprocesses/shares, tell the processor\n    let needs_everyone =\n      (data_spec.topic == Topic::Dkg) || (data_spec.topic == Topic::DkgConfirmation);\n    let needed = if needs_everyone { self.spec.n(removed) } else { self.spec.t() };\n    if received_range.contains(&needed) {\n      log::debug!(\n        \"accumulation for entry {:?} attempt #{} is ready\",\n        &data_spec.topic,\n        &data_spec.attempt\n      );\n\n      let mut data = HashMap::new();\n      for validator in self.spec.validators().iter().map(|validator| validator.0) {\n        let Some(i) = self.spec.i(removed, validator) else { continue };\n        data.insert(\n          i.start,\n          if let Some(data) = DataDb::get(self.txn, genesis, data_spec, &validator.to_bytes()) {\n            data\n          } else {\n            continue;\n          },\n        );\n      }\n\n      assert_eq!(data.len(), usize::from(needed));\n\n      // Remove our own piece of data, if we were involved\n      if let Some(i) = self.spec.i(removed, Ristretto::generator() * self.our_key.deref()) {\n        if data.remove(&i.start).is_some() {\n          return Accumulation::Ready(DataSet::Participating(data));\n        }\n      }\n      return Accumulation::Ready(DataSet::NotParticipating);\n    }\n    Accumulation::NotReady\n  }\n\n  fn handle_data(\n    &mut self,\n    removed: &[<Ristretto as Ciphersuite>::G],\n    data_spec: &DataSpecification,\n    bytes: &Vec<u8>,\n    
signed: &Signed,\n  ) -> Accumulation {\n    let genesis = self.spec.genesis();\n\n    let Some(curr_attempt) = AttemptDb::attempt(self.txn, genesis, data_spec.topic) else {\n      // Premature publication of a valid ID/publication of an invalid ID\n      self.fatal_slash(signed.signer.to_bytes(), \"published data for ID without an attempt\");\n      return Accumulation::NotReady;\n    };\n\n    // If they've already published a TX for this attempt, slash\n    // This shouldn't be reachable since nonces were made inserted by the coordinator, yet it's a\n    // cheap check to leave in for safety\n    if DataDb::get(self.txn, genesis, data_spec, &signed.signer.to_bytes()).is_some() {\n      self.fatal_slash(signed.signer.to_bytes(), \"published data multiple times\");\n      return Accumulation::NotReady;\n    }\n\n    // If the attempt is lesser than the blockchain's, return\n    if data_spec.attempt < curr_attempt {\n      log::debug!(\n        \"dated attempt published onto tributary for topic {:?} (used attempt {}, current {})\",\n        data_spec.topic,\n        data_spec.attempt,\n        curr_attempt\n      );\n      return Accumulation::NotReady;\n    }\n    // If the attempt is greater, this is a premature publication, full slash\n    if data_spec.attempt > curr_attempt {\n      self.fatal_slash(\n        signed.signer.to_bytes(),\n        \"published data with an attempt which hasn't started\",\n      );\n      return Accumulation::NotReady;\n    }\n\n    // TODO: We can also full slash if shares before all commitments, or share before the\n    // necessary preprocesses\n\n    // TODO: If this is shares, we need to check they are part of the selected signing set\n\n    // Accumulate this data\n    self.accumulate(removed, data_spec, signed.signer, bytes)\n  }\n\n  fn check_sign_data_len(\n    &mut self,\n    removed: &[<Ristretto as Ciphersuite>::G],\n    signer: <Ristretto as Ciphersuite>::G,\n    len: usize,\n  ) -> Result<(), ()> {\n    let 
Some(signer_i) = self.spec.i(removed, signer) else {\n      // TODO: Ensure processor doesn't so participate/check how it handles removals for being\n      // offline\n      self.fatal_slash(signer.to_bytes(), \"signer participated despite being removed\");\n      Err(())?\n    };\n    if len != usize::from(u16::from(signer_i.end) - u16::from(signer_i.start)) {\n      self.fatal_slash(\n        signer.to_bytes(),\n        \"signer published a distinct amount of sign data than they had shares\",\n      );\n      Err(())?;\n    }\n    Ok(())\n  }\n\n  // TODO: Don't call fatal_slash in here, return the party to fatal_slash to ensure no further\n  // execution occurs\n  pub(crate) async fn handle_application_tx(&mut self, tx: Transaction) {\n    let genesis = self.spec.genesis();\n\n    // Don't handle transactions from fatally slashed participants\n    // This prevents removed participants from sabotaging the removal signing sessions and so on\n    // TODO: Because fatally slashed participants can still publish onto the blockchain, they have\n    // a notable DoS ability\n    if let TransactionKind::Signed(_, signed) = tx.kind() {\n      if FatallySlashed::get(self.txn, genesis, signed.signer.to_bytes()).is_some() {\n        return;\n      }\n    }\n\n    match tx {\n      Transaction::RemoveParticipantDueToDkg { participant, signed } => {\n        if self.spec.i(&[], participant).is_none() {\n          self.fatal_slash(\n            participant.to_bytes(),\n            \"RemoveParticipantDueToDkg vote for non-validator\",\n          );\n          return;\n        }\n\n        let participant = participant.to_bytes();\n        let signer = signed.signer.to_bytes();\n\n        assert!(\n          VotedToRemove::get(self.txn, genesis, signer, participant).is_none(),\n          \"VotedToRemove multiple times despite a single nonce being allocated\",\n        );\n        VotedToRemove::set(self.txn, genesis, signer, participant, &());\n\n        let prior_votes = 
VotesToRemove::get(self.txn, genesis, participant).unwrap_or(0);\n        let signer_votes =\n          self.spec.i(&[], signed.signer).expect(\"signer wasn't a validator for this network?\");\n        let new_votes = prior_votes + u16::from(signer_votes.end) - u16::from(signer_votes.start);\n        VotesToRemove::set(self.txn, genesis, participant, &new_votes);\n        if ((prior_votes + 1) ..= new_votes).contains(&self.spec.t()) {\n          self.fatal_slash(participant, \"RemoveParticipantDueToDkg vote\")\n        }\n      }\n\n      Transaction::DkgCommitments { attempt, commitments, signed } => {\n        let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {\n          self.fatal_slash(signed.signer.to_bytes(), \"DkgCommitments with an unrecognized attempt\");\n          return;\n        };\n        let Ok(()) = self.check_sign_data_len(&removed, signed.signer, commitments.len()) else {\n          return;\n        };\n        let data_spec = DataSpecification { topic: Topic::Dkg, label: Label::Preprocess, attempt };\n        match self.handle_data(&removed, &data_spec, &commitments.encode(), &signed) {\n          Accumulation::Ready(DataSet::Participating(mut commitments)) => {\n            log::info!(\"got all DkgCommitments for {}\", hex::encode(genesis));\n            unflatten(self.spec, &removed, &mut commitments);\n            self\n              .processors\n              .send(\n                self.spec.set().network,\n                key_gen::CoordinatorMessage::Commitments {\n                  id: KeyGenId { session: self.spec.set().session, attempt },\n                  commitments,\n                },\n              )\n              .await;\n          }\n          Accumulation::Ready(DataSet::NotParticipating) => {\n            assert!(\n              removed.contains(&(Ristretto::generator() * self.our_key.deref())),\n              \"NotParticipating in a DkgCommitments we weren't removed for\"\n            );\n       
   }\n          Accumulation::NotReady => {}\n        }\n      }\n\n      Transaction::DkgShares { attempt, mut shares, confirmation_nonces, signed } => {\n        let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {\n          self.fatal_slash(signed.signer.to_bytes(), \"DkgShares with an unrecognized attempt\");\n          return;\n        };\n        let not_participating = removed.contains(&(Ristretto::generator() * self.our_key.deref()));\n\n        let Ok(()) = self.check_sign_data_len(&removed, signed.signer, shares.len()) else {\n          return;\n        };\n\n        let Some(sender_i) = self.spec.i(&removed, signed.signer) else {\n          self.fatal_slash(\n            signed.signer.to_bytes(),\n            \"DkgShares for a DKG they aren't participating in\",\n          );\n          return;\n        };\n        let sender_is_len = u16::from(sender_i.end) - u16::from(sender_i.start);\n        for shares in &shares {\n          if shares.len() != (usize::from(self.spec.n(&removed) - sender_is_len)) {\n            self.fatal_slash(signed.signer.to_bytes(), \"invalid amount of DKG shares\");\n            return;\n          }\n        }\n\n        // Save each share as needed for blame\n        for (from_offset, shares) in shares.iter().enumerate() {\n          let from =\n            Participant::new(u16::from(sender_i.start) + u16::try_from(from_offset).unwrap())\n              .unwrap();\n\n          for (to_offset, share) in shares.iter().enumerate() {\n            // 0-indexed (the enumeration) to 1-indexed (Participant)\n            let mut to = u16::try_from(to_offset).unwrap() + 1;\n            // Adjust for the omission of the sender's own shares\n            if to >= u16::from(sender_i.start) {\n              to += u16::from(sender_i.end) - u16::from(sender_i.start);\n            }\n            let to = Participant::new(to).unwrap();\n\n            DkgShare::set(self.txn, genesis, from.into(), to.into(), share);\n    
      }\n        }\n\n        // Filter down to only our share's bytes for handle\n        let our_shares = if let Some(our_i) =\n          self.spec.i(&removed, Ristretto::generator() * self.our_key.deref())\n        {\n          if sender_i == our_i {\n            vec![]\n          } else {\n            // 1-indexed to 0-indexed\n            let mut our_i_pos = u16::from(our_i.start) - 1;\n            // Handle the omission of the sender's own data\n            if u16::from(our_i.start) > u16::from(sender_i.start) {\n              our_i_pos -= sender_is_len;\n            }\n            let our_i_pos = usize::from(our_i_pos);\n            shares\n              .iter_mut()\n              .map(|shares| {\n                shares\n                  .drain(\n                    our_i_pos ..\n                      (our_i_pos + usize::from(u16::from(our_i.end) - u16::from(our_i.start))),\n                  )\n                  .collect::<Vec<_>>()\n              })\n              .collect()\n          }\n        } else {\n          assert!(\n            not_participating,\n            \"we didn't have an i while handling DkgShares we weren't removed for\"\n          );\n          // Since we're not participating, simply save vec![] for our shares\n          vec![]\n        };\n        // Drop shares as it's presumably been mutated into invalidity\n        drop(shares);\n\n        let data_spec = DataSpecification { topic: Topic::Dkg, label: Label::Share, attempt };\n        let encoded_data = (confirmation_nonces.to_vec(), our_shares.encode()).encode();\n        match self.handle_data(&removed, &data_spec, &encoded_data, &signed) {\n          Accumulation::Ready(DataSet::Participating(confirmation_nonces_and_shares)) => {\n            log::info!(\"got all DkgShares for {}\", hex::encode(genesis));\n\n            let mut confirmation_nonces = HashMap::new();\n            let mut shares = HashMap::new();\n            for (participant, confirmation_nonces_and_shares) in 
confirmation_nonces_and_shares {\n              let (these_confirmation_nonces, these_shares) =\n                <(Vec<u8>, Vec<u8>)>::decode(&mut confirmation_nonces_and_shares.as_slice())\n                  .unwrap();\n              confirmation_nonces.insert(participant, these_confirmation_nonces);\n              shares.insert(participant, these_shares);\n            }\n            ConfirmationNonces::set(self.txn, genesis, attempt, &confirmation_nonces);\n\n            // shares is a HashMap<Participant, Vec<Vec<Vec<u8>>>>, with the values representing:\n            // - Each of the sender's shares\n            // - Each of the our shares\n            // - Each share\n            // We need a Vec<HashMap<Participant, Vec<u8>>>, with the outer being each of ours\n            let mut expanded_shares = vec![];\n            for (sender_start_i, shares) in shares {\n              let shares: Vec<Vec<Vec<u8>>> = Vec::<_>::decode(&mut shares.as_slice()).unwrap();\n              for (sender_i_offset, our_shares) in shares.into_iter().enumerate() {\n                for (our_share_i, our_share) in our_shares.into_iter().enumerate() {\n                  if expanded_shares.len() <= our_share_i {\n                    expanded_shares.push(HashMap::new());\n                  }\n                  expanded_shares[our_share_i].insert(\n                    Participant::new(\n                      u16::from(sender_start_i) + u16::try_from(sender_i_offset).unwrap(),\n                    )\n                    .unwrap(),\n                    our_share,\n                  );\n                }\n              }\n            }\n\n            self\n              .processors\n              .send(\n                self.spec.set().network,\n                key_gen::CoordinatorMessage::Shares {\n                  id: KeyGenId { session: self.spec.set().session, attempt },\n                  shares: expanded_shares,\n                },\n              )\n              .await;\n          }\n   
       Accumulation::Ready(DataSet::NotParticipating) => {\n            assert!(not_participating, \"NotParticipating in a DkgShares we weren't removed for\");\n          }\n          Accumulation::NotReady => {}\n        }\n      }\n\n      Transaction::InvalidDkgShare { attempt, accuser, faulty, blame, signed } => {\n        let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {\n          self\n            .fatal_slash(signed.signer.to_bytes(), \"InvalidDkgShare with an unrecognized attempt\");\n          return;\n        };\n        let Some(range) = self.spec.i(&removed, signed.signer) else {\n          self.fatal_slash(\n            signed.signer.to_bytes(),\n            \"InvalidDkgShare for a DKG they aren't participating in\",\n          );\n          return;\n        };\n        if !range.contains(&accuser) {\n          self.fatal_slash(\n            signed.signer.to_bytes(),\n            \"accused with a Participant index which wasn't theirs\",\n          );\n          return;\n        }\n        if range.contains(&faulty) {\n          self.fatal_slash(signed.signer.to_bytes(), \"accused self of having an InvalidDkgShare\");\n          return;\n        }\n\n        let Some(share) = DkgShare::get(self.txn, genesis, accuser.into(), faulty.into()) else {\n          self.fatal_slash(\n            signed.signer.to_bytes(),\n            \"InvalidDkgShare had a non-existent faulty participant\",\n          );\n          return;\n        };\n        self\n          .processors\n          .send(\n            self.spec.set().network,\n            key_gen::CoordinatorMessage::VerifyBlame {\n              id: KeyGenId { session: self.spec.set().session, attempt },\n              accuser,\n              accused: faulty,\n              share,\n              blame,\n            },\n          )\n          .await;\n      }\n\n      Transaction::DkgConfirmed { attempt, confirmation_share, signed } => {\n        let Some(removed) = 
removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {\n          self.fatal_slash(signed.signer.to_bytes(), \"DkgConfirmed with an unrecognized attempt\");\n          return;\n        };\n\n        let data_spec =\n          DataSpecification { topic: Topic::DkgConfirmation, label: Label::Share, attempt };\n        match self.handle_data(&removed, &data_spec, &confirmation_share.to_vec(), &signed) {\n          Accumulation::Ready(DataSet::Participating(shares)) => {\n            log::info!(\"got all DkgConfirmed for {}\", hex::encode(genesis));\n\n            let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {\n              panic!(\n                \"DkgConfirmed for everyone yet didn't have the removed parties for this attempt\",\n              );\n            };\n\n            let preprocesses = ConfirmationNonces::get(self.txn, genesis, attempt).unwrap();\n            // TODO: This can technically happen under very very very specific timing as the txn\n            // put happens before DkgConfirmed, yet the txn commit isn't guaranteed to\n            let key_pair = DkgKeyPair::get(self.txn, genesis, attempt).expect(\n              \"in DkgConfirmed handling, which happens after everyone \\\n              (including us) fires DkgConfirmed, yet no confirming key pair\",\n            );\n            let mut confirmer = DkgConfirmer::new(self.our_key, self.spec, self.txn, attempt)\n              .expect(\"confirming DKG for unrecognized attempt\");\n            let sig = match confirmer.complete(preprocesses, &key_pair, shares) {\n              Ok(sig) => sig,\n              Err(p) => {\n                let mut tx = Transaction::RemoveParticipantDueToDkg {\n                  participant: self.spec.reverse_lookup_i(&removed, p).unwrap(),\n                  signed: Transaction::empty_signed(),\n                };\n                tx.sign(&mut OsRng, genesis, self.our_key);\n                
self.publish_tributary_tx.publish_tributary_tx(tx).await;\n                return;\n              }\n            };\n\n            DkgLocallyCompleted::set(self.txn, genesis, &());\n\n            self\n              .publish_serai_tx\n              .publish_set_keys(\n                self.db,\n                self.spec.set(),\n                removed.into_iter().map(|key| key.to_bytes().into()).collect(),\n                key_pair,\n                sig.into(),\n              )\n              .await;\n          }\n          Accumulation::Ready(DataSet::NotParticipating) => {\n            panic!(\"wasn't a participant in DKG confirmination shares\")\n          }\n          Accumulation::NotReady => {}\n        }\n      }\n\n      Transaction::CosignSubstrateBlock(hash) => {\n        AttemptDb::recognize_topic(\n          self.txn,\n          genesis,\n          Topic::SubstrateSign(SubstrateSignableId::CosigningSubstrateBlock(hash)),\n        );\n\n        let block_number = SeraiBlockNumber::get(self.txn, hash)\n          .expect(\"CosignSubstrateBlock yet didn't save Serai block number\");\n        let msg = coordinator::CoordinatorMessage::CosignSubstrateBlock {\n          id: SubstrateSignId {\n            session: self.spec.set().session,\n            id: SubstrateSignableId::CosigningSubstrateBlock(hash),\n            attempt: 0,\n          },\n          block_number,\n        };\n        self.processors.send(self.spec.set().network, msg).await;\n      }\n\n      Transaction::Batch { block: _, batch } => {\n        // Because this Batch has achieved synchrony, its batch ID should be authorized\n        AttemptDb::recognize_topic(\n          self.txn,\n          genesis,\n          Topic::SubstrateSign(SubstrateSignableId::Batch(batch)),\n        );\n        self\n          .recognized_id\n          .recognized_id(\n            self.spec.set(),\n            genesis,\n            RecognizedIdType::Batch,\n            batch.to_le_bytes().to_vec(),\n          )\n   
       .await;\n      }\n\n      Transaction::SubstrateBlock(block) => {\n        let plan_ids = PlanIds::get(self.txn, &genesis, block).expect(\n          \"synced a tributary block finalizing a substrate block in a provided transaction \\\n          despite us not providing that transaction\",\n        );\n\n        for id in plan_ids {\n          AttemptDb::recognize_topic(self.txn, genesis, Topic::Sign(id));\n          self\n            .recognized_id\n            .recognized_id(self.spec.set(), genesis, RecognizedIdType::Plan, id.to_vec())\n            .await;\n        }\n      }\n\n      Transaction::SubstrateSign(data) => {\n        // Provided transactions ensure synchrony on any signing protocol, and we won't start\n        // signing with threshold keys before we've confirmed them on-chain\n        let Some(removed) =\n          crate::tributary::removed_as_of_set_keys(self.txn, self.spec.set(), genesis)\n        else {\n          self.fatal_slash(\n            data.signed.signer.to_bytes(),\n            \"signing despite not having set keys on substrate\",\n          );\n          return;\n        };\n        let signer = data.signed.signer;\n        let Ok(()) = self.check_sign_data_len(&removed, signer, data.data.len()) else {\n          return;\n        };\n        let expected_len = match data.label {\n          Label::Preprocess => 64,\n          Label::Share => 32,\n        };\n        for data in &data.data {\n          if data.len() != expected_len {\n            self.fatal_slash(\n              signer.to_bytes(),\n              \"unexpected length data for substrate signing protocol\",\n            );\n            return;\n          }\n        }\n\n        let data_spec = DataSpecification {\n          topic: Topic::SubstrateSign(data.plan),\n          label: data.label,\n          attempt: data.attempt,\n        };\n        let Accumulation::Ready(DataSet::Participating(mut results)) =\n          self.handle_data(&removed, &data_spec, 
&data.data.encode(), &data.signed)\n        else {\n          return;\n        };\n        unflatten(self.spec, &removed, &mut results);\n\n        let id = SubstrateSignId {\n          session: self.spec.set().session,\n          id: data.plan,\n          attempt: data.attempt,\n        };\n        let msg = match data.label {\n          Label::Preprocess => coordinator::CoordinatorMessage::SubstratePreprocesses {\n            id,\n            preprocesses: results.into_iter().map(|(v, p)| (v, p.try_into().unwrap())).collect(),\n          },\n          Label::Share => coordinator::CoordinatorMessage::SubstrateShares {\n            id,\n            shares: results.into_iter().map(|(v, p)| (v, p.try_into().unwrap())).collect(),\n          },\n        };\n        self.processors.send(self.spec.set().network, msg).await;\n      }\n\n      Transaction::Sign(data) => {\n        let Some(removed) =\n          crate::tributary::removed_as_of_set_keys(self.txn, self.spec.set(), genesis)\n        else {\n          self.fatal_slash(\n            data.signed.signer.to_bytes(),\n            \"signing despite not having set keys on substrate\",\n          );\n          return;\n        };\n        let Ok(()) = self.check_sign_data_len(&removed, data.signed.signer, data.data.len()) else {\n          return;\n        };\n\n        let data_spec = DataSpecification {\n          topic: Topic::Sign(data.plan),\n          label: data.label,\n          attempt: data.attempt,\n        };\n        if let Accumulation::Ready(DataSet::Participating(mut results)) =\n          self.handle_data(&removed, &data_spec, &data.data.encode(), &data.signed)\n        {\n          unflatten(self.spec, &removed, &mut results);\n          let id =\n            SignId { session: self.spec.set().session, id: data.plan, attempt: data.attempt };\n          self\n            .processors\n            .send(\n              self.spec.set().network,\n              match data.label {\n                
Label::Preprocess => {\n                  sign::CoordinatorMessage::Preprocesses { id, preprocesses: results }\n                }\n                Label::Share => sign::CoordinatorMessage::Shares { id, shares: results },\n              },\n            )\n            .await;\n        }\n      }\n\n      Transaction::SignCompleted { plan, tx_hash, first_signer, signature: _ } => {\n        log::info!(\n          \"on-chain SignCompleted claims {} completes {}\",\n          hex::encode(&tx_hash),\n          hex::encode(plan)\n        );\n\n        if AttemptDb::attempt(self.txn, genesis, Topic::Sign(plan)).is_none() {\n          self.fatal_slash(first_signer.to_bytes(), \"claimed an unrecognized plan was completed\");\n          return;\n        };\n\n        // TODO: Confirm this signer hasn't prior published a completion\n\n        let msg = sign::CoordinatorMessage::Completed {\n          session: self.spec.set().session,\n          id: plan,\n          tx: tx_hash,\n        };\n        self.processors.send(self.spec.set().network, msg).await;\n      }\n\n      Transaction::SlashReport(points, signed) => {\n        // Uses &[] as we only need the length which is independent to who else was removed\n        let signer_range = self.spec.i(&[], signed.signer).unwrap();\n        let signer_len = u16::from(signer_range.end) - u16::from(signer_range.start);\n        if points.len() != (self.spec.validators().len() - 1) {\n          self.fatal_slash(\n            signed.signer.to_bytes(),\n            \"submitted a distinct amount of slash points to participants\",\n          );\n          return;\n        }\n\n        if SlashReports::get(self.txn, genesis, signed.signer.to_bytes()).is_some() {\n          self.fatal_slash(signed.signer.to_bytes(), \"submitted multiple slash points\");\n          return;\n        }\n        SlashReports::set(self.txn, genesis, signed.signer.to_bytes(), &points);\n\n        let prior_reported = SlashReported::get(self.txn, 
genesis).unwrap_or(0);\n        let now_reported = prior_reported + signer_len;\n        SlashReported::set(self.txn, genesis, &now_reported);\n\n        if (prior_reported < self.spec.t()) && (now_reported >= self.spec.t()) {\n          SlashReportCutOff::set(\n            self.txn,\n            genesis,\n            // 30 minutes into the future\n            &(u64::from(self.block_number) +\n              ((30 * 60 * 1000) / u64::from(tributary::tendermint::TARGET_BLOCK_TIME))),\n          );\n        }\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "coordinator/src/tributary/mod.rs",
    "content": "use dalek_ff_group::Ristretto;\nuse ciphersuite::{group::GroupEncoding, Ciphersuite};\n\nuse serai_client::validator_sets::primitives::ExternalValidatorSet;\n\nuse tributary::{\n  ReadWrite,\n  transaction::{TransactionError, TransactionKind, Transaction as TransactionTrait},\n  Tributary,\n};\n\nmod db;\npub use db::*;\n\nmod spec;\npub use spec::TributarySpec;\n\nmod transaction;\npub use transaction::{Label, SignData, Transaction};\n\nmod signing_protocol;\n\nmod handle;\npub use handle::*;\n\npub mod scanner;\n\npub fn removed_as_of_dkg_attempt(\n  getter: &impl Get,\n  genesis: [u8; 32],\n  attempt: u32,\n) -> Option<Vec<<Ristretto as Ciphersuite>::G>> {\n  if attempt == 0 {\n    Some(vec![])\n  } else {\n    RemovedAsOfDkgAttempt::get(getter, genesis, attempt).map(|keys| {\n      keys.iter().map(|key| <Ristretto as Ciphersuite>::G::from_bytes(key).unwrap()).collect()\n    })\n  }\n}\n\npub fn removed_as_of_set_keys(\n  getter: &impl Get,\n  set: ExternalValidatorSet,\n  genesis: [u8; 32],\n) -> Option<Vec<<Ristretto as Ciphersuite>::G>> {\n  // SeraiDkgCompleted has the key placed on-chain.\n  // This key can be uniquely mapped to an attempt so long as one participant was honest, which we\n  // assume as a presumably honest participant.\n  // Resolve from generated key to attempt to fatally slashed as of attempt.\n\n  // This expect will trigger if this is prematurely called and Substrate has tracked the keys yet\n  // we haven't locally synced and handled the Tributary\n  // All callers of this, at the time of writing, ensure the Tributary has sufficiently synced\n  // making the panic with context more desirable than the None\n  let attempt = KeyToDkgAttempt::get(getter, SeraiDkgCompleted::get(getter, set)?)\n    .expect(\"key completed on-chain didn't have an attempt related\");\n  removed_as_of_dkg_attempt(getter, genesis, attempt)\n}\n\npub async fn publish_signed_transaction<D: Db, P: crate::P2p>(\n  txn: &mut D::Transaction<'_>,\n  
tributary: &Tributary<D, Transaction, P>,\n  tx: Transaction,\n) {\n  log::debug!(\"publishing transaction {}\", hex::encode(tx.hash()));\n\n  let (order, signer) = if let TransactionKind::Signed(order, signed) = tx.kind() {\n    let signer = signed.signer;\n\n    // Safe as we should deterministically create transactions, meaning if this is already on-disk,\n    // it's what we're saving now\n    SignedTransactionDb::set(txn, &order, signed.nonce, &tx.serialize());\n\n    (order, signer)\n  } else {\n    panic!(\"non-signed transaction passed to publish_signed_transaction\");\n  };\n\n  // If we're trying to publish 5, when the last transaction published was 3, this will delay\n  // publication until the point in time we publish 4\n  while let Some(tx) = SignedTransactionDb::take_signed_transaction(\n    txn,\n    &order,\n    tributary\n      .next_nonce(&signer, &order)\n      .await\n      .expect(\"we don't have a nonce, meaning we aren't a participant on this tributary\"),\n  ) {\n    // We need to return a proper error here to enable that, due to a race condition around\n    // multiple publications\n    match tributary.add_transaction(tx.clone()).await {\n      Ok(_) => {}\n      // Some asynchonicity if InvalidNonce, assumed safe to deterministic nonces\n      Err(TransactionError::InvalidNonce) => {\n        log::warn!(\"publishing TX {tx:?} returned InvalidNonce. was it already added?\")\n      }\n      Err(e) => panic!(\"created an invalid transaction: {e:?}\"),\n    }\n  }\n}\n"
  },
  {
    "path": "coordinator/src/tributary/scanner.rs",
    "content": "use core::{marker::PhantomData, ops::Deref, future::Future, time::Duration};\nuse std::{sync::Arc, collections::HashSet};\n\nuse zeroize::Zeroizing;\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::{group::GroupEncoding, Ciphersuite};\n\nuse tokio::sync::broadcast;\n\nuse scale::{Encode, Decode};\nuse serai_client::{\n  primitives::{SeraiAddress, Signature},\n  validator_sets::primitives::{ExternalValidatorSet, KeyPair},\n  Serai,\n};\n\nuse serai_db::DbTxn;\n\nuse processor_messages::coordinator::{SubstrateSignId, SubstrateSignableId};\n\nuse tributary::{\n  TransactionKind, Transaction as TributaryTransaction, TransactionError, Block, TributaryReader,\n  tendermint::{\n    tx::{TendermintTx, Evidence, decode_signed_message},\n    TendermintNetwork,\n  },\n};\n\nuse crate::{Db, processors::Processors, substrate::BatchInstructionsHashDb, tributary::*, P2p};\n\n#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, Decode)]\npub enum RecognizedIdType {\n  Batch,\n  Plan,\n}\n\n#[async_trait::async_trait]\npub trait RIDTrait {\n  async fn recognized_id(\n    &self,\n    set: ExternalValidatorSet,\n    genesis: [u8; 32],\n    kind: RecognizedIdType,\n    id: Vec<u8>,\n  );\n}\n#[async_trait::async_trait]\nimpl<\n    FRid: Send + Future<Output = ()>,\n    F: Sync + Fn(ExternalValidatorSet, [u8; 32], RecognizedIdType, Vec<u8>) -> FRid,\n  > RIDTrait for F\n{\n  async fn recognized_id(\n    &self,\n    set: ExternalValidatorSet,\n    genesis: [u8; 32],\n    kind: RecognizedIdType,\n    id: Vec<u8>,\n  ) {\n    (self)(set, genesis, kind, id).await\n  }\n}\n\n#[async_trait::async_trait]\npub trait PublishSeraiTransaction {\n  async fn publish_set_keys(\n    &self,\n    db: &(impl Sync + Get),\n    set: ExternalValidatorSet,\n    removed: Vec<SeraiAddress>,\n    key_pair: KeyPair,\n    signature: Signature,\n  );\n}\n\nmod impl_pst_for_serai {\n  use super::*;\n\n  use serai_client::SeraiValidatorSets;\n\n  // Uses a macro because Rust can't resolve the 
lifetimes/generics around the check function\n  // check is expected to return true if the effect has already occurred\n  // The generated publish function will return true if *we* published the transaction\n  macro_rules! common_pst {\n    ($Meta: ty, $check: ident) => {\n      async fn publish(\n        serai: &Serai,\n        db: &impl Get,\n        set: ExternalValidatorSet,\n        tx: serai_client::Transaction,\n        meta: $Meta,\n      ) -> bool {\n        loop {\n          match serai.publish(&tx).await {\n            Ok(_) => return true,\n            // This is assumed to be some ephemeral error due to the assumed fault-free\n            // creation\n            // TODO2: Differentiate connection errors from invariants\n            Err(e) => {\n              // The following block is irrelevant, and can/likely will fail, if we're publishing\n              // a TX for an old session\n              // If we're on a newer session, move on\n              if crate::RetiredTributaryDb::get(db, set).is_some() {\n                log::warn!(\"trying to publish a TX relevant to set {set:?} which isn't the latest\");\n                return false;\n              }\n\n              if let Ok(serai) = serai.as_of_latest_finalized_block().await {\n                let serai = serai.validator_sets();\n\n                // Check if someone else published the TX in question\n                if $check(serai, set, meta).await {\n                  return false;\n                }\n              }\n\n              log::error!(\"couldn't connect to Serai node to publish TX: {e:?}\");\n              tokio::time::sleep(core::time::Duration::from_secs(5)).await;\n            }\n          }\n        }\n      }\n    };\n  }\n\n  #[async_trait::async_trait]\n  impl PublishSeraiTransaction for Serai {\n    async fn publish_set_keys(\n      &self,\n      db: &(impl Sync + Get),\n      set: ExternalValidatorSet,\n      removed: Vec<SeraiAddress>,\n      key_pair: KeyPair,\n      
signature: Signature,\n    ) {\n      // TODO: BoundedVec as an arg to avoid this expect\n      let tx = SeraiValidatorSets::set_keys(\n        set.network,\n        removed.try_into().expect(\"removing more than allowed\"),\n        key_pair,\n        signature,\n      );\n      async fn check(serai: SeraiValidatorSets<'_>, set: ExternalValidatorSet, (): ()) -> bool {\n        if matches!(serai.keys(set).await, Ok(Some(_))) {\n          log::info!(\"another coordinator set key pair for {:?}\", set);\n          return true;\n        }\n        false\n      }\n      common_pst!((), check);\n      if publish(self, db, set, tx, ()).await {\n        log::info!(\"published set keys for {set:?}\");\n      }\n    }\n  }\n}\n\n#[async_trait::async_trait]\npub trait PTTTrait {\n  async fn publish_tributary_tx(&self, tx: Transaction);\n}\n#[async_trait::async_trait]\nimpl<FPtt: Send + Future<Output = ()>, F: Sync + Fn(Transaction) -> FPtt> PTTTrait for F {\n  async fn publish_tributary_tx(&self, tx: Transaction) {\n    (self)(tx).await\n  }\n}\n\npub struct TributaryBlockHandler<\n  'a,\n  D: Db,\n  T: DbTxn,\n  Pro: Processors,\n  PST: PublishSeraiTransaction,\n  PTT: PTTTrait,\n  RID: RIDTrait,\n  P: P2p,\n> {\n  pub db: &'a D,\n  pub txn: &'a mut T,\n  pub our_key: &'a Zeroizing<<Ristretto as Ciphersuite>::F>,\n  pub recognized_id: &'a RID,\n  pub processors: &'a Pro,\n  pub publish_serai_tx: &'a PST,\n  pub publish_tributary_tx: &'a PTT,\n  pub spec: &'a TributarySpec,\n  block: Block<Transaction>,\n  pub block_number: u32,\n  _p2p: PhantomData<P>,\n}\n\nimpl<\n    D: Db,\n    T: DbTxn,\n    Pro: Processors,\n    PST: PublishSeraiTransaction,\n    PTT: PTTTrait,\n    RID: RIDTrait,\n    P: P2p,\n  > TributaryBlockHandler<'_, D, T, Pro, PST, PTT, RID, P>\n{\n  pub fn fatal_slash(&mut self, slashing: [u8; 32], reason: &str) {\n    let genesis = self.spec.genesis();\n\n    log::warn!(\"fatally slashing {}. 
reason: {}\", hex::encode(slashing), reason);\n    FatallySlashed::set_fatally_slashed(self.txn, genesis, slashing);\n\n    // TODO: disconnect the node from network/ban from further participation in all Tributaries\n  }\n\n  // TODO: Once Substrate confirms a key, we need to rotate our validator set OR form a second\n  // Tributary post-DKG\n  // https://github.com/serai-dex/serai/issues/426\n\n  async fn handle(mut self) {\n    log::info!(\"found block for Tributary {:?}\", self.spec.set());\n\n    let transactions = self.block.transactions.clone();\n    for tx in transactions {\n      match tx {\n        TributaryTransaction::Tendermint(TendermintTx::SlashEvidence(ev)) => {\n          // Since the evidence is on the chain, it should already have been validated\n          // We can just punish the signer\n          let data = match ev {\n            Evidence::ConflictingMessages(first, second) => (first, Some(second)),\n            Evidence::InvalidPrecommit(first) | Evidence::InvalidValidRound(first) => (first, None),\n          };\n          let msgs = (\n            decode_signed_message::<TendermintNetwork<D, Transaction, P>>(&data.0).unwrap(),\n            if data.1.is_some() {\n              Some(\n                decode_signed_message::<TendermintNetwork<D, Transaction, P>>(&data.1.unwrap())\n                  .unwrap(),\n              )\n            } else {\n              None\n            },\n          );\n\n          // Since anything with evidence is fundamentally faulty behavior, not just temporal\n          // errors, mark the node as fatally slashed\n          self.fatal_slash(msgs.0.msg.sender, &format!(\"invalid tendermint messages: {msgs:?}\"));\n        }\n        TributaryTransaction::Application(tx) => {\n          self.handle_application_tx(tx).await;\n        }\n      }\n    }\n\n    let genesis = self.spec.genesis();\n\n    let current_fatal_slashes = FatalSlashes::get_as_keys(self.txn, genesis);\n\n    // Calculate the shares still 
present, spinning if not enough are\n    // still_present_shares is used by a below branch, yet it's a natural byproduct of checking if\n    // we should spin, hence storing it in a variable here\n    let still_present_shares = {\n      // Start with the original n value\n      let mut present_shares = self.spec.n(&[]);\n      // Remove everyone fatally slashed\n      for removed in &current_fatal_slashes {\n        let original_i_for_removed =\n          self.spec.i(&[], *removed).expect(\"removed party was never present\");\n        let removed_shares =\n          u16::from(original_i_for_removed.end) - u16::from(original_i_for_removed.start);\n        present_shares -= removed_shares;\n      }\n\n      // Spin if the present shares don't satisfy the required threshold\n      if present_shares < self.spec.t() {\n        loop {\n          log::error!(\n            \"fatally slashed so many participants for {:?} we no longer meet the threshold\",\n            self.spec.set()\n          );\n          tokio::time::sleep(core::time::Duration::from_secs(60)).await;\n        }\n      }\n\n      present_shares\n    };\n\n    for topic in ReattemptDb::take(self.txn, genesis, self.block_number) {\n      let attempt = AttemptDb::start_next_attempt(self.txn, genesis, topic);\n      log::info!(\"re-attempting {topic:?} with attempt {attempt}\");\n\n      // Slash people who failed to participate as expected in the prior attempt\n      {\n        let prior_attempt = attempt - 1;\n        let (removed, expected_participants) = match topic {\n          Topic::Dkg => {\n            // Every validator who wasn't removed is expected to have participated\n            let removed =\n              crate::tributary::removed_as_of_dkg_attempt(self.txn, genesis, prior_attempt)\n                .expect(\"prior attempt didn't have its removed saved to disk\");\n            let removed_set = removed.iter().copied().collect::<HashSet<_>>();\n            (\n              removed,\n            
  self\n                .spec\n                .validators()\n                .into_iter()\n                .filter_map(|(validator, _)| {\n                  Some(validator).filter(|validator| !removed_set.contains(validator))\n                })\n                .collect(),\n            )\n          }\n          Topic::DkgConfirmation => {\n            panic!(\"TODO: re-attempting DkgConfirmation when we should be re-attempting the Dkg\")\n          }\n          Topic::SubstrateSign(_) | Topic::Sign(_) => {\n            let removed =\n              crate::tributary::removed_as_of_set_keys(self.txn, self.spec.set(), genesis)\n                .expect(\"SubstrateSign/Sign yet have yet to set keys\");\n            // TODO: If 67% sent preprocesses, this should be them. Else, this should be vec![]\n            let expected_participants = vec![];\n            (removed, expected_participants)\n          }\n        };\n\n        let (expected_topic, expected_label) = match topic {\n          Topic::Dkg => {\n            let n = self.spec.n(&removed);\n            // If we got all the DKG shares, we should be on DKG confirmation\n            let share_spec =\n              DataSpecification { topic: Topic::Dkg, label: Label::Share, attempt: prior_attempt };\n            if DataReceived::get(self.txn, genesis, &share_spec).unwrap_or(0) == n {\n              // Label::Share since there is no Label::Preprocess for DkgConfirmation since the\n              // preprocess is part of Topic::Dkg Label::Share\n              (Topic::DkgConfirmation, Label::Share)\n            } else {\n              let preprocess_spec = DataSpecification {\n                topic: Topic::Dkg,\n                label: Label::Preprocess,\n                attempt: prior_attempt,\n              };\n              // If we got all the DKG preprocesses, DKG shares\n              if DataReceived::get(self.txn, genesis, &preprocess_spec).unwrap_or(0) == n {\n                // Label::Share since there is no 
Label::Preprocess for DkgConfirmation since the\n                // preprocess is part of Topic::Dkg Label::Share\n                (Topic::Dkg, Label::Share)\n              } else {\n                (Topic::Dkg, Label::Preprocess)\n              }\n            }\n          }\n          Topic::DkgConfirmation => unreachable!(),\n          // If we got enough participants to move forward, then we expect shares from them all\n          Topic::SubstrateSign(_) | Topic::Sign(_) => (topic, Label::Share),\n        };\n\n        let mut did_not_participate = vec![];\n        for expected_participant in expected_participants {\n          if DataDb::get(\n            self.txn,\n            genesis,\n            &DataSpecification {\n              topic: expected_topic,\n              label: expected_label,\n              attempt: prior_attempt,\n            },\n            &expected_participant.to_bytes(),\n          )\n          .is_none()\n          {\n            did_not_participate.push(expected_participant);\n          }\n        }\n\n        // If a supermajority didn't participate as expected, the protocol was likely aborted due\n        // to detection of a completion or some larger networking error\n        // Accordingly, clear did_not_participate\n        // TODO\n\n        // If during the DKG, explicitly mark these people as having been offline\n        // TODO: If they were offline sufficiently long ago, don't strike them off\n        if topic == Topic::Dkg {\n          let mut existing = OfflineDuringDkg::get(self.txn, genesis).unwrap_or(vec![]);\n          for did_not_participate in did_not_participate {\n            existing.push(did_not_participate.to_bytes());\n          }\n          OfflineDuringDkg::set(self.txn, genesis, &existing);\n        }\n\n        // Slash everyone who didn't participate as expected\n        // This may be overzealous as if a minority detects a completion, they'll abort yet the\n        // supermajority will cause the above 
allowance to not trigger, causing an honest minority\n        // to be slashed\n        // At the end of the protocol, the accumulated slashes are reduced by the amount obtained\n        // by the worst-performing member of the supermajority, and this is expected to\n        // sufficiently compensate for slashes which occur under normal operation\n        // TODO\n      }\n\n      /*\n        All of these have the same common flow:\n\n        1) Check if this re-attempt is actually needed\n        2) If so, dispatch whatever events as needed\n\n        This is because we *always* re-attempt any protocol which had participation. That doesn't\n        mean we *should* re-attempt this protocol.\n\n        The alternatives were:\n        1) Note on-chain we completed a protocol, halting re-attempts upon 34%.\n        2) Vote on-chain to re-attempt a protocol.\n\n        This schema doesn't have any additional messages upon the success case (whereas\n        alternative #1 does) and doesn't have overhead (as alternative #2 does, sending votes and\n        then preprocesses. 
This only sends preprocesses).\n      */\n      match topic {\n        Topic::Dkg => {\n          let mut removed = current_fatal_slashes.clone();\n\n          let t = self.spec.t();\n          {\n            let mut present_shares = still_present_shares;\n\n            // Load the parties marked as offline across the various attempts\n            let mut offline = OfflineDuringDkg::get(self.txn, genesis)\n              .unwrap_or(vec![])\n              .iter()\n              .map(|key| <Ristretto as Ciphersuite>::G::from_bytes(key).unwrap())\n              .collect::<Vec<_>>();\n            // Pop from the list to prioritize the removal of those recently offline\n            while let Some(offline) = offline.pop() {\n              // Make sure they weren't removed already (such as due to being fatally slashed)\n              // This also may trigger if they were offline across multiple attempts\n              if removed.contains(&offline) {\n                continue;\n              }\n\n              // If we can remove them and still meet the threshold, do so\n              let original_i_for_offline =\n                self.spec.i(&[], offline).expect(\"offline was never present?\");\n              let offline_shares =\n                u16::from(original_i_for_offline.end) - u16::from(original_i_for_offline.start);\n              if (present_shares - offline_shares) >= t {\n                present_shares -= offline_shares;\n                removed.push(offline);\n              }\n\n              // If we've removed as many people as we can, break\n              if present_shares == t {\n                break;\n              }\n            }\n          }\n\n          RemovedAsOfDkgAttempt::set(\n            self.txn,\n            genesis,\n            attempt,\n            &removed.iter().map(<Ristretto as Ciphersuite>::G::to_bytes).collect(),\n          );\n\n          if DkgLocallyCompleted::get(self.txn, genesis).is_none() {\n            let Some(our_i) = 
self.spec.i(&removed, Ristretto::generator() * self.our_key.deref())\n            else {\n              continue;\n            };\n\n            // Since it wasn't completed, instruct the processor to start the next attempt\n            let id =\n              processor_messages::key_gen::KeyGenId { session: self.spec.set().session, attempt };\n\n            let params =\n              frost::ThresholdParams::new(t, self.spec.n(&removed), our_i.start).unwrap();\n            let shares = u16::from(our_i.end) - u16::from(our_i.start);\n\n            self\n              .processors\n              .send(\n                self.spec.set().network,\n                processor_messages::key_gen::CoordinatorMessage::GenerateKey { id, params, shares },\n              )\n              .await;\n          }\n        }\n        Topic::DkgConfirmation => unreachable!(),\n        Topic::SubstrateSign(inner_id) => {\n          let id = processor_messages::coordinator::SubstrateSignId {\n            session: self.spec.set().session,\n            id: inner_id,\n            attempt,\n          };\n          match inner_id {\n            SubstrateSignableId::CosigningSubstrateBlock(block) => {\n              let block_number = SeraiBlockNumber::get(self.txn, block)\n                .expect(\"couldn't get the block number for prior attempted cosign\");\n\n              // Check if the cosigner has a signature from our set for this block/a newer one\n              let latest_cosign =\n                crate::cosign_evaluator::LatestCosign::get(self.txn, self.spec.set().network)\n                  .map_or(0, |cosign| cosign.block_number);\n              if latest_cosign < block_number {\n                // Instruct the processor to start the next attempt\n                self\n                  .processors\n                  .send(\n                    self.spec.set().network,\n                    processor_messages::coordinator::CoordinatorMessage::CosignSubstrateBlock {\n                  
    id,\n                      block_number,\n                    },\n                  )\n                  .await;\n              }\n            }\n            SubstrateSignableId::Batch(batch) => {\n              // If the Batch hasn't appeared on-chain...\n              if BatchInstructionsHashDb::get(self.txn, self.spec.set().network, batch).is_none() {\n                // Instruct the processor to start the next attempt\n                // The processor won't continue if it's already signed a Batch\n                // Prior checking if the Batch is on-chain just may reduce the non-participating\n                // 33% from publishing their re-attempt messages\n                self\n                  .processors\n                  .send(\n                    self.spec.set().network,\n                    processor_messages::coordinator::CoordinatorMessage::BatchReattempt { id },\n                  )\n                  .await;\n              }\n            }\n            SubstrateSignableId::SlashReport => {\n              // If this Tributary hasn't been retired...\n              // (published SlashReport/took too long to do so)\n              if crate::RetiredTributaryDb::get(self.txn, self.spec.set()).is_none() {\n                let report = SlashReport::get(self.txn, self.spec.set())\n                  .expect(\"re-attempting signing a SlashReport we don't have?\");\n                self\n                  .processors\n                  .send(\n                    self.spec.set().network,\n                    processor_messages::coordinator::CoordinatorMessage::SignSlashReport {\n                      id,\n                      report,\n                    },\n                  )\n                  .await;\n              }\n            }\n          }\n        }\n        Topic::Sign(id) => {\n          // Instruct the processor to start the next attempt\n          // If it has already noted a completion, it won't send a preprocess and will simply drop\n      
    // the re-attempt message\n          self\n            .processors\n            .send(\n              self.spec.set().network,\n              processor_messages::sign::CoordinatorMessage::Reattempt {\n                id: processor_messages::sign::SignId {\n                  session: self.spec.set().session,\n                  id,\n                  attempt,\n                },\n              },\n            )\n            .await;\n        }\n      }\n    }\n\n    if Some(u64::from(self.block_number)) == SlashReportCutOff::get(self.txn, genesis) {\n      // Grab every slash report\n      let mut all_reports = vec![];\n      for (i, (validator, _)) in self.spec.validators().into_iter().enumerate() {\n        let Some(mut report) = SlashReports::get(self.txn, genesis, validator.to_bytes()) else {\n          continue;\n        };\n        // Assign them 0 points for themselves\n        report.insert(i, 0);\n        // Uses &[] as we only need the length which is independent to who else was removed\n        let signer_i = self.spec.i(&[], validator).unwrap();\n        let signer_len = u16::from(signer_i.end) - u16::from(signer_i.start);\n        // Push `n` copies, one for each of their shares\n        for _ in 0 .. signer_len {\n          all_reports.push(report.clone());\n        }\n      }\n\n      // For each participant, grab their median\n      let mut medians = vec![];\n      for p in 0 .. self.spec.validators().len() {\n        let mut median_calc = vec![];\n        for report in &all_reports {\n          median_calc.push(report[p]);\n        }\n        median_calc.sort_unstable();\n        medians.push(median_calc[median_calc.len() / 2]);\n      }\n\n      // Grab the points of the last party within the best-performing threshold\n      // This is done by first expanding the point values by the amount of shares\n      let mut sorted_medians = vec![];\n      for (i, (_, shares)) in self.spec.validators().into_iter().enumerate() {\n        for _ in 0 .. 
shares {\n          sorted_medians.push(medians[i]);\n        }\n      }\n      // Then performing the sort\n      sorted_medians.sort_unstable();\n      let worst_points_by_party_within_threshold = sorted_medians[usize::from(self.spec.t()) - 1];\n\n      // Reduce everyone's points by this value\n      for median in &mut medians {\n        *median = median.saturating_sub(worst_points_by_party_within_threshold);\n      }\n\n      // The threshold now has the proper incentive to report this as they no longer suffer\n      // negative effects\n      //\n      // Additionally, if all validators had degraded performance, they don't all get penalized for\n      // what's likely outside their control (as it occurred universally)\n\n      // Mark everyone fatally slashed with u32::MAX\n      for (i, (validator, _)) in self.spec.validators().into_iter().enumerate() {\n        if FatallySlashed::get(self.txn, genesis, validator.to_bytes()).is_some() {\n          medians[i] = u32::MAX;\n        }\n      }\n\n      let mut report = vec![];\n      for (i, (validator, _)) in self.spec.validators().into_iter().enumerate() {\n        if medians[i] != 0 {\n          report.push((validator.to_bytes(), medians[i]));\n        }\n      }\n\n      // This does lock in the report, meaning further slash point accumulations won't be reported\n      // They still have value to be locally tracked due to local decisions made based off\n      // accumulated slash reports\n      SlashReport::set(self.txn, self.spec.set(), &report);\n\n      // Start a signing protocol for this\n      self\n        .processors\n        .send(\n          self.spec.set().network,\n          processor_messages::coordinator::CoordinatorMessage::SignSlashReport {\n            id: SubstrateSignId {\n              session: self.spec.set().session,\n              id: SubstrateSignableId::SlashReport,\n              attempt: 0,\n            },\n            report,\n          },\n        )\n        .await;\n    }\n  
}\n}\n\n#[allow(clippy::too_many_arguments)]\npub(crate) async fn handle_new_blocks<\n  D: Db,\n  Pro: Processors,\n  PST: PublishSeraiTransaction,\n  PTT: PTTTrait,\n  RID: RIDTrait,\n  P: P2p,\n>(\n  db: &mut D,\n  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,\n  recognized_id: &RID,\n  processors: &Pro,\n  publish_serai_tx: &PST,\n  publish_tributary_tx: &PTT,\n  spec: &TributarySpec,\n  tributary: &TributaryReader<D, Transaction>,\n) {\n  let genesis = tributary.genesis();\n  let mut last_block = LastHandledBlock::get(db, genesis).unwrap_or(genesis);\n  let mut block_number = TributaryBlockNumber::get(db, last_block).unwrap_or(0);\n  while let Some(next) = tributary.block_after(&last_block) {\n    let block = tributary.block(&next).unwrap();\n    block_number += 1;\n\n    // Make sure we have all of the provided transactions for this block\n    for tx in &block.transactions {\n      // Provided TXs will appear first in the Block, so we can break after we hit a non-Provided\n      let TransactionKind::Provided(order) = tx.kind() else {\n        break;\n      };\n\n      // make sure we have all the provided txs in this block locally\n      if !tributary.locally_provided_txs_in_block(&block.hash(), order) {\n        return;\n      }\n    }\n\n    let mut db_clone = db.clone();\n    let mut txn = db_clone.txn();\n    TributaryBlockNumber::set(&mut txn, next, &block_number);\n    (TributaryBlockHandler {\n      db,\n      txn: &mut txn,\n      spec,\n      our_key: key,\n      recognized_id,\n      processors,\n      publish_serai_tx,\n      publish_tributary_tx,\n      block,\n      block_number,\n      _p2p: PhantomData::<P>,\n    })\n    .handle()\n    .await;\n    last_block = next;\n    LastHandledBlock::set(&mut txn, genesis, &next);\n    txn.commit();\n  }\n}\n\npub(crate) async fn scan_tributaries_task<\n  D: Db,\n  Pro: Processors,\n  P: P2p,\n  RID: 'static + Send + Sync + Clone + RIDTrait,\n>(\n  raw_db: D,\n  key: Zeroizing<<Ristretto as 
Ciphersuite>::F>,\n  recognized_id: RID,\n  processors: Pro,\n  serai: Arc<Serai>,\n  mut tributary_event: broadcast::Receiver<crate::TributaryEvent<D, P>>,\n) {\n  log::info!(\"scanning tributaries\");\n\n  loop {\n    match tributary_event.recv().await {\n      Ok(crate::TributaryEvent::NewTributary(crate::ActiveTributary { spec, tributary })) => {\n        // For each Tributary, spawn a dedicated scanner task\n        tokio::spawn({\n          let raw_db = raw_db.clone();\n          let key = key.clone();\n          let recognized_id = recognized_id.clone();\n          let processors = processors.clone();\n          let serai = serai.clone();\n          async move {\n            let spec = &spec;\n            let reader = tributary.reader();\n            let mut tributary_db = raw_db.clone();\n            loop {\n              // Check if the set was retired, and if so, don't further operate\n              if crate::db::RetiredTributaryDb::get(&raw_db, spec.set()).is_some() {\n                break;\n              }\n\n              // Obtain the next block notification now to prevent obtaining it immediately after\n              // the next block occurs\n              let next_block_notification = tributary.next_block_notification().await;\n\n              handle_new_blocks::<_, _, _, _, _, P>(\n                &mut tributary_db,\n                &key,\n                &recognized_id,\n                &processors,\n                &*serai,\n                &|tx: Transaction| {\n                  let tributary = tributary.clone();\n                  async move {\n                    match tributary.add_transaction(tx.clone()).await {\n                      Ok(_) => {}\n                      // Can happen as this occurs on a distinct DB TXN\n                      Err(TransactionError::InvalidNonce) => {\n                        log::warn!(\n                          \"publishing TX {tx:?} returned InvalidNonce. 
was it already added?\"\n                        )\n                      }\n                      Err(e) => panic!(\"created an invalid transaction: {e:?}\"),\n                    }\n                  }\n                },\n                spec,\n                &reader,\n              )\n              .await;\n\n              // Run either when the notification fires, or every interval of block_time\n              let _ = tokio::time::timeout(\n                Duration::from_secs(tributary::Tributary::<D, Transaction, P>::block_time().into()),\n                next_block_notification,\n              )\n              .await;\n            }\n          }\n        });\n      }\n      // The above loop simply checks the DB every few seconds, voiding the need for this event\n      Ok(crate::TributaryEvent::TributaryRetired(_)) => {}\n      Err(broadcast::error::RecvError::Lagged(_)) => {\n        panic!(\"scan_tributaries lagged to handle tributary_event\")\n      }\n      Err(broadcast::error::RecvError::Closed) => panic!(\"tributary_event sender closed\"),\n    }\n  }\n}\n"
  },
  {
    "path": "coordinator/src/tributary/signing_protocol.rs",
    "content": "/*\n  A MuSig-based signing protocol executed with the validators' keys.\n\n  This is used for confirming the results of a DKG on-chain, an operation requiring all validators\n  which aren't specified as removed while still satisfying a supermajority.\n\n  Since we're using the validator's keys, as needed for their being the root of trust, the\n  coordinator must perform the signing. This is distinct from all other group-signing operations,\n  as they're all done by the processor.\n\n  The MuSig-aggregation achieves on-chain efficiency and enables a more secure design pattern.\n  While we could individually tack votes, that'd require logic to prevent voting multiple times and\n  tracking the accumulated votes. MuSig-aggregation simply requires checking the list is sorted and\n  the list's weight exceeds the threshold.\n\n  Instead of maintaining state in memory, a combination of the DB and re-execution are used. This\n  is deemed acceptable re: performance as:\n\n  1) This is only done prior to a DKG being confirmed on Substrate and is assumed infrequent.\n  2) This is an O(n) algorithm.\n  3) The size of the validator set is bounded by MAX_KEY_SHARES_PER_SET.\n\n  Accordingly, this should be tolerable.\n\n  As for safety, it is explicitly unsafe to reuse nonces across signing sessions. This raises\n  concerns regarding our re-execution which is dependent on fixed nonces. Safety is derived from\n  the nonces being context-bound under a BFT protocol. The flow is as follows:\n\n  1) Decide the nonce.\n  2) Publish the nonces' commitments, receiving everyone elses *and potentially the message to be\n     signed*.\n  3) Sign and publish the signature share.\n\n  In order for nonce re-use to occur, the received nonce commitments (or the message to be signed)\n  would have to be distinct and sign would have to be called again.\n\n  Before we act on any received messages, they're ordered and finalized by a BFT algorithm. 
The\n  only way to operate on distinct received messages would be if:\n\n  1) A logical flaw exists, letting new messages over write prior messages\n  2) A reorganization occurred from chain A to chain B, and with it, different messages\n\n  Reorganizations are not supported, as BFT is assumed by the presence of a BFT algorithm. While\n  a significant amount of processes may be byzantine, leading to BFT being broken, that still will\n  not trigger a reorganization. The only way to move to a distinct chain, with distinct messages,\n  would be by rebuilding the local process (this time following chain B). Upon any complete\n  rebuild, we'd re-decide nonces, achieving safety. This does set a bound preventing partial\n  rebuilds which is accepted.\n\n  Additionally, to ensure a rebuilt service isn't flagged as malicious, we have to check the\n  commitments generated from the decided nonces are in fact its commitments on-chain (TODO).\n\n  TODO: We also need to review how we're handling Processor preprocesses and likely implement the\n  same on-chain-preprocess-matches-presumed-preprocess check before publishing shares.\n*/\n\nuse core::ops::Deref;\nuse std::collections::HashMap;\n\nuse zeroize::{Zeroize, Zeroizing};\n\nuse rand_core::OsRng;\n\nuse blake2::{Digest, Blake2s256};\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::{\n  group::{ff::PrimeField, GroupEncoding},\n  Ciphersuite,\n};\nuse dkg_musig::musig;\nuse frost::{FrostError, dkg::Participant, ThresholdKeys, sign::*};\nuse frost_schnorrkel::Schnorrkel;\n\nuse scale::Encode;\n\nuse serai_client::{\n  Public,\n  validator_sets::primitives::{KeyPair, musig_context, set_keys_message},\n};\n\nuse serai_db::*;\n\nuse crate::tributary::TributarySpec;\n\ncreate_db!(\n  SigningProtocolDb {\n    CachedPreprocesses: (context: &impl Encode) -> [u8; 32]\n  }\n);\n\nstruct SigningProtocol<'a, T: DbTxn, C: Encode> {\n  pub(crate) key: &'a Zeroizing<<Ristretto as Ciphersuite>::F>,\n  pub(crate) spec: &'a TributarySpec,\n  
pub(crate) txn: &'a mut T,\n  pub(crate) context: C,\n}\n\nimpl<T: DbTxn, C: Encode> SigningProtocol<'_, T, C> {\n  fn preprocess_internal(\n    &mut self,\n    participants: &[<Ristretto as Ciphersuite>::G],\n  ) -> (AlgorithmSignMachine<Ristretto, Schnorrkel>, [u8; 64]) {\n    // Encrypt the cached preprocess as recovery of it will enable recovering the private key\n    // While the DB isn't expected to be arbitrarily readable, it isn't a proper secret store and\n    // shouldn't be trusted as one\n    let mut encryption_key = {\n      let mut encryption_key_preimage =\n        Zeroizing::new(b\"Cached Preprocess Encryption Key\".to_vec());\n      encryption_key_preimage.extend(self.context.encode());\n      let repr = Zeroizing::new(self.key.to_repr());\n      encryption_key_preimage.extend(repr.deref());\n      Blake2s256::digest(&encryption_key_preimage)\n    };\n    let encryption_key_slice: &mut [u8] = encryption_key.as_mut();\n\n    let algorithm = Schnorrkel::new(b\"substrate\");\n    let keys: ThresholdKeys<Ristretto> =\n      musig(musig_context(self.spec.set().into()), self.key.clone(), participants)\n        .expect(\"signing for a set we aren't in/validator present multiple times\")\n        .into();\n\n    if CachedPreprocesses::get(self.txn, &self.context).is_none() {\n      let (machine, _) =\n        AlgorithmMachine::new(algorithm.clone(), keys.clone()).preprocess(&mut OsRng);\n\n      let mut cache = machine.cache();\n      assert_eq!(cache.0.len(), 32);\n      #[allow(clippy::needless_range_loop)]\n      for b in 0 .. 32 {\n        cache.0[b] ^= encryption_key_slice[b];\n      }\n\n      CachedPreprocesses::set(self.txn, &self.context, &cache.0);\n    }\n\n    let cached = CachedPreprocesses::get(self.txn, &self.context).unwrap();\n    let mut cached: Zeroizing<[u8; 32]> = Zeroizing::new(cached);\n    #[allow(clippy::needless_range_loop)]\n    for b in 0 .. 
32 {\n      cached[b] ^= encryption_key_slice[b];\n    }\n    encryption_key_slice.zeroize();\n    let (machine, preprocess) =\n      AlgorithmSignMachine::from_cache(algorithm, keys, CachedPreprocess(cached));\n\n    (machine, preprocess.serialize().try_into().unwrap())\n  }\n\n  fn share_internal(\n    &mut self,\n    participants: &[<Ristretto as Ciphersuite>::G],\n    mut serialized_preprocesses: HashMap<Participant, Vec<u8>>,\n    msg: &[u8],\n  ) -> Result<(AlgorithmSignatureMachine<Ristretto, Schnorrkel>, [u8; 32]), Participant> {\n    let machine = self.preprocess_internal(participants).0;\n\n    let mut participants = serialized_preprocesses.keys().copied().collect::<Vec<_>>();\n    participants.sort();\n    let mut preprocesses = HashMap::new();\n    for participant in participants {\n      preprocesses.insert(\n        participant,\n        machine\n          .read_preprocess(&mut serialized_preprocesses.remove(&participant).unwrap().as_slice())\n          .map_err(|_| participant)?,\n      );\n    }\n\n    let (machine, share) = machine.sign(preprocesses, msg).map_err(|e| match e {\n      FrostError::InternalError(e) => unreachable!(\"FrostError::InternalError {e}\"),\n      FrostError::InvalidParticipant(_, _) |\n      FrostError::InvalidSigningSet(_) |\n      FrostError::InvalidParticipantQuantity(_, _) |\n      FrostError::DuplicatedParticipant(_) |\n      FrostError::MissingParticipant(_) => unreachable!(\"{e:?}\"),\n      FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p,\n    })?;\n\n    Ok((machine, share.serialize().try_into().unwrap()))\n  }\n\n  fn complete_internal(\n    machine: AlgorithmSignatureMachine<Ristretto, Schnorrkel>,\n    shares: HashMap<Participant, Vec<u8>>,\n  ) -> Result<[u8; 64], Participant> {\n    let shares = shares\n      .into_iter()\n      .map(|(p, share)| {\n        machine.read_share(&mut share.as_slice()).map(|share| (p, share)).map_err(|_| p)\n      })\n      .collect::<Result<HashMap<_, _>, 
_>>()?;\n    let signature = machine.complete(shares).map_err(|e| match e {\n      FrostError::InternalError(e) => unreachable!(\"FrostError::InternalError {e}\"),\n      FrostError::InvalidParticipant(_, _) |\n      FrostError::InvalidSigningSet(_) |\n      FrostError::InvalidParticipantQuantity(_, _) |\n      FrostError::DuplicatedParticipant(_) |\n      FrostError::MissingParticipant(_) => unreachable!(\"{e:?}\"),\n      FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p,\n    })?;\n    Ok(signature.to_bytes())\n  }\n}\n\n// Get the keys of the participants, noted by their threshold is, and return a new map indexed by\n// the MuSig is.\nfn threshold_i_map_to_keys_and_musig_i_map(\n  spec: &TributarySpec,\n  removed: &[<Ristretto as Ciphersuite>::G],\n  our_key: &Zeroizing<<Ristretto as Ciphersuite>::F>,\n  mut map: HashMap<Participant, Vec<u8>>,\n) -> (Vec<<Ristretto as Ciphersuite>::G>, HashMap<Participant, Vec<u8>>) {\n  // Insert our own index so calculations aren't offset\n  let our_threshold_i = spec\n    .i(removed, <Ristretto as Ciphersuite>::generator() * our_key.deref())\n    .expect(\"MuSig t-of-n signing a for a protocol we were removed from\")\n    .start;\n  assert!(map.insert(our_threshold_i, vec![]).is_none());\n\n  let spec_validators = spec.validators();\n  let key_from_threshold_i = |threshold_i| {\n    for (key, _) in &spec_validators {\n      if threshold_i == spec.i(removed, *key).expect(\"MuSig t-of-n participant was removed\").start {\n        return *key;\n      }\n    }\n    panic!(\"requested info for threshold i which doesn't exist\")\n  };\n\n  let mut sorted = vec![];\n  let mut threshold_is = map.keys().copied().collect::<Vec<_>>();\n  threshold_is.sort();\n  for threshold_i in threshold_is {\n    sorted.push((key_from_threshold_i(threshold_i), map.remove(&threshold_i).unwrap()));\n  }\n\n  // Now that signers are sorted, with their shares, create a map with the is needed for MuSig\n  let mut participants = 
vec![];\n  let mut map = HashMap::new();\n  for (raw_i, (key, share)) in sorted.into_iter().enumerate() {\n    let musig_i = u16::try_from(raw_i).unwrap() + 1;\n    participants.push(key);\n    map.insert(Participant::new(musig_i).unwrap(), share);\n  }\n\n  map.remove(&our_threshold_i).unwrap();\n\n  (participants, map)\n}\n\ntype DkgConfirmerSigningProtocol<'a, T> = SigningProtocol<'a, T, (&'static [u8; 12], u32)>;\n\npub(crate) struct DkgConfirmer<'a, T: DbTxn> {\n  key: &'a Zeroizing<<Ristretto as Ciphersuite>::F>,\n  spec: &'a TributarySpec,\n  removed: Vec<<Ristretto as Ciphersuite>::G>,\n  txn: &'a mut T,\n  attempt: u32,\n}\n\nimpl<T: DbTxn> DkgConfirmer<'_, T> {\n  pub(crate) fn new<'a>(\n    key: &'a Zeroizing<<Ristretto as Ciphersuite>::F>,\n    spec: &'a TributarySpec,\n    txn: &'a mut T,\n    attempt: u32,\n  ) -> Option<DkgConfirmer<'a, T>> {\n    // This relies on how confirmations are inlined into the DKG protocol and they accordingly\n    // share attempts\n    let removed = crate::tributary::removed_as_of_dkg_attempt(txn, spec.genesis(), attempt)?;\n    Some(DkgConfirmer { key, spec, removed, txn, attempt })\n  }\n  fn signing_protocol(&mut self) -> DkgConfirmerSigningProtocol<'_, T> {\n    let context = (b\"DkgConfirmer\", self.attempt);\n    SigningProtocol { key: self.key, spec: self.spec, txn: self.txn, context }\n  }\n\n  fn preprocess_internal(&mut self) -> (AlgorithmSignMachine<Ristretto, Schnorrkel>, [u8; 64]) {\n    let participants = self.spec.validators().iter().map(|val| val.0).collect::<Vec<_>>();\n    self.signing_protocol().preprocess_internal(&participants)\n  }\n  // Get the preprocess for this confirmation.\n  pub(crate) fn preprocess(&mut self) -> [u8; 64] {\n    self.preprocess_internal().1\n  }\n\n  fn share_internal(\n    &mut self,\n    preprocesses: HashMap<Participant, Vec<u8>>,\n    key_pair: &KeyPair,\n  ) -> Result<(AlgorithmSignatureMachine<Ristretto, Schnorrkel>, [u8; 32]), Participant> {\n    let participants = 
self.spec.validators().iter().map(|val| val.0).collect::<Vec<_>>();\n    let preprocesses =\n      threshold_i_map_to_keys_and_musig_i_map(self.spec, &self.removed, self.key, preprocesses).1;\n    let msg = set_keys_message(\n      &self.spec.set(),\n      &self.removed.iter().map(|key| Public::from(key.to_bytes())).collect::<Vec<_>>(),\n      key_pair,\n    );\n    self.signing_protocol().share_internal(&participants, preprocesses, &msg)\n  }\n  // Get the share for this confirmation, if the preprocesses are valid.\n  pub(crate) fn share(\n    &mut self,\n    preprocesses: HashMap<Participant, Vec<u8>>,\n    key_pair: &KeyPair,\n  ) -> Result<[u8; 32], Participant> {\n    self.share_internal(preprocesses, key_pair).map(|(_, share)| share)\n  }\n\n  pub(crate) fn complete(\n    &mut self,\n    preprocesses: HashMap<Participant, Vec<u8>>,\n    key_pair: &KeyPair,\n    shares: HashMap<Participant, Vec<u8>>,\n  ) -> Result<[u8; 64], Participant> {\n    let shares =\n      threshold_i_map_to_keys_and_musig_i_map(self.spec, &self.removed, self.key, shares).1;\n\n    let machine = self\n      .share_internal(preprocesses, key_pair)\n      .expect(\"trying to complete a machine which failed to preprocess\")\n      .0;\n\n    DkgConfirmerSigningProtocol::<'_, T>::complete_internal(machine, shares)\n  }\n}\n"
  },
  {
    "path": "coordinator/src/tributary/spec.rs",
    "content": "use core::{ops::Range, fmt::Debug};\nuse std::{io, collections::HashMap};\n\nuse transcript::{Transcript, RecommendedTranscript};\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::{group::GroupEncoding, Ciphersuite};\nuse frost::Participant;\n\nuse scale::Encode;\nuse borsh::{BorshSerialize, BorshDeserialize};\n\nuse serai_client::{primitives::PublicKey, validator_sets::primitives::ExternalValidatorSet};\n\nfn borsh_serialize_validators<W: io::Write>(\n  validators: &Vec<(<Ristretto as Ciphersuite>::G, u16)>,\n  writer: &mut W,\n) -> Result<(), io::Error> {\n  let len = u16::try_from(validators.len()).unwrap();\n  BorshSerialize::serialize(&len, writer)?;\n  for validator in validators {\n    BorshSerialize::serialize(&validator.0.to_bytes(), writer)?;\n    BorshSerialize::serialize(&validator.1, writer)?;\n  }\n  Ok(())\n}\n\nfn borsh_deserialize_validators<R: io::Read>(\n  reader: &mut R,\n) -> Result<Vec<(<Ristretto as Ciphersuite>::G, u16)>, io::Error> {\n  let len: u16 = BorshDeserialize::deserialize_reader(reader)?;\n  let mut res = vec![];\n  for _ in 0 .. 
len {\n    let compressed: [u8; 32] = BorshDeserialize::deserialize_reader(reader)?;\n    let point = Option::from(<Ristretto as Ciphersuite>::G::from_bytes(&compressed))\n      .ok_or_else(|| io::Error::other(\"invalid point for validator\"))?;\n    let weight: u16 = BorshDeserialize::deserialize_reader(reader)?;\n    res.push((point, weight));\n  }\n  Ok(res)\n}\n\n#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]\npub struct TributarySpec {\n  serai_block: [u8; 32],\n  start_time: u64,\n  set: ExternalValidatorSet,\n  #[borsh(\n    serialize_with = \"borsh_serialize_validators\",\n    deserialize_with = \"borsh_deserialize_validators\"\n  )]\n  validators: Vec<(<Ristretto as Ciphersuite>::G, u16)>,\n}\n\nimpl TributarySpec {\n  pub fn new(\n    serai_block: [u8; 32],\n    start_time: u64,\n    set: ExternalValidatorSet,\n    set_participants: Vec<(PublicKey, u16)>,\n  ) -> TributarySpec {\n    let mut validators = vec![];\n    for (participant, shares) in set_participants {\n      let participant = <Ristretto as Ciphersuite>::read_G::<&[u8]>(&mut participant.0.as_ref())\n        .expect(\"invalid key registered as participant\");\n      validators.push((participant, shares));\n    }\n\n    Self { serai_block, start_time, set, validators }\n  }\n\n  pub fn set(&self) -> ExternalValidatorSet {\n    self.set\n  }\n\n  pub fn genesis(&self) -> [u8; 32] {\n    // Calculate the genesis for this Tributary\n    let mut genesis = RecommendedTranscript::new(b\"Serai Tributary Genesis\");\n    // This locks it to a specific Serai chain\n    genesis.append_message(b\"serai_block\", self.serai_block);\n    genesis.append_message(b\"session\", self.set.session.0.to_le_bytes());\n    genesis.append_message(b\"network\", self.set.network.encode());\n    let genesis = genesis.challenge(b\"genesis\");\n    let genesis_ref: &[u8] = genesis.as_ref();\n    genesis_ref[.. 
32].try_into().unwrap()\n  }\n\n  pub fn start_time(&self) -> u64 {\n    self.start_time\n  }\n\n  pub fn n(&self, removed_validators: &[<Ristretto as Ciphersuite>::G]) -> u16 {\n    self\n      .validators\n      .iter()\n      .map(|(validator, weight)| if removed_validators.contains(validator) { 0 } else { *weight })\n      .sum()\n  }\n\n  pub fn t(&self) -> u16 {\n    // t doesn't change with regards to the amount of removed validators\n    ((2 * self.n(&[])) / 3) + 1\n  }\n\n  pub fn i(\n    &self,\n    removed_validators: &[<Ristretto as Ciphersuite>::G],\n    key: <Ristretto as Ciphersuite>::G,\n  ) -> Option<Range<Participant>> {\n    let mut all_is = HashMap::new();\n    let mut i = 1;\n    for (validator, weight) in &self.validators {\n      all_is.insert(\n        *validator,\n        Range { start: Participant::new(i).unwrap(), end: Participant::new(i + weight).unwrap() },\n      );\n      i += weight;\n    }\n\n    let original_i = all_is.get(&key)?.clone();\n    let mut result_i = original_i.clone();\n    for removed_validator in removed_validators {\n      let removed_i = all_is\n        .get(removed_validator)\n        .expect(\"removed validator wasn't present in set to begin with\");\n      // If the queried key was removed, return None\n      if &original_i == removed_i {\n        return None;\n      }\n\n      // If the removed was before the queried, shift the queried down accordingly\n      if removed_i.start < original_i.start {\n        let removed_shares = u16::from(removed_i.end) - u16::from(removed_i.start);\n        result_i.start = Participant::new(u16::from(original_i.start) - removed_shares).unwrap();\n        result_i.end = Participant::new(u16::from(original_i.end) - removed_shares).unwrap();\n      }\n    }\n    Some(result_i)\n  }\n\n  pub fn reverse_lookup_i(\n    &self,\n    removed_validators: &[<Ristretto as Ciphersuite>::G],\n    i: Participant,\n  ) -> Option<<Ristretto as Ciphersuite>::G> {\n    for (validator, _) in 
&self.validators {\n      if self.i(removed_validators, *validator).map_or(false, |range| range.contains(&i)) {\n        return Some(*validator);\n      }\n    }\n    None\n  }\n\n  pub fn validators(&self) -> Vec<(<Ristretto as Ciphersuite>::G, u64)> {\n    self.validators.iter().map(|(validator, weight)| (*validator, u64::from(*weight))).collect()\n  }\n}\n"
  },
  {
    "path": "coordinator/src/tributary/transaction.rs",
    "content": "use core::{ops::Deref, fmt::Debug};\nuse std::io;\n\nuse zeroize::Zeroizing;\nuse rand_core::{RngCore, CryptoRng};\n\nuse blake2::{Digest, Blake2s256};\nuse transcript::{Transcript, RecommendedTranscript};\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::{\n  group::{ff::Field, GroupEncoding},\n  Ciphersuite,\n};\nuse schnorr::SchnorrSignature;\nuse frost::Participant;\n\nuse scale::{Encode, Decode};\nuse processor_messages::coordinator::SubstrateSignableId;\n\nuse tributary::{\n  TRANSACTION_SIZE_LIMIT, ReadWrite,\n  transaction::{Signed, TransactionError, TransactionKind, Transaction as TransactionTrait},\n};\n\n#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode)]\npub enum Label {\n  Preprocess,\n  Share,\n}\n\nimpl Label {\n  // TODO: Should nonces be u8 thanks to our use of topics?\n  pub fn nonce(&self) -> u32 {\n    match self {\n      Label::Preprocess => 0,\n      Label::Share => 1,\n    }\n  }\n}\n\n#[derive(Clone, PartialEq, Eq)]\npub struct SignData<Id: Clone + PartialEq + Eq + Debug + Encode + Decode> {\n  pub plan: Id,\n  pub attempt: u32,\n  pub label: Label,\n\n  pub data: Vec<Vec<u8>>,\n\n  pub signed: Signed,\n}\n\nimpl<Id: Clone + PartialEq + Eq + Debug + Encode + Decode> Debug for SignData<Id> {\n  fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {\n    fmt\n      .debug_struct(\"SignData\")\n      .field(\"id\", &hex::encode(self.plan.encode()))\n      .field(\"attempt\", &self.attempt)\n      .field(\"label\", &self.label)\n      .field(\"signer\", &hex::encode(self.signed.signer.to_bytes()))\n      .finish_non_exhaustive()\n  }\n}\n\nimpl<Id: Clone + PartialEq + Eq + Debug + Encode + Decode> SignData<Id> {\n  pub(crate) fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {\n    let plan = Id::decode(&mut scale::IoReader(&mut *reader))\n      .map_err(|_| io::Error::other(\"invalid plan in SignData\"))?;\n\n    let mut attempt = [0; 4];\n    reader.read_exact(&mut attempt)?;\n    let 
attempt = u32::from_le_bytes(attempt);\n\n    let mut label = [0; 1];\n    reader.read_exact(&mut label)?;\n    let label = match label[0] {\n      0 => Label::Preprocess,\n      1 => Label::Share,\n      _ => Err(io::Error::other(\"invalid label in SignData\"))?,\n    };\n\n    let data = {\n      let mut data_pieces = [0];\n      reader.read_exact(&mut data_pieces)?;\n      if data_pieces[0] == 0 {\n        Err(io::Error::other(\"zero pieces of data in SignData\"))?;\n      }\n      let mut all_data = vec![];\n      for _ in 0 .. data_pieces[0] {\n        let mut data_len = [0; 2];\n        reader.read_exact(&mut data_len)?;\n        let mut data = vec![0; usize::from(u16::from_le_bytes(data_len))];\n        reader.read_exact(&mut data)?;\n        all_data.push(data);\n      }\n      all_data\n    };\n\n    let signed = Signed::read_without_nonce(reader, label.nonce())?;\n\n    Ok(SignData { plan, attempt, label, data, signed })\n  }\n\n  pub(crate) fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {\n    writer.write_all(&self.plan.encode())?;\n    writer.write_all(&self.attempt.to_le_bytes())?;\n    writer.write_all(&[match self.label {\n      Label::Preprocess => 0,\n      Label::Share => 1,\n    }])?;\n\n    writer.write_all(&[u8::try_from(self.data.len()).unwrap()])?;\n    for data in &self.data {\n      if data.len() > u16::MAX.into() {\n        // Currently, the largest individual preprocess is a Monero transaction\n        // It provides 4 commitments per input (128 bytes), a 64-byte proof for them, along with a\n        // key image and proof (96 bytes)\n        // Even with all of that, we could support 227 inputs in a single TX\n        // Monero is limited to ~120 inputs per TX\n        //\n        // Bitcoin has a much higher input count of 520, yet it only uses 64 bytes per preprocess\n        Err(io::Error::other(\"signing data exceeded 65535 bytes\"))?;\n      }\n      
writer.write_all(&u16::try_from(data.len()).unwrap().to_le_bytes())?;\n      writer.write_all(data)?;\n    }\n\n    self.signed.write_without_nonce(writer)\n  }\n}\n\n#[derive(Clone, PartialEq, Eq)]\npub enum Transaction {\n  RemoveParticipantDueToDkg {\n    participant: <Ristretto as Ciphersuite>::G,\n    signed: Signed,\n  },\n\n  DkgCommitments {\n    attempt: u32,\n    commitments: Vec<Vec<u8>>,\n    signed: Signed,\n  },\n  DkgShares {\n    attempt: u32,\n    // Sending Participant, Receiving Participant, Share\n    shares: Vec<Vec<Vec<u8>>>,\n    confirmation_nonces: [u8; 64],\n    signed: Signed,\n  },\n  InvalidDkgShare {\n    attempt: u32,\n    accuser: Participant,\n    faulty: Participant,\n    blame: Option<Vec<u8>>,\n    signed: Signed,\n  },\n  DkgConfirmed {\n    attempt: u32,\n    confirmation_share: [u8; 32],\n    signed: Signed,\n  },\n\n  // Co-sign a Substrate block.\n  CosignSubstrateBlock([u8; 32]),\n\n  // When we have synchrony on a batch, we can allow signing it\n  // TODO (never?): This is less efficient compared to an ExternalBlock provided transaction,\n  // which would be binding over the block hash and automatically achieve synchrony on all\n  // relevant batches. 
ExternalBlock was removed for this due to complexity around the pipeline\n  // with the current processor, yet it would still be an improvement.\n  Batch {\n    block: [u8; 32],\n    batch: u32,\n  },\n  // When a Serai block is finalized, with the contained batches, we can allow the associated plan\n  // IDs\n  SubstrateBlock(u64),\n\n  SubstrateSign(SignData<SubstrateSignableId>),\n  Sign(SignData<[u8; 32]>),\n  // This is defined as an Unsigned transaction in order to de-duplicate SignCompleted amongst\n  // reporters (who should all report the same thing)\n  // We do still track the signer in order to prevent a single signer from publishing arbitrarily\n  // many TXs without penalty\n  // Here, they're denoted as the first_signer, as only the signer of the first TX to be included\n  // with this pairing will be remembered on-chain\n  SignCompleted {\n    plan: [u8; 32],\n    tx_hash: Vec<u8>,\n    first_signer: <Ristretto as Ciphersuite>::G,\n    signature: SchnorrSignature<Ristretto>,\n  },\n\n  SlashReport(Vec<u32>, Signed),\n}\n\nimpl Debug for Transaction {\n  fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {\n    match self {\n      Transaction::RemoveParticipantDueToDkg { participant, signed } => fmt\n        .debug_struct(\"Transaction::RemoveParticipantDueToDkg\")\n        .field(\"participant\", &hex::encode(participant.to_bytes()))\n        .field(\"signer\", &hex::encode(signed.signer.to_bytes()))\n        .finish_non_exhaustive(),\n      Transaction::DkgCommitments { attempt, commitments: _, signed } => fmt\n        .debug_struct(\"Transaction::DkgCommitments\")\n        .field(\"attempt\", attempt)\n        .field(\"signer\", &hex::encode(signed.signer.to_bytes()))\n        .finish_non_exhaustive(),\n      Transaction::DkgShares { attempt, signed, .. 
} => fmt\n        .debug_struct(\"Transaction::DkgShares\")\n        .field(\"attempt\", attempt)\n        .field(\"signer\", &hex::encode(signed.signer.to_bytes()))\n        .finish_non_exhaustive(),\n      Transaction::InvalidDkgShare { attempt, accuser, faulty, .. } => fmt\n        .debug_struct(\"Transaction::InvalidDkgShare\")\n        .field(\"attempt\", attempt)\n        .field(\"accuser\", accuser)\n        .field(\"faulty\", faulty)\n        .finish_non_exhaustive(),\n      Transaction::DkgConfirmed { attempt, confirmation_share: _, signed } => fmt\n        .debug_struct(\"Transaction::DkgConfirmed\")\n        .field(\"attempt\", attempt)\n        .field(\"signer\", &hex::encode(signed.signer.to_bytes()))\n        .finish_non_exhaustive(),\n      Transaction::CosignSubstrateBlock(block) => fmt\n        .debug_struct(\"Transaction::CosignSubstrateBlock\")\n        .field(\"block\", &hex::encode(block))\n        .finish(),\n      Transaction::Batch { block, batch } => fmt\n        .debug_struct(\"Transaction::Batch\")\n        .field(\"block\", &hex::encode(block))\n        .field(\"batch\", &batch)\n        .finish(),\n      Transaction::SubstrateBlock(block) => {\n        fmt.debug_struct(\"Transaction::SubstrateBlock\").field(\"block\", block).finish()\n      }\n      Transaction::SubstrateSign(sign_data) => {\n        fmt.debug_struct(\"Transaction::SubstrateSign\").field(\"sign_data\", sign_data).finish()\n      }\n      Transaction::Sign(sign_data) => {\n        fmt.debug_struct(\"Transaction::Sign\").field(\"sign_data\", sign_data).finish()\n      }\n      Transaction::SignCompleted { plan, tx_hash, .. 
} => fmt\n        .debug_struct(\"Transaction::SignCompleted\")\n        .field(\"plan\", &hex::encode(plan))\n        .field(\"tx_hash\", &hex::encode(tx_hash))\n        .finish_non_exhaustive(),\n      Transaction::SlashReport(points, signed) => fmt\n        .debug_struct(\"Transaction::SignCompleted\")\n        .field(\"points\", points)\n        .field(\"signed\", signed)\n        .finish(),\n    }\n  }\n}\n\nimpl ReadWrite for Transaction {\n  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {\n    let mut kind = [0];\n    reader.read_exact(&mut kind)?;\n\n    match kind[0] {\n      0 => Ok(Transaction::RemoveParticipantDueToDkg {\n        participant: Ristretto::read_G(reader)?,\n        signed: Signed::read_without_nonce(reader, 0)?,\n      }),\n\n      1 => {\n        let mut attempt = [0; 4];\n        reader.read_exact(&mut attempt)?;\n        let attempt = u32::from_le_bytes(attempt);\n\n        let commitments = {\n          let mut commitments_len = [0; 1];\n          reader.read_exact(&mut commitments_len)?;\n          let commitments_len = usize::from(commitments_len[0]);\n          if commitments_len == 0 {\n            Err(io::Error::other(\"zero commitments in DkgCommitments\"))?;\n          }\n\n          let mut each_commitments_len = [0; 2];\n          reader.read_exact(&mut each_commitments_len)?;\n          let each_commitments_len = usize::from(u16::from_le_bytes(each_commitments_len));\n          if (commitments_len * each_commitments_len) > TRANSACTION_SIZE_LIMIT {\n            Err(io::Error::other(\n              \"commitments present in transaction exceeded transaction size limit\",\n            ))?;\n          }\n          let mut commitments = vec![vec![]; commitments_len];\n          for commitments in &mut commitments {\n            *commitments = vec![0; each_commitments_len];\n            reader.read_exact(commitments)?;\n          }\n          commitments\n        };\n\n        let signed = 
Signed::read_without_nonce(reader, 0)?;\n\n        Ok(Transaction::DkgCommitments { attempt, commitments, signed })\n      }\n\n      2 => {\n        let mut attempt = [0; 4];\n        reader.read_exact(&mut attempt)?;\n        let attempt = u32::from_le_bytes(attempt);\n\n        let shares = {\n          let mut share_quantity = [0; 1];\n          reader.read_exact(&mut share_quantity)?;\n\n          let mut key_share_quantity = [0; 1];\n          reader.read_exact(&mut key_share_quantity)?;\n\n          let mut share_len = [0; 2];\n          reader.read_exact(&mut share_len)?;\n          let share_len = usize::from(u16::from_le_bytes(share_len));\n\n          let mut all_shares = vec![];\n          for _ in 0 .. share_quantity[0] {\n            let mut shares = vec![];\n            for _ in 0 .. key_share_quantity[0] {\n              let mut share = vec![0; share_len];\n              reader.read_exact(&mut share)?;\n              shares.push(share);\n            }\n            all_shares.push(shares);\n          }\n          all_shares\n        };\n\n        let mut confirmation_nonces = [0; 64];\n        reader.read_exact(&mut confirmation_nonces)?;\n\n        let signed = Signed::read_without_nonce(reader, 1)?;\n\n        Ok(Transaction::DkgShares { attempt, shares, confirmation_nonces, signed })\n      }\n\n      3 => {\n        let mut attempt = [0; 4];\n        reader.read_exact(&mut attempt)?;\n        let attempt = u32::from_le_bytes(attempt);\n\n        let mut accuser = [0; 2];\n        reader.read_exact(&mut accuser)?;\n        let accuser = Participant::new(u16::from_le_bytes(accuser))\n          .ok_or_else(|| io::Error::other(\"invalid participant in InvalidDkgShare\"))?;\n\n        let mut faulty = [0; 2];\n        reader.read_exact(&mut faulty)?;\n        let faulty = Participant::new(u16::from_le_bytes(faulty))\n          .ok_or_else(|| io::Error::other(\"invalid participant in InvalidDkgShare\"))?;\n\n        let mut blame_len = [0; 2];\n        
reader.read_exact(&mut blame_len)?;\n        let mut blame = vec![0; u16::from_le_bytes(blame_len).into()];\n        reader.read_exact(&mut blame)?;\n\n        // This shares a nonce with DkgConfirmed as only one is expected\n        let signed = Signed::read_without_nonce(reader, 2)?;\n\n        Ok(Transaction::InvalidDkgShare {\n          attempt,\n          accuser,\n          faulty,\n          blame: Some(blame).filter(|blame| !blame.is_empty()),\n          signed,\n        })\n      }\n\n      4 => {\n        let mut attempt = [0; 4];\n        reader.read_exact(&mut attempt)?;\n        let attempt = u32::from_le_bytes(attempt);\n\n        let mut confirmation_share = [0; 32];\n        reader.read_exact(&mut confirmation_share)?;\n\n        let signed = Signed::read_without_nonce(reader, 2)?;\n\n        Ok(Transaction::DkgConfirmed { attempt, confirmation_share, signed })\n      }\n\n      5 => {\n        let mut block = [0; 32];\n        reader.read_exact(&mut block)?;\n        Ok(Transaction::CosignSubstrateBlock(block))\n      }\n\n      6 => {\n        let mut block = [0; 32];\n        reader.read_exact(&mut block)?;\n        let mut batch = [0; 4];\n        reader.read_exact(&mut batch)?;\n        Ok(Transaction::Batch { block, batch: u32::from_le_bytes(batch) })\n      }\n\n      7 => {\n        let mut block = [0; 8];\n        reader.read_exact(&mut block)?;\n        Ok(Transaction::SubstrateBlock(u64::from_le_bytes(block)))\n      }\n\n      8 => SignData::read(reader).map(Transaction::SubstrateSign),\n      9 => SignData::read(reader).map(Transaction::Sign),\n\n      10 => {\n        let mut plan = [0; 32];\n        reader.read_exact(&mut plan)?;\n\n        let mut tx_hash_len = [0];\n        reader.read_exact(&mut tx_hash_len)?;\n        let mut tx_hash = vec![0; usize::from(tx_hash_len[0])];\n        reader.read_exact(&mut tx_hash)?;\n\n        let first_signer = Ristretto::read_G(reader)?;\n        let signature = 
SchnorrSignature::<Ristretto>::read(reader)?;\n\n        Ok(Transaction::SignCompleted { plan, tx_hash, first_signer, signature })\n      }\n\n      11 => {\n        let mut len = [0];\n        reader.read_exact(&mut len)?;\n        let len = len[0];\n        // If the set has as many validators as MAX_KEY_SHARES_PER_SET, then the amount of distinct\n        // validators (the amount of validators reported on) will be at most\n        // `MAX_KEY_SHARES_PER_SET - 1`\n        if u32::from(len) > (serai_client::validator_sets::primitives::MAX_KEY_SHARES_PER_SET - 1) {\n          Err(io::Error::other(\"more points reported than allowed validator\"))?;\n        }\n        let mut points = vec![0u32; len.into()];\n        for points in &mut points {\n          let mut these_points = [0; 4];\n          reader.read_exact(&mut these_points)?;\n          *points = u32::from_le_bytes(these_points);\n        }\n        Ok(Transaction::SlashReport(points, Signed::read_without_nonce(reader, 0)?))\n      }\n\n      _ => Err(io::Error::other(\"invalid transaction type\")),\n    }\n  }\n\n  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {\n    match self {\n      Transaction::RemoveParticipantDueToDkg { participant, signed } => {\n        writer.write_all(&[0])?;\n        writer.write_all(&participant.to_bytes())?;\n        signed.write_without_nonce(writer)\n      }\n\n      Transaction::DkgCommitments { attempt, commitments, signed } => {\n        writer.write_all(&[1])?;\n        writer.write_all(&attempt.to_le_bytes())?;\n        if commitments.is_empty() {\n          Err(io::Error::other(\"zero commitments in DkgCommitments\"))?\n        }\n        writer.write_all(&[u8::try_from(commitments.len()).unwrap()])?;\n        for commitments_i in commitments {\n          if commitments_i.len() != commitments[0].len() {\n            Err(io::Error::other(\"commitments of differing sizes in DkgCommitments\"))?\n          }\n        }\n        
writer.write_all(&u16::try_from(commitments[0].len()).unwrap().to_le_bytes())?;\n        for commitments in commitments {\n          writer.write_all(commitments)?;\n        }\n        signed.write_without_nonce(writer)\n      }\n\n      Transaction::DkgShares { attempt, shares, confirmation_nonces, signed } => {\n        writer.write_all(&[2])?;\n        writer.write_all(&attempt.to_le_bytes())?;\n\n        // `shares` is a Vec which is supposed to map to a HashMap<Participant, Vec<u8>>. Since we\n        // bound participants to 150, this conversion is safe if a valid in-memory transaction.\n        writer.write_all(&[u8::try_from(shares.len()).unwrap()])?;\n        // This assumes at least one share is being sent to another party\n        writer.write_all(&[u8::try_from(shares[0].len()).unwrap()])?;\n        let share_len = shares[0][0].len();\n        // For BLS12-381 G2, this would be:\n        // - A 32-byte share\n        // - A 96-byte ephemeral key\n        // - A 128-byte signature\n        // Hence why this has to be u16\n        writer.write_all(&u16::try_from(share_len).unwrap().to_le_bytes())?;\n\n        for these_shares in shares {\n          assert_eq!(these_shares.len(), shares[0].len(), \"amount of sent shares was variable\");\n          for share in these_shares {\n            assert_eq!(share.len(), share_len, \"sent shares were of variable length\");\n            writer.write_all(share)?;\n          }\n        }\n\n        writer.write_all(confirmation_nonces)?;\n        signed.write_without_nonce(writer)\n      }\n\n      Transaction::InvalidDkgShare { attempt, accuser, faulty, blame, signed } => {\n        writer.write_all(&[3])?;\n        writer.write_all(&attempt.to_le_bytes())?;\n        writer.write_all(&u16::from(*accuser).to_le_bytes())?;\n        writer.write_all(&u16::from(*faulty).to_le_bytes())?;\n\n        // Flattens Some(vec![]) to None on the expectation no actual blame will be 0-length\n        assert!(blame.as_ref().map_or(1, 
Vec::len) != 0);\n        let blame_len =\n          u16::try_from(blame.as_ref().unwrap_or(&vec![]).len()).expect(\"blame exceeded 64 KB\");\n        writer.write_all(&blame_len.to_le_bytes())?;\n        writer.write_all(blame.as_ref().unwrap_or(&vec![]))?;\n\n        signed.write_without_nonce(writer)\n      }\n\n      Transaction::DkgConfirmed { attempt, confirmation_share, signed } => {\n        writer.write_all(&[4])?;\n        writer.write_all(&attempt.to_le_bytes())?;\n        writer.write_all(confirmation_share)?;\n        signed.write_without_nonce(writer)\n      }\n\n      Transaction::CosignSubstrateBlock(block) => {\n        writer.write_all(&[5])?;\n        writer.write_all(block)\n      }\n\n      Transaction::Batch { block, batch } => {\n        writer.write_all(&[6])?;\n        writer.write_all(block)?;\n        writer.write_all(&batch.to_le_bytes())\n      }\n\n      Transaction::SubstrateBlock(block) => {\n        writer.write_all(&[7])?;\n        writer.write_all(&block.to_le_bytes())\n      }\n\n      Transaction::SubstrateSign(data) => {\n        writer.write_all(&[8])?;\n        data.write(writer)\n      }\n      Transaction::Sign(data) => {\n        writer.write_all(&[9])?;\n        data.write(writer)\n      }\n      Transaction::SignCompleted { plan, tx_hash, first_signer, signature } => {\n        writer.write_all(&[10])?;\n        writer.write_all(plan)?;\n        writer\n          .write_all(&[u8::try_from(tx_hash.len()).expect(\"tx hash length exceed 255 bytes\")])?;\n        writer.write_all(tx_hash)?;\n        writer.write_all(&first_signer.to_bytes())?;\n        signature.write(writer)\n      }\n      Transaction::SlashReport(points, signed) => {\n        writer.write_all(&[11])?;\n        writer.write_all(&[u8::try_from(points.len()).unwrap()])?;\n        for points in points {\n          writer.write_all(&points.to_le_bytes())?;\n        }\n        signed.write_without_nonce(writer)\n      }\n    }\n  }\n}\n\nimpl TransactionTrait 
for Transaction {\n  fn kind(&self) -> TransactionKind<'_> {\n    match self {\n      Transaction::RemoveParticipantDueToDkg { participant, signed } => {\n        TransactionKind::Signed((b\"remove\", participant.to_bytes()).encode(), signed)\n      }\n\n      Transaction::DkgCommitments { attempt, commitments: _, signed } |\n      Transaction::DkgShares { attempt, signed, .. } |\n      Transaction::InvalidDkgShare { attempt, signed, .. } |\n      Transaction::DkgConfirmed { attempt, signed, .. } => {\n        TransactionKind::Signed((b\"dkg\", attempt).encode(), signed)\n      }\n\n      Transaction::CosignSubstrateBlock(_) => TransactionKind::Provided(\"cosign\"),\n\n      Transaction::Batch { .. } => TransactionKind::Provided(\"batch\"),\n      Transaction::SubstrateBlock(_) => TransactionKind::Provided(\"serai\"),\n\n      Transaction::SubstrateSign(data) => {\n        TransactionKind::Signed((b\"substrate\", data.plan, data.attempt).encode(), &data.signed)\n      }\n      Transaction::Sign(data) => {\n        TransactionKind::Signed((b\"sign\", data.plan, data.attempt).encode(), &data.signed)\n      }\n      Transaction::SignCompleted { .. } => TransactionKind::Unsigned,\n\n      Transaction::SlashReport(_, signed) => {\n        TransactionKind::Signed(b\"slash_report\".to_vec(), signed)\n      }\n    }\n  }\n\n  fn hash(&self) -> [u8; 32] {\n    let mut tx = self.serialize();\n    if let TransactionKind::Signed(_, signed) = self.kind() {\n      // Make sure the part we're cutting off is the signature\n      assert_eq!(tx.drain((tx.len() - 64) ..).collect::<Vec<_>>(), signed.signature.serialize());\n    }\n    Blake2s256::digest([b\"Coordinator Tributary Transaction\".as_slice(), &tx].concat()).into()\n  }\n\n  fn verify(&self) -> Result<(), TransactionError> {\n    // TODO: Check SubstrateSign's lengths here\n\n    if let Transaction::SignCompleted { first_signer, signature, .. 
} = self {\n      if !signature.verify(*first_signer, self.sign_completed_challenge()) {\n        Err(TransactionError::InvalidContent)?;\n      }\n    }\n\n    Ok(())\n  }\n}\n\nimpl Transaction {\n  // Used to initially construct transactions so we can then get sig hashes and perform signing\n  pub fn empty_signed() -> Signed {\n    Signed {\n      signer: Ristretto::generator(),\n      nonce: 0,\n      signature: SchnorrSignature::<Ristretto> {\n        R: Ristretto::generator(),\n        s: <Ristretto as Ciphersuite>::F::ZERO,\n      },\n    }\n  }\n\n  // Sign a transaction\n  pub fn sign<R: RngCore + CryptoRng>(\n    &mut self,\n    rng: &mut R,\n    genesis: [u8; 32],\n    key: &Zeroizing<<Ristretto as Ciphersuite>::F>,\n  ) {\n    fn signed(tx: &mut Transaction) -> (u32, &mut Signed) {\n      #[allow(clippy::match_same_arms)] // Doesn't make semantic sense here\n      let nonce = match tx {\n        Transaction::RemoveParticipantDueToDkg { .. } => 0,\n\n        Transaction::DkgCommitments { .. } => 0,\n        Transaction::DkgShares { .. } => 1,\n        Transaction::InvalidDkgShare { .. } | Transaction::DkgConfirmed { .. } => 2,\n\n        Transaction::CosignSubstrateBlock(_) => panic!(\"signing CosignSubstrateBlock\"),\n\n        Transaction::Batch { .. } => panic!(\"signing Batch\"),\n        Transaction::SubstrateBlock(_) => panic!(\"signing SubstrateBlock\"),\n\n        Transaction::SubstrateSign(data) => data.label.nonce(),\n        Transaction::Sign(data) => data.label.nonce(),\n\n        Transaction::SignCompleted { .. } => panic!(\"signing SignCompleted\"),\n\n        Transaction::SlashReport(_, _) => 0,\n      };\n\n      (\n        nonce,\n        #[allow(clippy::match_same_arms)]\n        match tx {\n          Transaction::RemoveParticipantDueToDkg { ref mut signed, .. } |\n          Transaction::DkgCommitments { ref mut signed, .. } |\n          Transaction::DkgShares { ref mut signed, .. 
} |\n          Transaction::InvalidDkgShare { ref mut signed, .. } |\n          Transaction::DkgConfirmed { ref mut signed, .. } => signed,\n\n          Transaction::CosignSubstrateBlock(_) => panic!(\"signing CosignSubstrateBlock\"),\n\n          Transaction::Batch { .. } => panic!(\"signing Batch\"),\n          Transaction::SubstrateBlock(_) => panic!(\"signing SubstrateBlock\"),\n\n          Transaction::SubstrateSign(ref mut data) => &mut data.signed,\n          Transaction::Sign(ref mut data) => &mut data.signed,\n\n          Transaction::SignCompleted { .. } => panic!(\"signing SignCompleted\"),\n\n          Transaction::SlashReport(_, ref mut signed) => signed,\n        },\n      )\n    }\n\n    let (nonce, signed_ref) = signed(self);\n    signed_ref.signer = Ristretto::generator() * key.deref();\n    signed_ref.nonce = nonce;\n\n    let sig_nonce = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(rng));\n    signed(self).1.signature.R = <Ristretto as Ciphersuite>::generator() * sig_nonce.deref();\n    let sig_hash = self.sig_hash(genesis);\n    signed(self).1.signature = SchnorrSignature::<Ristretto>::sign(key, sig_nonce, sig_hash);\n  }\n\n  pub fn sign_completed_challenge(&self) -> <Ristretto as Ciphersuite>::F {\n    if let Transaction::SignCompleted { plan, tx_hash, first_signer, signature } = self {\n      let mut transcript =\n        RecommendedTranscript::new(b\"Coordinator Tributary Transaction SignCompleted\");\n      transcript.append_message(b\"plan\", plan);\n      transcript.append_message(b\"tx_hash\", tx_hash);\n      transcript.append_message(b\"signer\", first_signer.to_bytes());\n      transcript.append_message(b\"nonce\", signature.R.to_bytes());\n      Ristretto::hash_to_F(b\"SignCompleted signature\", &transcript.challenge(b\"challenge\"))\n    } else {\n      panic!(\"sign_completed_challenge called on transaction which wasn't SignCompleted\")\n    }\n  }\n}\n"
  },
  {
    "path": "coordinator/tributary/Cargo.toml",
    "content": "[package]\nname = \"tributary-chain\"\nversion = \"0.1.0\"\ndescription = \"A micro-blockchain to provide consensus and ordering to P2P communication\"\nlicense = \"AGPL-3.0-only\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/coordinator/tributary\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nedition = \"2021\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nasync-trait = { version = \"0.1\", default-features = false }\nthiserror = { version = \"1\", default-features = false }\n\nsubtle = { version = \"^2\", default-features = false, features = [\"std\"] }\nzeroize = { version = \"^1.5\", default-features = false, features = [\"std\"] }\n\nrand = { version = \"0.8\", default-features = false, features = [\"std\"] }\nrand_chacha = { version = \"0.3\", default-features = false, features = [\"std\"] }\n\nblake2 = { version = \"0.10\", default-features = false, features = [\"std\"] }\ntranscript = { package = \"flexible-transcript\", path = \"../../crypto/transcript\", default-features = false, features = [\"std\", \"recommended\"] }\n\ndalek-ff-group = { path = \"../../crypto/dalek-ff-group\" }\nciphersuite = { package = \"ciphersuite\", path = \"../../crypto/ciphersuite\", default-features = false, features = [\"std\"] }\nschnorr = { package = \"schnorr-signatures\", path = \"../../crypto/schnorr\", default-features = false, features = [\"std\"] }\n\nhex = { version = \"0.4\", default-features = false, features = [\"std\"] }\nlog = { version = \"0.4\", default-features = false, features = [\"std\"] }\n\nserai-db = { path = \"../../common/db\" }\n\nscale = { package = \"parity-scale-codec\", version = \"3\", default-features = false, features = [\"std\", \"derive\"] }\nfutures-util = { version = \"0.3\", default-features = false, features = [\"std\", \"sink\", \"channel\"] }\nfutures-channel = { version = \"0.3\", default-features 
= false, features = [\"std\", \"sink\"] }\ntendermint = { package = \"tendermint-machine\", path = \"./tendermint\" }\n\ntokio = { version = \"1\", default-features = false, features = [\"sync\", \"time\", \"rt\"] }\n\n[dev-dependencies]\ntokio = { version = \"1\", features = [\"macros\"] }\n\n[features]\ntests = []\n"
  },
  {
    "path": "coordinator/tributary/LICENSE",
    "content": "AGPL-3.0-only license\n\nCopyright (c) 2023 Luke Parker\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License Version 3 as\npublished by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n"
  },
  {
    "path": "coordinator/tributary/README.md",
    "content": "# Tributary\n\nA verifiable, ordered broadcast layer implemented as a BFT micro-blockchain.\n"
  },
  {
    "path": "coordinator/tributary/src/block.rs",
    "content": "use std::{\n  io,\n  collections::{VecDeque, HashSet, HashMap},\n};\n\nuse thiserror::Error;\n\nuse blake2::{Digest, Blake2s256};\n\nuse tendermint::ext::{Network, Commit};\n\nuse crate::{\n  transaction::{\n    TransactionError, Signed, TransactionKind, Transaction as TransactionTrait, GAIN,\n    verify_transaction,\n  },\n  BLOCK_SIZE_LIMIT, ReadWrite, merkle, Transaction,\n  tendermint::tx::verify_tendermint_tx,\n};\n\n#[derive(Clone, PartialEq, Eq, Debug, Error)]\npub enum BlockError {\n  /// Block was too large.\n  #[error(\"block exceeded size limit\")]\n  TooLargeBlock,\n  /// Header specified a parent which wasn't the chain tip.\n  #[error(\"header doesn't build off the chain tip\")]\n  InvalidParent,\n  /// Header specified an invalid transactions merkle tree hash.\n  #[error(\"header transactions hash is incorrect\")]\n  InvalidTransactions,\n  /// An unsigned transaction which was already added to the chain was present again.\n  #[error(\"an unsigned transaction which was already added to the chain was present again\")]\n  UnsignedAlreadyIncluded,\n  /// A provided transaction which was already added to the chain was present again.\n  #[error(\"an provided transaction which was already added to the chain was present again\")]\n  ProvidedAlreadyIncluded,\n  /// Transactions weren't ordered as expected (Provided, followed by Unsigned, followed by Signed).\n  #[error(\"transactions weren't ordered as expected (Provided, Unsigned, Signed)\")]\n  WrongTransactionOrder,\n  /// The block had a provided transaction this validator has yet to be provided.\n  #[error(\"block had a provided transaction not yet locally provided: {0:?}\")]\n  NonLocalProvided([u8; 32]),\n  /// The provided transaction was distinct from the locally provided transaction.\n  #[error(\"block had a distinct provided transaction\")]\n  DistinctProvided,\n  /// An included transaction was invalid.\n  #[error(\"included transaction had an error\")]\n  
TransactionError(TransactionError),\n}\n\n#[derive(Clone, PartialEq, Eq, Debug)]\npub struct BlockHeader {\n  pub parent: [u8; 32],\n  pub transactions: [u8; 32],\n}\n\nimpl ReadWrite for BlockHeader {\n  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {\n    let mut header = BlockHeader { parent: [0; 32], transactions: [0; 32] };\n    reader.read_exact(&mut header.parent)?;\n    reader.read_exact(&mut header.transactions)?;\n    Ok(header)\n  }\n\n  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {\n    writer.write_all(&self.parent)?;\n    writer.write_all(&self.transactions)\n  }\n}\n\nimpl BlockHeader {\n  pub fn hash(&self) -> [u8; 32] {\n    Blake2s256::digest([b\"tributary_block\".as_ref(), &self.serialize()].concat()).into()\n  }\n}\n\n#[derive(Clone, PartialEq, Eq, Debug)]\npub struct Block<T: TransactionTrait> {\n  pub header: BlockHeader,\n  pub transactions: Vec<Transaction<T>>,\n}\n\nimpl<T: TransactionTrait> ReadWrite for Block<T> {\n  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {\n    let header = BlockHeader::read(reader)?;\n\n    let mut txs = [0; 4];\n    reader.read_exact(&mut txs)?;\n    let txs = u32::from_le_bytes(txs);\n\n    let mut transactions = Vec::with_capacity(usize::try_from(txs).unwrap());\n    for _ in 0 .. 
txs {\n      transactions.push(Transaction::read(reader)?);\n    }\n\n    Ok(Block { header, transactions })\n  }\n\n  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {\n    self.header.write(writer)?;\n    writer.write_all(&u32::try_from(self.transactions.len()).unwrap().to_le_bytes())?;\n    for tx in &self.transactions {\n      tx.write(writer)?;\n    }\n    Ok(())\n  }\n}\n\nimpl<T: TransactionTrait> Block<T> {\n  /// Create a new block.\n  ///\n  /// mempool is expected to only have valid, non-conflicting transactions, sorted by nonce.\n  pub(crate) fn new(parent: [u8; 32], provided: Vec<T>, mempool: Vec<Transaction<T>>) -> Self {\n    let mut txs = vec![];\n    for tx in provided {\n      txs.push(Transaction::Application(tx))\n    }\n\n    let mut signed = vec![];\n    let mut unsigned = vec![];\n    for tx in mempool {\n      match tx.kind() {\n        TransactionKind::Signed(_, _) => signed.push(tx),\n        TransactionKind::Unsigned => unsigned.push(tx),\n        TransactionKind::Provided(_) => panic!(\"provided transaction entered mempool\"),\n      }\n    }\n\n    // unsigned first\n    txs.extend(unsigned);\n    // then signed\n    txs.extend(signed);\n\n    // Check TXs are sorted by nonce.\n    let nonce = |tx: &Transaction<T>| {\n      if let TransactionKind::Signed(_, Signed { nonce, .. 
}) = tx.kind() {\n        *nonce\n      } else {\n        0\n      }\n    };\n    let mut last = 0;\n    for tx in &txs {\n      let nonce = nonce(tx);\n      if nonce < last {\n        panic!(\"TXs in mempool weren't ordered by nonce\");\n      }\n      last = nonce;\n    }\n\n    let mut res =\n      Block { header: BlockHeader { parent, transactions: [0; 32] }, transactions: txs };\n    while res.serialize().len() > BLOCK_SIZE_LIMIT {\n      assert!(res.transactions.pop().is_some());\n    }\n    let hashes = res.transactions.iter().map(Transaction::hash).collect::<Vec<_>>();\n    res.header.transactions = merkle(&hashes);\n    res\n  }\n\n  pub fn parent(&self) -> [u8; 32] {\n    self.header.parent\n  }\n\n  pub fn hash(&self) -> [u8; 32] {\n    self.header.hash()\n  }\n\n  #[allow(clippy::too_many_arguments)]\n  pub(crate) fn verify<N: Network, G: GAIN>(\n    &self,\n    genesis: [u8; 32],\n    last_block: [u8; 32],\n    mut locally_provided: HashMap<&'static str, VecDeque<T>>,\n    get_and_increment_nonce: &mut G,\n    schema: &N::SignatureScheme,\n    commit: impl Fn(u64) -> Option<Commit<N::SignatureScheme>>,\n    provided_or_unsigned_in_chain: impl Fn([u8; 32]) -> bool,\n    allow_non_local_provided: bool,\n  ) -> Result<(), BlockError> {\n    #[derive(Clone, Copy, PartialEq, Eq, Debug)]\n    enum Order {\n      Provided,\n      Unsigned,\n      Signed,\n    }\n    impl From<Order> for u8 {\n      fn from(order: Order) -> u8 {\n        match order {\n          Order::Provided => 0,\n          Order::Unsigned => 1,\n          Order::Signed => 2,\n        }\n      }\n    }\n\n    if self.serialize().len() > BLOCK_SIZE_LIMIT {\n      Err(BlockError::TooLargeBlock)?;\n    }\n\n    if self.header.parent != last_block {\n      Err(BlockError::InvalidParent)?;\n    }\n\n    let mut last_tx_order = Order::Provided;\n    let mut included_in_block = HashSet::new();\n    let mut txs = Vec::with_capacity(self.transactions.len());\n    for tx in &self.transactions {\n   
   let tx_hash = tx.hash();\n      txs.push(tx_hash);\n\n      let current_tx_order = match tx.kind() {\n        TransactionKind::Provided(order) => {\n          if provided_or_unsigned_in_chain(tx_hash) {\n            Err(BlockError::ProvidedAlreadyIncluded)?;\n          }\n\n          if let Some(local) = locally_provided.get_mut(order).and_then(VecDeque::pop_front) {\n            // Since this was a provided TX, it must be an application TX\n            let Transaction::Application(tx) = tx else {\n              Err(BlockError::NonLocalProvided(txs.pop().unwrap()))?\n            };\n            if tx != &local {\n              Err(BlockError::DistinctProvided)?;\n            }\n          } else if !allow_non_local_provided {\n            Err(BlockError::NonLocalProvided(txs.pop().unwrap()))?\n          };\n\n          Order::Provided\n        }\n        TransactionKind::Unsigned => {\n          // check we don't already have the tx in the chain\n          if provided_or_unsigned_in_chain(tx_hash) || included_in_block.contains(&tx_hash) {\n            Err(BlockError::UnsignedAlreadyIncluded)?;\n          }\n          included_in_block.insert(tx_hash);\n\n          Order::Unsigned\n        }\n        TransactionKind::Signed(..) 
=> Order::Signed,\n      };\n\n      // enforce Provided => Unsigned => Signed order\n      if u8::from(current_tx_order) < u8::from(last_tx_order) {\n        Err(BlockError::WrongTransactionOrder)?;\n      }\n      last_tx_order = current_tx_order;\n\n      match tx {\n        Transaction::Tendermint(tx) => match verify_tendermint_tx::<N>(tx, schema, &commit) {\n          Ok(()) => {}\n          Err(e) => Err(BlockError::TransactionError(e))?,\n        },\n        Transaction::Application(tx) => {\n          match verify_transaction(tx, genesis, get_and_increment_nonce) {\n            Ok(()) => {}\n            Err(e) => Err(BlockError::TransactionError(e))?,\n          }\n        }\n      }\n    }\n\n    if merkle(&txs) != self.header.transactions {\n      Err(BlockError::InvalidTransactions)?;\n    }\n\n    Ok(())\n  }\n}\n"
  },
  {
    "path": "coordinator/tributary/src/blockchain.rs",
    "content": "use std::collections::{VecDeque, HashSet};\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::{group::GroupEncoding, Ciphersuite};\n\nuse serai_db::{Get, DbTxn, Db};\n\nuse scale::Decode;\n\nuse tendermint::ext::{Network, Commit};\n\nuse crate::{\n  ReadWrite, ProvidedError, ProvidedTransactions, BlockError, Block, Mempool, Transaction,\n  transaction::{Signed, TransactionKind, TransactionError, Transaction as TransactionTrait},\n};\n\n#[derive(Debug)]\npub(crate) struct Blockchain<D: Db, T: TransactionTrait> {\n  db: Option<D>,\n  genesis: [u8; 32],\n\n  block_number: u64,\n  tip: [u8; 32],\n  participants: HashSet<<Ristretto as Ciphersuite>::G>,\n\n  provided: ProvidedTransactions<D, T>,\n  mempool: Mempool<D, T>,\n\n  pub(crate) next_block_notifications: VecDeque<tokio::sync::oneshot::Sender<()>>,\n}\n\nimpl<D: Db, T: TransactionTrait> Blockchain<D, T> {\n  fn tip_key(genesis: [u8; 32]) -> Vec<u8> {\n    D::key(b\"tributary_blockchain\", b\"tip\", genesis)\n  }\n  fn block_number_key(&self) -> Vec<u8> {\n    D::key(b\"tributary_blockchain\", b\"block_number\", self.genesis)\n  }\n  fn block_key(genesis: &[u8], hash: &[u8; 32]) -> Vec<u8> {\n    D::key(b\"tributary_blockchain\", b\"block\", [genesis, hash].concat())\n  }\n  fn block_hash_key(genesis: &[u8], block_number: u64) -> Vec<u8> {\n    D::key(b\"tributary_blockchain\", b\"block_hash\", [genesis, &block_number.to_le_bytes()].concat())\n  }\n  fn commit_key(genesis: &[u8], hash: &[u8; 32]) -> Vec<u8> {\n    D::key(b\"tributary_blockchain\", b\"commit\", [genesis, hash].concat())\n  }\n  fn block_after_key(genesis: &[u8], hash: &[u8; 32]) -> Vec<u8> {\n    D::key(b\"tributary_blockchain\", b\"block_after\", [genesis, hash].concat())\n  }\n  fn unsigned_included_key(genesis: &[u8], hash: &[u8; 32]) -> Vec<u8> {\n    D::key(b\"tributary_blockchain\", b\"unsigned_included\", [genesis, hash].concat())\n  }\n  fn provided_included_key(genesis: &[u8], hash: &[u8; 32]) -> Vec<u8> {\n    
D::key(b\"tributary_blockchain\", b\"provided_included\", [genesis, hash].concat())\n  }\n  fn next_nonce_key(\n    genesis: &[u8; 32],\n    signer: &<Ristretto as Ciphersuite>::G,\n    order: &[u8],\n  ) -> Vec<u8> {\n    D::key(\n      b\"tributary_blockchain\",\n      b\"next_nonce\",\n      [genesis.as_ref(), signer.to_bytes().as_ref(), order].concat(),\n    )\n  }\n\n  pub(crate) fn new(\n    db: D,\n    genesis: [u8; 32],\n    participants: &[<Ristretto as Ciphersuite>::G],\n  ) -> Self {\n    let mut res = Self {\n      db: Some(db.clone()),\n      genesis,\n      participants: participants.iter().copied().collect(),\n\n      block_number: 0,\n      tip: genesis,\n\n      provided: ProvidedTransactions::new(db.clone(), genesis),\n      mempool: Mempool::new(db, genesis),\n\n      next_block_notifications: VecDeque::new(),\n    };\n\n    if let Some((block_number, tip)) = {\n      let db = res.db.as_ref().unwrap();\n      db.get(res.block_number_key()).map(|number| (number, db.get(Self::tip_key(genesis)).unwrap()))\n    } {\n      res.block_number = u64::from_le_bytes(block_number.try_into().unwrap());\n      res.tip.copy_from_slice(&tip);\n    }\n\n    res\n  }\n\n  pub(crate) fn tip(&self) -> [u8; 32] {\n    self.tip\n  }\n\n  pub(crate) fn block_number(&self) -> u64 {\n    self.block_number\n  }\n\n  pub(crate) fn block_from_db(db: &D, genesis: [u8; 32], block: &[u8; 32]) -> Option<Block<T>> {\n    db.get(Self::block_key(&genesis, block))\n      .map(|bytes| Block::<T>::read::<&[u8]>(&mut bytes.as_ref()).unwrap())\n  }\n\n  pub(crate) fn commit_from_db(db: &D, genesis: [u8; 32], block: &[u8; 32]) -> Option<Vec<u8>> {\n    db.get(Self::commit_key(&genesis, block))\n  }\n\n  pub(crate) fn block_hash_from_db(db: &D, genesis: [u8; 32], block: u64) -> Option<[u8; 32]> {\n    db.get(Self::block_hash_key(&genesis, block)).map(|h| h.try_into().unwrap())\n  }\n\n  pub(crate) fn commit(&self, block: &[u8; 32]) -> Option<Vec<u8>> {\n    
Self::commit_from_db(self.db.as_ref().unwrap(), self.genesis, block)\n  }\n\n  pub(crate) fn block_hash(&self, block: u64) -> Option<[u8; 32]> {\n    Self::block_hash_from_db(self.db.as_ref().unwrap(), self.genesis, block)\n  }\n\n  pub(crate) fn commit_by_block_number(&self, block: u64) -> Option<Vec<u8>> {\n    self.commit(&self.block_hash(block)?)\n  }\n\n  pub(crate) fn block_after(db: &D, genesis: [u8; 32], block: &[u8; 32]) -> Option<[u8; 32]> {\n    db.get(Self::block_after_key(&genesis, block)).map(|bytes| bytes.try_into().unwrap())\n  }\n\n  pub(crate) fn locally_provided_txs_in_block(\n    db: &D,\n    genesis: &[u8; 32],\n    block: &[u8; 32],\n    order: &str,\n  ) -> bool {\n    let local_key = ProvidedTransactions::<D, T>::locally_provided_quantity_key(genesis, order);\n    let local = db.get(local_key).map_or(0, |bytes| u32::from_le_bytes(bytes.try_into().unwrap()));\n    let block_key =\n      ProvidedTransactions::<D, T>::block_provided_quantity_key(genesis, block, order);\n    let block = db.get(block_key).map_or(0, |bytes| u32::from_le_bytes(bytes.try_into().unwrap()));\n\n    local >= block\n  }\n\n  pub(crate) fn tip_from_db(db: &D, genesis: [u8; 32]) -> [u8; 32] {\n    db.get(Self::tip_key(genesis)).map_or(genesis, |bytes| bytes.try_into().unwrap())\n  }\n\n  pub(crate) fn add_transaction<N: Network>(\n    &mut self,\n    internal: bool,\n    tx: Transaction<T>,\n    schema: &N::SignatureScheme,\n  ) -> Result<bool, TransactionError> {\n    let db = self.db.as_ref().unwrap();\n    let genesis = self.genesis;\n\n    let commit = |block: u64| -> Option<Commit<N::SignatureScheme>> {\n      let hash = Self::block_hash_from_db(db, genesis, block)?;\n      // we must have a commit per valid hash\n      let commit = Self::commit_from_db(db, genesis, &hash).unwrap();\n      // commit has to be valid if it is coming from our db\n      Some(Commit::<N::SignatureScheme>::decode(&mut commit.as_ref()).unwrap())\n    };\n    let unsigned_in_chain =\n      
|hash: [u8; 32]| db.get(Self::unsigned_included_key(&self.genesis, &hash)).is_some();\n\n    self.mempool.add::<N, _>(\n      |signer, order| {\n        if self.participants.contains(&signer) {\n          Some(\n            db.get(Self::next_nonce_key(&self.genesis, &signer, &order))\n              .map_or(0, |bytes| u32::from_le_bytes(bytes.try_into().unwrap())),\n          )\n        } else {\n          None\n        }\n      },\n      internal,\n      tx,\n      schema,\n      unsigned_in_chain,\n      commit,\n    )\n  }\n\n  pub(crate) fn provide_transaction(&mut self, tx: T) -> Result<(), ProvidedError> {\n    self.provided.provide(tx)\n  }\n\n  pub(crate) fn next_nonce(\n    &self,\n    signer: &<Ristretto as Ciphersuite>::G,\n    order: &[u8],\n  ) -> Option<u32> {\n    if let Some(next_nonce) = self.mempool.next_nonce_in_mempool(signer, order.to_vec()) {\n      return Some(next_nonce);\n    }\n    if self.participants.contains(signer) {\n      Some(\n        self\n          .db\n          .as_ref()\n          .unwrap()\n          .get(Self::next_nonce_key(&self.genesis, signer, order))\n          .map_or(0, |bytes| u32::from_le_bytes(bytes.try_into().unwrap())),\n      )\n    } else {\n      None\n    }\n  }\n\n  pub(crate) fn build_block<N: Network>(&mut self, schema: &N::SignatureScheme) -> Block<T> {\n    let block = Block::new(\n      self.tip,\n      self.provided.transactions.values().flatten().cloned().collect(),\n      self.mempool.block(),\n    );\n    // build_block should not return invalid blocks\n    self.verify_block::<N>(&block, schema, false).unwrap();\n    block\n  }\n\n  pub(crate) fn verify_block<N: Network>(\n    &self,\n    block: &Block<T>,\n    schema: &N::SignatureScheme,\n    allow_non_local_provided: bool,\n  ) -> Result<(), BlockError> {\n    let db = self.db.as_ref().unwrap();\n    let provided_or_unsigned_in_chain = |hash: [u8; 32]| {\n      db.get(Self::unsigned_included_key(&self.genesis, &hash)).is_some() ||\n        
db.get(Self::provided_included_key(&self.genesis, &hash)).is_some()\n    };\n    let commit = |block: u64| -> Option<Commit<N::SignatureScheme>> {\n      let commit = self.commit_by_block_number(block)?;\n      // commit has to be valid if it is coming from our db\n      Some(Commit::<N::SignatureScheme>::decode(&mut commit.as_ref()).unwrap())\n    };\n\n    let mut txn_db = db.clone();\n    let mut txn = txn_db.txn();\n    let res = block.verify::<N, _>(\n      self.genesis,\n      self.tip,\n      self.provided.transactions.clone(),\n      &mut |signer, order| {\n        if self.participants.contains(signer) {\n          let key = Self::next_nonce_key(&self.genesis, signer, order);\n          let next = txn\n            .get(&key)\n            .map_or(0, |next_nonce| u32::from_le_bytes(next_nonce.try_into().unwrap()));\n          txn.put(key, (next + 1).to_le_bytes());\n          Some(next)\n        } else {\n          None\n        }\n      },\n      schema,\n      &commit,\n      provided_or_unsigned_in_chain,\n      allow_non_local_provided,\n    );\n    // Drop this TXN's changes as we're solely verifying the block\n    drop(txn);\n    res\n  }\n\n  /// Add a block.\n  pub(crate) fn add_block<N: Network>(\n    &mut self,\n    block: &Block<T>,\n    commit: Vec<u8>,\n    schema: &N::SignatureScheme,\n  ) -> Result<(), BlockError> {\n    self.verify_block::<N>(block, schema, true)?;\n\n    log::info!(\n      \"adding block {} to tributary {} with {} TXs\",\n      hex::encode(block.hash()),\n      hex::encode(self.genesis),\n      block.transactions.len(),\n    );\n\n    // None of the following assertions should be reachable since we verified the block\n\n    // Take it from the Option so Rust doesn't consider self as mutably borrowed thanks to the\n    // existence of the txn\n    let mut db = self.db.take().unwrap();\n    let mut txn = db.txn();\n\n    self.tip = block.hash();\n    txn.put(Self::tip_key(self.genesis), self.tip);\n\n    self.block_number += 
1;\n    txn.put(self.block_number_key(), self.block_number.to_le_bytes());\n\n    txn.put(Self::block_hash_key(&self.genesis, self.block_number), self.tip);\n\n    txn.put(Self::block_key(&self.genesis, &self.tip), block.serialize());\n    txn.put(Self::commit_key(&self.genesis, &self.tip), commit);\n\n    txn.put(Self::block_after_key(&self.genesis, &block.parent()), block.hash());\n\n    for tx in &block.transactions {\n      match tx.kind() {\n        TransactionKind::Provided(order) => {\n          let hash = tx.hash();\n          self.provided.complete(&mut txn, order, self.tip, hash);\n          txn.put(Self::provided_included_key(&self.genesis, &hash), []);\n        }\n        TransactionKind::Unsigned => {\n          let hash = tx.hash();\n          // Save as included on chain\n          txn.put(Self::unsigned_included_key(&self.genesis, &hash), []);\n          // remove from the mempool\n          self.mempool.remove(&hash);\n        }\n        TransactionKind::Signed(order, Signed { signer, nonce, .. }) => {\n          let next_nonce = nonce + 1;\n          txn.put(Self::next_nonce_key(&self.genesis, signer, &order), next_nonce.to_le_bytes());\n          self.mempool.remove(&tx.hash());\n        }\n      }\n    }\n\n    txn.commit();\n    self.db = Some(db);\n\n    for tx in self.next_block_notifications.drain(..) {\n      let _ = tx.send(());\n    }\n\n    Ok(())\n  }\n}\n"
  },
  {
    "path": "coordinator/tributary/src/lib.rs",
    "content": "use core::{marker::PhantomData, fmt::Debug};\nuse std::{sync::Arc, io};\n\nuse async_trait::async_trait;\n\nuse zeroize::Zeroizing;\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::Ciphersuite;\n\nuse scale::Decode;\nuse futures_channel::mpsc::UnboundedReceiver;\nuse futures_util::{StreamExt, SinkExt};\nuse ::tendermint::{\n  ext::{BlockNumber, Commit, Block as BlockTrait, Network},\n  SignedMessageFor, SyncedBlock, SyncedBlockSender, SyncedBlockResultReceiver, MessageSender,\n  TendermintMachine, TendermintHandle,\n};\n\npub use ::tendermint::Evidence;\n\nuse serai_db::Db;\n\nuse tokio::sync::RwLock;\n\nmod merkle;\npub(crate) use merkle::*;\n\npub mod transaction;\npub use transaction::{TransactionError, Signed, TransactionKind, Transaction as TransactionTrait};\n\nuse crate::tendermint::tx::TendermintTx;\n\nmod provided;\npub(crate) use provided::*;\npub use provided::ProvidedError;\n\nmod block;\npub use block::*;\n\nmod blockchain;\npub(crate) use blockchain::*;\n\nmod mempool;\npub(crate) use mempool::*;\n\npub mod tendermint;\npub(crate) use crate::tendermint::*;\n\n#[cfg(any(test, feature = \"tests\"))]\npub mod tests;\n\n/// Size limit for an individual transaction.\npub const TRANSACTION_SIZE_LIMIT: usize = 3_000_000;\n/// Amount of transactions a single account may have in the mempool.\npub const ACCOUNT_MEMPOOL_LIMIT: u32 = 50;\n/// Block size limit.\n// This targets a growth limit of roughly 45 GB a day, under load, in order to prevent a malicious\n// participant from flooding disks and causing out of space errors in order processes.\npub const BLOCK_SIZE_LIMIT: usize = 3_001_000;\n\npub(crate) const TENDERMINT_MESSAGE: u8 = 0;\npub(crate) const TRANSACTION_MESSAGE: u8 = 1;\n\n#[allow(clippy::large_enum_variant)]\n#[derive(Clone, PartialEq, Eq, Debug)]\npub enum Transaction<T: TransactionTrait> {\n  Tendermint(TendermintTx),\n  Application(T),\n}\n\nimpl<T: TransactionTrait> ReadWrite for Transaction<T> {\n  fn read<R: 
io::Read>(reader: &mut R) -> io::Result<Self> {\n    let mut kind = [0];\n    reader.read_exact(&mut kind)?;\n    match kind[0] {\n      0 => {\n        let tx = TendermintTx::read(reader)?;\n        Ok(Transaction::Tendermint(tx))\n      }\n      1 => {\n        let tx = T::read(reader)?;\n        Ok(Transaction::Application(tx))\n      }\n      _ => Err(io::Error::other(\"invalid transaction type\")),\n    }\n  }\n  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {\n    match self {\n      Transaction::Tendermint(tx) => {\n        writer.write_all(&[0])?;\n        tx.write(writer)\n      }\n      Transaction::Application(tx) => {\n        writer.write_all(&[1])?;\n        tx.write(writer)\n      }\n    }\n  }\n}\n\nimpl<T: TransactionTrait> Transaction<T> {\n  pub fn hash(&self) -> [u8; 32] {\n    match self {\n      Transaction::Tendermint(tx) => tx.hash(),\n      Transaction::Application(tx) => tx.hash(),\n    }\n  }\n\n  pub fn kind(&self) -> TransactionKind<'_> {\n    match self {\n      Transaction::Tendermint(tx) => tx.kind(),\n      Transaction::Application(tx) => tx.kind(),\n    }\n  }\n}\n\n/// An item which can be read and written.\npub trait ReadWrite: Sized {\n  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self>;\n  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()>;\n\n  fn serialize(&self) -> Vec<u8> {\n    // BlockHeader is 64 bytes and likely the smallest item in this system\n    let mut buf = Vec::with_capacity(64);\n    self.write(&mut buf).unwrap();\n    buf\n  }\n}\n\n#[async_trait]\npub trait P2p: 'static + Send + Sync + Clone + Debug {\n  /// Broadcast a message to all other members of the Tributary with the specified genesis.\n  ///\n  /// The Tributary will re-broadcast consensus messages on a fixed interval to ensure they aren't\n  /// prematurely dropped from the P2P layer. 
THe P2P layer SHOULD perform content-based\n  /// deduplication to ensure a sane amount of load.\n  async fn broadcast(&self, genesis: [u8; 32], msg: Vec<u8>);\n}\n\n#[async_trait]\nimpl<P: P2p> P2p for Arc<P> {\n  async fn broadcast(&self, genesis: [u8; 32], msg: Vec<u8>) {\n    (*self).broadcast(genesis, msg).await\n  }\n}\n\n#[derive(Clone)]\npub struct Tributary<D: Db, T: TransactionTrait, P: P2p> {\n  db: D,\n\n  genesis: [u8; 32],\n  network: TendermintNetwork<D, T, P>,\n\n  synced_block: Arc<RwLock<SyncedBlockSender<TendermintNetwork<D, T, P>>>>,\n  synced_block_result: Arc<RwLock<SyncedBlockResultReceiver>>,\n  messages: Arc<RwLock<MessageSender<TendermintNetwork<D, T, P>>>>,\n}\n\nimpl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {\n  pub async fn new(\n    db: D,\n    genesis: [u8; 32],\n    start_time: u64,\n    key: Zeroizing<<Ristretto as Ciphersuite>::F>,\n    validators: Vec<(<Ristretto as Ciphersuite>::G, u64)>,\n    p2p: P,\n  ) -> Option<Self> {\n    log::info!(\"new Tributary with genesis {}\", hex::encode(genesis));\n\n    let validators_vec = validators.iter().map(|validator| validator.0).collect::<Vec<_>>();\n\n    let signer = Arc::new(Signer::new(genesis, key));\n    let validators = Arc::new(Validators::new(genesis, validators)?);\n\n    let mut blockchain = Blockchain::new(db.clone(), genesis, &validators_vec);\n    let block_number = BlockNumber(blockchain.block_number());\n\n    let start_time = if let Some(commit) = blockchain.commit(&blockchain.tip()) {\n      Commit::<Validators>::decode(&mut commit.as_ref()).unwrap().end_time\n    } else {\n      start_time\n    };\n    let proposal = TendermintBlock(\n      blockchain.build_block::<TendermintNetwork<D, T, P>>(&validators).serialize(),\n    );\n    let blockchain = Arc::new(RwLock::new(blockchain));\n\n    let network = TendermintNetwork { genesis, signer, validators, blockchain, p2p };\n\n    let TendermintHandle { synced_block, synced_block_result, messages, machine } =\n 
     TendermintMachine::new(\n        db.clone(),\n        network.clone(),\n        genesis,\n        block_number,\n        start_time,\n        proposal,\n      )\n      .await;\n    tokio::spawn(machine.run());\n\n    Some(Self {\n      db,\n      genesis,\n      network,\n      synced_block: Arc::new(RwLock::new(synced_block)),\n      synced_block_result: Arc::new(RwLock::new(synced_block_result)),\n      messages: Arc::new(RwLock::new(messages)),\n    })\n  }\n\n  pub fn block_time() -> u32 {\n    TendermintNetwork::<D, T, P>::block_time()\n  }\n\n  pub fn genesis(&self) -> [u8; 32] {\n    self.genesis\n  }\n\n  pub async fn block_number(&self) -> u64 {\n    self.network.blockchain.read().await.block_number()\n  }\n  pub async fn tip(&self) -> [u8; 32] {\n    self.network.blockchain.read().await.tip()\n  }\n\n  pub fn reader(&self) -> TributaryReader<D, T> {\n    TributaryReader(self.db.clone(), self.genesis, PhantomData)\n  }\n\n  pub async fn provide_transaction(&self, tx: T) -> Result<(), ProvidedError> {\n    self.network.blockchain.write().await.provide_transaction(tx)\n  }\n\n  pub async fn next_nonce(\n    &self,\n    signer: &<Ristretto as Ciphersuite>::G,\n    order: &[u8],\n  ) -> Option<u32> {\n    self.network.blockchain.read().await.next_nonce(signer, order)\n  }\n\n  // Returns Ok(true) if new, Ok(false) if an already present unsigned, or the error.\n  // Safe to be &self since the only meaningful usage of self is self.network.blockchain which\n  // successfully acquires its own write lock\n  pub async fn add_transaction(&self, tx: T) -> Result<bool, TransactionError> {\n    let tx = Transaction::Application(tx);\n    let mut to_broadcast = vec![TRANSACTION_MESSAGE];\n    tx.write(&mut to_broadcast).unwrap();\n    let res = self.network.blockchain.write().await.add_transaction::<TendermintNetwork<D, T, P>>(\n      true,\n      tx,\n      &self.network.signature_scheme(),\n    );\n    if res == Ok(true) {\n      
self.network.p2p.broadcast(self.genesis, to_broadcast).await;\n    }\n    res\n  }\n\n  async fn sync_block_internal(\n    &self,\n    block: Block<T>,\n    commit: Vec<u8>,\n    result: &mut UnboundedReceiver<bool>,\n  ) -> bool {\n    let (tip, block_number) = {\n      let blockchain = self.network.blockchain.read().await;\n      (blockchain.tip(), blockchain.block_number())\n    };\n\n    if block.header.parent != tip {\n      log::debug!(\"told to sync a block whose parent wasn't our tip\");\n      return false;\n    }\n\n    let block = TendermintBlock(block.serialize());\n    let mut commit_ref = commit.as_ref();\n    let Ok(commit) = Commit::<Arc<Validators>>::decode(&mut commit_ref) else {\n      log::error!(\"sent an invalidly serialized commit\");\n      return false;\n    };\n    // Storage DoS vector. We *could* truncate to solely the relevant portion, trying to save this,\n    // yet then we'd have to test the truncation was performed correctly.\n    if !commit_ref.is_empty() {\n      log::error!(\"sent an commit with additional data after it\");\n      return false;\n    }\n    if !self.network.verify_commit(block.id(), &commit) {\n      log::error!(\"sent an invalid commit\");\n      return false;\n    }\n\n    let number = BlockNumber(block_number + 1);\n    self.synced_block.write().await.send(SyncedBlock { number, block, commit }).await.unwrap();\n    result.next().await.unwrap()\n  }\n\n  // Sync a block.\n  // TODO: Since we have a static validator set, we should only need the tail commit?\n  pub async fn sync_block(&self, block: Block<T>, commit: Vec<u8>) -> bool {\n    let mut result = self.synced_block_result.write().await;\n    self.sync_block_internal(block, commit, &mut result).await\n  }\n\n  // Return true if the message should be rebroadcasted.\n  pub async fn handle_message(&self, msg: &[u8]) -> bool {\n    match msg.first() {\n      Some(&TRANSACTION_MESSAGE) => {\n        let Ok(tx) = Transaction::read::<&[u8]>(&mut &msg[1 ..]) else 
{\n          log::error!(\"received invalid transaction message\");\n          return false;\n        };\n\n        // TODO: Sync mempools with fellow peers\n        // Can we just rebroadcast transactions not included for at least two blocks?\n        let res =\n          self.network.blockchain.write().await.add_transaction::<TendermintNetwork<D, T, P>>(\n            false,\n            tx,\n            &self.network.signature_scheme(),\n          );\n        log::debug!(\"received transaction message. valid new transaction: {res:?}\");\n        res == Ok(true)\n      }\n\n      Some(&TENDERMINT_MESSAGE) => {\n        let Ok(msg) =\n          SignedMessageFor::<TendermintNetwork<D, T, P>>::decode::<&[u8]>(&mut &msg[1 ..])\n        else {\n          log::error!(\"received invalid tendermint message\");\n          return false;\n        };\n\n        self.messages.write().await.send(msg).await.unwrap();\n        false\n      }\n\n      _ => false,\n    }\n  }\n\n  /// Get a Future which will resolve once the next block has been added.\n  pub async fn next_block_notification(\n    &self,\n  ) -> impl Send + Sync + core::future::Future<Output = Result<(), impl Send + Sync>> {\n    let (tx, rx) = tokio::sync::oneshot::channel();\n    self.network.blockchain.write().await.next_block_notifications.push_back(tx);\n    rx\n  }\n}\n\n#[derive(Clone)]\npub struct TributaryReader<D: Db, T: TransactionTrait>(D, [u8; 32], PhantomData<T>);\nimpl<D: Db, T: TransactionTrait> TributaryReader<D, T> {\n  pub fn genesis(&self) -> [u8; 32] {\n    self.1\n  }\n\n  // Since these values are static once set, they can be safely read from the database without lock\n  // acquisition\n  pub fn block(&self, hash: &[u8; 32]) -> Option<Block<T>> {\n    Blockchain::<D, T>::block_from_db(&self.0, self.1, hash)\n  }\n  pub fn commit(&self, hash: &[u8; 32]) -> Option<Vec<u8>> {\n    Blockchain::<D, T>::commit_from_db(&self.0, self.1, hash)\n  }\n  pub fn parsed_commit(&self, hash: &[u8; 32]) -> 
Option<Commit<Validators>> {\n    self.commit(hash).map(|commit| Commit::<Validators>::decode(&mut commit.as_ref()).unwrap())\n  }\n  pub fn block_after(&self, hash: &[u8; 32]) -> Option<[u8; 32]> {\n    Blockchain::<D, T>::block_after(&self.0, self.1, hash)\n  }\n  pub fn time_of_block(&self, hash: &[u8; 32]) -> Option<u64> {\n    self\n      .commit(hash)\n      .map(|commit| Commit::<Validators>::decode(&mut commit.as_ref()).unwrap().end_time)\n  }\n\n  pub fn locally_provided_txs_in_block(&self, hash: &[u8; 32], order: &str) -> bool {\n    Blockchain::<D, T>::locally_provided_txs_in_block(&self.0, &self.1, hash, order)\n  }\n\n  // This isn't static, yet can be read with only minor discrepancy risks\n  pub fn tip(&self) -> [u8; 32] {\n    Blockchain::<D, T>::tip_from_db(&self.0, self.1)\n  }\n}\n"
  },
  {
    "path": "coordinator/tributary/src/mempool.rs",
    "content": "use std::collections::HashMap;\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::Ciphersuite;\n\nuse serai_db::{DbTxn, Db};\n\nuse tendermint::ext::{Network, Commit};\n\nuse crate::{\n  ACCOUNT_MEMPOOL_LIMIT, ReadWrite,\n  transaction::{\n    Signed, TransactionKind, TransactionError, Transaction as TransactionTrait, verify_transaction,\n  },\n  tendermint::tx::verify_tendermint_tx,\n  Transaction,\n};\n\n#[derive(Clone, PartialEq, Eq, Debug)]\npub(crate) struct Mempool<D: Db, T: TransactionTrait> {\n  db: D,\n  genesis: [u8; 32],\n\n  last_nonce_in_mempool: HashMap<(<Ristretto as Ciphersuite>::G, Vec<u8>), u32>,\n  txs: HashMap<[u8; 32], Transaction<T>>,\n  txs_per_signer: HashMap<<Ristretto as Ciphersuite>::G, u32>,\n}\n\nimpl<D: Db, T: TransactionTrait> Mempool<D, T> {\n  fn transaction_key(&self, hash: &[u8]) -> Vec<u8> {\n    D::key(b\"tributary_mempool\", b\"transaction\", [self.genesis.as_ref(), hash].concat())\n  }\n  fn current_mempool_key(&self) -> Vec<u8> {\n    D::key(b\"tributary_mempool\", b\"current\", self.genesis)\n  }\n\n  // save given tx to the mempool db\n  fn save_tx(&mut self, tx: Transaction<T>) {\n    let tx_hash = tx.hash();\n    let transaction_key = self.transaction_key(&tx_hash);\n    let current_mempool_key = self.current_mempool_key();\n    let mut current_mempool = self.db.get(&current_mempool_key).unwrap_or(vec![]);\n\n    let mut txn = self.db.txn();\n    txn.put(transaction_key, tx.serialize());\n    current_mempool.extend(tx_hash);\n    txn.put(current_mempool_key, current_mempool);\n    txn.commit();\n\n    self.txs.insert(tx_hash, tx);\n  }\n\n  fn unsigned_already_exist(\n    &self,\n    hash: [u8; 32],\n    unsigned_in_chain: impl Fn([u8; 32]) -> bool,\n  ) -> bool {\n    unsigned_in_chain(hash) || self.txs.contains_key(&hash)\n  }\n\n  pub(crate) fn new(db: D, genesis: [u8; 32]) -> Self {\n    let mut res = Mempool {\n      db,\n      genesis,\n      last_nonce_in_mempool: HashMap::new(),\n      txs: 
HashMap::new(),\n      txs_per_signer: HashMap::new(),\n    };\n\n    let current_mempool = res.db.get(res.current_mempool_key()).unwrap_or(vec![]);\n\n    for hash in current_mempool.chunks(32) {\n      let hash: [u8; 32] = hash.try_into().unwrap();\n      let tx: Transaction<T> =\n        Transaction::read::<&[u8]>(&mut res.db.get(res.transaction_key(&hash)).unwrap().as_ref())\n          .unwrap();\n      debug_assert_eq!(tx.hash(), hash);\n\n      match tx {\n        Transaction::Tendermint(tx) => {\n          res.txs.insert(hash, Transaction::Tendermint(tx));\n        }\n        Transaction::Application(tx) => match tx.kind() {\n          TransactionKind::Signed(order, Signed { signer, nonce, .. }) => {\n            let amount = *res.txs_per_signer.get(signer).unwrap_or(&0) + 1;\n            res.txs_per_signer.insert(*signer, amount);\n\n            if let Some(prior_nonce) =\n              res.last_nonce_in_mempool.insert((*signer, order.clone()), *nonce)\n            {\n              assert_eq!(prior_nonce, nonce - 1);\n            }\n\n            res.txs.insert(hash, Transaction::Application(tx));\n          }\n          TransactionKind::Unsigned => {\n            res.txs.insert(hash, Transaction::Application(tx));\n          }\n          _ => panic!(\"mempool database had a provided transaction\"),\n        },\n      }\n    }\n\n    res\n  }\n\n  // Returns Ok(true) if new, Ok(false) if an already present unsigned, or the error.\n  pub(crate) fn add<\n    N: Network,\n    F: FnOnce(<Ristretto as Ciphersuite>::G, Vec<u8>) -> Option<u32>,\n  >(\n    &mut self,\n    blockchain_next_nonce: F,\n    internal: bool,\n    tx: Transaction<T>,\n    schema: &N::SignatureScheme,\n    unsigned_in_chain: impl Fn([u8; 32]) -> bool,\n    commit: impl Fn(u64) -> Option<Commit<N::SignatureScheme>>,\n  ) -> Result<bool, TransactionError> {\n    match &tx {\n      Transaction::Tendermint(tendermint_tx) => {\n        // All Tendermint transactions should be unsigned\n        
assert_eq!(TransactionKind::Unsigned, tendermint_tx.kind());\n\n        // check we have the tx in the pool/chain\n        if self.unsigned_already_exist(tx.hash(), unsigned_in_chain) {\n          return Ok(false);\n        }\n\n        // verify the tx\n        verify_tendermint_tx::<N>(tendermint_tx, schema, commit)?;\n      }\n      Transaction::Application(app_tx) => {\n        match app_tx.kind() {\n          TransactionKind::Signed(order, Signed { signer, .. }) => {\n            // Get the nonce from the blockchain\n            let Some(blockchain_next_nonce) = blockchain_next_nonce(*signer, order.clone()) else {\n              // Not a participant\n              Err(TransactionError::InvalidSigner)?\n            };\n            let mut next_nonce = blockchain_next_nonce;\n\n            if let Some(mempool_last_nonce) =\n              self.last_nonce_in_mempool.get(&(*signer, order.clone()))\n            {\n              assert!(*mempool_last_nonce >= blockchain_next_nonce);\n              next_nonce = *mempool_last_nonce + 1;\n            }\n\n            // If we have too many transactions from this sender, don't add this yet UNLESS we are\n            // this sender\n            let amount_in_pool = *self.txs_per_signer.get(signer).unwrap_or(&0) + 1;\n            if !internal && (amount_in_pool > ACCOUNT_MEMPOOL_LIMIT) {\n              Err(TransactionError::TooManyInMempool)?;\n            }\n\n            verify_transaction(app_tx, self.genesis, &mut |_, _| Some(next_nonce))?;\n            self.last_nonce_in_mempool.insert((*signer, order.clone()), next_nonce);\n            self.txs_per_signer.insert(*signer, amount_in_pool);\n          }\n          TransactionKind::Unsigned => {\n            // check we have the tx in the pool/chain\n            if self.unsigned_already_exist(tx.hash(), unsigned_in_chain) {\n              return Ok(false);\n            }\n\n            app_tx.verify()?;\n          }\n          TransactionKind::Provided(_) => 
Err(TransactionError::ProvidedAddedToMempool)?,\n        }\n      }\n    }\n\n    // Save the TX to the pool\n    self.save_tx(tx);\n    Ok(true)\n  }\n\n  // Returns None if the mempool doesn't have a nonce tracked.\n  pub(crate) fn next_nonce_in_mempool(\n    &self,\n    signer: &<Ristretto as Ciphersuite>::G,\n    order: Vec<u8>,\n  ) -> Option<u32> {\n    self.last_nonce_in_mempool.get(&(*signer, order)).copied().map(|nonce| nonce + 1)\n  }\n\n  /// Get transactions to include in a block.\n  pub(crate) fn block(&mut self) -> Vec<Transaction<T>> {\n    let mut unsigned = vec![];\n    let mut signed = vec![];\n    for hash in self.txs.keys().copied().collect::<Vec<_>>() {\n      let tx = &self.txs[&hash];\n\n      match tx.kind() {\n        TransactionKind::Signed(_, Signed { .. }) => {\n          signed.push(tx.clone());\n        }\n        TransactionKind::Unsigned => {\n          unsigned.push(tx.clone());\n        }\n        _ => panic!(\"provided transaction entered mempool\"),\n      }\n    }\n\n    // Sort signed by nonce\n    let nonce = |tx: &Transaction<T>| {\n      if let TransactionKind::Signed(_, Signed { nonce, .. }) = tx.kind() {\n        *nonce\n      } else {\n        unreachable!()\n      }\n    };\n    signed.sort_by(|a, b| nonce(a).partial_cmp(&nonce(b)).unwrap());\n\n    // unsigned first, then signed.\n    unsigned.append(&mut signed);\n    unsigned\n  }\n\n  /// Remove a transaction from the mempool.\n  pub(crate) fn remove(&mut self, tx: &[u8; 32]) {\n    let transaction_key = self.transaction_key(tx);\n    let current_mempool_key = self.current_mempool_key();\n    let current_mempool = self.db.get(&current_mempool_key).unwrap_or(vec![]);\n\n    let mut i = 0;\n    while i < current_mempool.len() {\n      if &current_mempool[i .. 
(i + 32)] == tx {\n        break;\n      }\n      i += 32;\n    }\n\n    // This doesn't have to be atomic with any greater operation\n    let mut txn = self.db.txn();\n    txn.del(transaction_key);\n    if i != current_mempool.len() {\n      txn\n        .put(current_mempool_key, [&current_mempool[.. i], &current_mempool[(i + 32) ..]].concat());\n    }\n    txn.commit();\n\n    if let Some(tx) = self.txs.remove(tx) {\n      if let TransactionKind::Signed(order, Signed { signer, nonce, .. }) = tx.kind() {\n        let amount = *self.txs_per_signer.get(signer).unwrap() - 1;\n        self.txs_per_signer.insert(*signer, amount);\n\n        if self.last_nonce_in_mempool.get(&(*signer, order.clone())) == Some(nonce) {\n          self.last_nonce_in_mempool.remove(&(*signer, order));\n        }\n      }\n    }\n  }\n\n  #[cfg(test)]\n  pub(crate) fn txs(&self) -> &HashMap<[u8; 32], Transaction<T>> {\n    &self.txs\n  }\n}\n"
  },
  {
    "path": "coordinator/tributary/src/merkle.rs",
    "content": "use blake2::{Digest, Blake2s256};\n\npub(crate) fn merkle(hash_args: &[[u8; 32]]) -> [u8; 32] {\n  let mut hashes = Vec::with_capacity(hash_args.len());\n  for hash in hash_args {\n    hashes.push(Blake2s256::digest([b\"leaf_hash\".as_ref(), hash].concat()));\n  }\n\n  let zero = [0; 32];\n  let mut interim;\n  while hashes.len() > 1 {\n    interim = Vec::with_capacity((hashes.len() + 1) / 2);\n\n    let mut i = 0;\n    while i < hashes.len() {\n      interim.push(Blake2s256::digest(\n        [\n          b\"branch_hash\".as_ref(),\n          hashes[i].as_ref(),\n          hashes.get(i + 1).map_or(zero.as_ref(), AsRef::as_ref),\n        ]\n        .concat(),\n      ));\n      i += 2;\n    }\n\n    hashes = interim;\n  }\n\n  hashes.first().copied().map_or(zero, Into::into)\n}\n"
  },
  {
    "path": "coordinator/tributary/src/provided.rs",
    "content": "use std::collections::{VecDeque, HashMap};\n\nuse thiserror::Error;\n\nuse serai_db::{Get, DbTxn, Db};\n\nuse crate::transaction::{TransactionKind, TransactionError, Transaction, verify_transaction};\n\n#[derive(Clone, PartialEq, Eq, Debug, Error)]\npub enum ProvidedError {\n  /// The provided transaction's kind wasn't Provided\n  #[error(\"transaction wasn't a provided transaction\")]\n  NotProvided,\n  /// The provided transaction was invalid\n  #[error(\"provided transaction was invalid\")]\n  InvalidProvided(TransactionError),\n  /// Transaction was already provided\n  #[error(\"transaction was already provided\")]\n  AlreadyProvided,\n  /// Local transaction mismatches the on-chain provided\n  #[error(\"local provides mismatches on-chain provided\")]\n  LocalMismatchesOnChain,\n}\n\n#[derive(Clone, PartialEq, Eq, Debug)]\npub struct ProvidedTransactions<D: Db, T: Transaction> {\n  db: D,\n  genesis: [u8; 32],\n\n  pub(crate) transactions: HashMap<&'static str, VecDeque<T>>,\n}\n\nimpl<D: Db, T: Transaction> ProvidedTransactions<D, T> {\n  fn transaction_key(&self, hash: &[u8]) -> Vec<u8> {\n    D::key(b\"tributary_provided\", b\"transaction\", [self.genesis.as_ref(), hash].concat())\n  }\n  fn current_provided_key(&self) -> Vec<u8> {\n    D::key(b\"tributary_provided\", b\"current\", self.genesis)\n  }\n  pub(crate) fn locally_provided_quantity_key(genesis: &[u8; 32], order: &str) -> Vec<u8> {\n    D::key(b\"tributary_provided\", b\"local_quantity\", [genesis, order.as_bytes()].concat())\n  }\n  pub(crate) fn on_chain_provided_quantity_key(genesis: &[u8; 32], order: &str) -> Vec<u8> {\n    D::key(b\"tributary_provided\", b\"on_chain_quantity\", [genesis, order.as_bytes()].concat())\n  }\n  pub(crate) fn block_provided_quantity_key(\n    genesis: &[u8; 32],\n    block: &[u8; 32],\n    order: &str,\n  ) -> Vec<u8> {\n    D::key(b\"tributary_provided\", b\"block_quantity\", [genesis, block, order.as_bytes()].concat())\n  }\n\n  pub(crate) fn 
on_chain_provided_key(genesis: &[u8; 32], order: &str, id: u32) -> Vec<u8> {\n    D::key(\n      b\"tributary_provided\",\n      b\"on_chain_tx\",\n      [genesis, order.as_bytes(), &id.to_le_bytes()].concat(),\n    )\n  }\n\n  pub(crate) fn new(db: D, genesis: [u8; 32]) -> Self {\n    let mut res = ProvidedTransactions { db, genesis, transactions: HashMap::new() };\n\n    let currently_provided = res.db.get(res.current_provided_key()).unwrap_or(vec![]);\n    let mut i = 0;\n    while i < currently_provided.len() {\n      let tx = T::read::<&[u8]>(\n        &mut res.db.get(res.transaction_key(&currently_provided[i .. (i + 32)])).unwrap().as_ref(),\n      )\n      .unwrap();\n\n      let TransactionKind::Provided(order) = tx.kind() else {\n        panic!(\"provided transaction saved to disk wasn't provided\");\n      };\n\n      if !res.transactions.contains_key(order) {\n        res.transactions.insert(order, VecDeque::new());\n      }\n      res.transactions.get_mut(order).unwrap().push_back(tx);\n\n      i += 32;\n    }\n\n    res\n  }\n\n  /// Provide a transaction for inclusion in a block.\n  pub(crate) fn provide(&mut self, tx: T) -> Result<(), ProvidedError> {\n    let TransactionKind::Provided(order) = tx.kind() else { Err(ProvidedError::NotProvided)? 
};\n\n    match verify_transaction(&tx, self.genesis, &mut |_, _| None) {\n      Ok(()) => {}\n      Err(e) => Err(ProvidedError::InvalidProvided(e))?,\n    }\n    let tx_hash = tx.hash();\n\n    // Check it wasn't already provided\n    let provided_key = self.transaction_key(&tx_hash);\n    if self.db.get(&provided_key).is_some() {\n      Err(ProvidedError::AlreadyProvided)?;\n    }\n\n    // get local and on-chain tx numbers\n    let local_key = Self::locally_provided_quantity_key(&self.genesis, order);\n    let mut local_quantity =\n      self.db.get(&local_key).map_or(0, |bytes| u32::from_le_bytes(bytes.try_into().unwrap()));\n    let on_chain_key = Self::on_chain_provided_quantity_key(&self.genesis, order);\n    let on_chain_quantity =\n      self.db.get(on_chain_key).map_or(0, |bytes| u32::from_le_bytes(bytes.try_into().unwrap()));\n\n    let current_provided_key = self.current_provided_key();\n\n    // This would have a race-condition with multiple calls to provide, though this takes &mut self\n    // peventing multiple calls at once\n    let mut txn = self.db.txn();\n    txn.put(provided_key, tx.serialize());\n\n    let this_provided_id = local_quantity;\n\n    local_quantity += 1;\n    txn.put(local_key, local_quantity.to_le_bytes());\n\n    if this_provided_id < on_chain_quantity {\n      // Verify against the on-chain version\n      if tx_hash.as_ref() !=\n        txn.get(Self::on_chain_provided_key(&self.genesis, order, this_provided_id)).unwrap()\n      {\n        Err(ProvidedError::LocalMismatchesOnChain)?;\n      }\n      txn.commit();\n    } else {\n      let mut currently_provided = txn.get(&current_provided_key).unwrap_or(vec![]);\n      currently_provided.extend(tx_hash);\n      txn.put(current_provided_key, currently_provided);\n      txn.commit();\n\n      if !self.transactions.contains_key(order) {\n        self.transactions.insert(order, VecDeque::new());\n      }\n      self.transactions.get_mut(order).unwrap().push_back(tx);\n    }\n\n    
Ok(())\n  }\n\n  /// Complete a provided transaction, no longer proposing it nor voting for its validity.\n  pub(crate) fn complete(\n    &mut self,\n    txn: &mut D::Transaction<'_>,\n    order: &'static str,\n    block: [u8; 32],\n    tx: [u8; 32],\n  ) {\n    if let Some(next_tx) = self.transactions.get_mut(order).and_then(VecDeque::pop_front) {\n      assert_eq!(next_tx.hash(), tx);\n\n      let current_provided_key = self.current_provided_key();\n      let mut currently_provided = txn.get(&current_provided_key).unwrap();\n\n      // Find this TX's hash\n      let mut i = 0;\n      loop {\n        if currently_provided[i .. (i + 32)] == tx {\n          assert_eq!(&currently_provided.drain(i .. (i + 32)).collect::<Vec<_>>(), &tx);\n          break;\n        }\n\n        i += 32;\n        if i >= currently_provided.len() {\n          panic!(\"couldn't find completed TX in currently provided\");\n        }\n      }\n\n      txn.put(current_provided_key, currently_provided);\n    }\n\n    // bump the on-chain tx number.\n    let on_chain_key = Self::on_chain_provided_quantity_key(&self.genesis, order);\n    let block_order_key = Self::block_provided_quantity_key(&self.genesis, &block, order);\n    let mut on_chain_quantity =\n      self.db.get(&on_chain_key).map_or(0, |bytes| u32::from_le_bytes(bytes.try_into().unwrap()));\n\n    let this_provided_id = on_chain_quantity;\n    txn.put(Self::on_chain_provided_key(&self.genesis, order, this_provided_id), tx);\n\n    on_chain_quantity += 1;\n    txn.put(on_chain_key, on_chain_quantity.to_le_bytes());\n    txn.put(block_order_key, on_chain_quantity.to_le_bytes());\n  }\n}\n"
  },
  {
    "path": "coordinator/tributary/src/tendermint/mod.rs",
    "content": "use core::ops::Deref;\nuse std::{sync::Arc, collections::HashMap};\n\nuse async_trait::async_trait;\n\nuse subtle::ConstantTimeEq;\nuse zeroize::{Zeroize, Zeroizing};\n\nuse rand::{SeedableRng, seq::SliceRandom};\nuse rand_chacha::ChaCha12Rng;\n\nuse transcript::{Transcript, RecommendedTranscript};\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::{\n  group::{\n    GroupEncoding,\n    ff::{Field, PrimeField},\n  },\n  Ciphersuite,\n};\nuse schnorr::{\n  SchnorrSignature,\n  aggregate::{SchnorrAggregator, SchnorrAggregate},\n};\n\nuse serai_db::Db;\n\nuse scale::{Encode, Decode};\nuse tendermint::{\n  SignedMessageFor,\n  ext::{\n    BlockNumber, RoundNumber, Signer as SignerTrait, SignatureScheme, Weights, Block as BlockTrait,\n    BlockError as TendermintBlockError, Commit, Network,\n  },\n  SlashEvent,\n};\n\nuse tokio::sync::RwLock;\n\nuse crate::{\n  TENDERMINT_MESSAGE, TRANSACTION_MESSAGE, ReadWrite, transaction::Transaction as TransactionTrait,\n  Transaction, BlockHeader, Block, BlockError, Blockchain, P2p,\n};\n\npub mod tx;\nuse tx::TendermintTx;\n\nconst DST: &[u8] = b\"Tributary Tendermint Commit Aggregator\";\n\nfn challenge(\n  genesis: [u8; 32],\n  key: [u8; 32],\n  nonce: &[u8],\n  msg: &[u8],\n) -> <Ristretto as Ciphersuite>::F {\n  let mut transcript = RecommendedTranscript::new(b\"Tributary Chain Tendermint Message\");\n  transcript.append_message(b\"genesis\", genesis);\n  transcript.append_message(b\"key\", key);\n  transcript.append_message(b\"nonce\", nonce);\n  transcript.append_message(b\"message\", msg);\n\n  <Ristretto as Ciphersuite>::F::from_bytes_mod_order_wide(&transcript.challenge(b\"schnorr\").into())\n}\n\n#[derive(Clone, PartialEq, Eq, Debug)]\npub struct Signer {\n  genesis: [u8; 32],\n  key: Zeroizing<<Ristretto as Ciphersuite>::F>,\n}\n\nimpl Signer {\n  pub(crate) fn new(genesis: [u8; 32], key: Zeroizing<<Ristretto as Ciphersuite>::F>) -> Signer {\n    Signer { genesis, key }\n  }\n}\n\n#[async_trait]\nimpl 
SignerTrait for Signer {\n  type ValidatorId = [u8; 32];\n  type Signature = [u8; 64];\n\n  /// Returns the validator's current ID. Returns None if they aren't a current validator.\n  async fn validator_id(&self) -> Option<Self::ValidatorId> {\n    Some((Ristretto::generator() * self.key.deref()).to_bytes())\n  }\n\n  /// Sign a signature with the current validator's private key.\n  async fn sign(&self, msg: &[u8]) -> Self::Signature {\n    let mut nonce = Zeroizing::new(RecommendedTranscript::new(b\"Tributary Chain Tendermint Nonce\"));\n    nonce.append_message(b\"genesis\", self.genesis);\n    nonce.append_message(b\"key\", Zeroizing::new(self.key.deref().to_repr()).as_ref());\n    nonce.append_message(b\"message\", msg);\n    let mut nonce = nonce.challenge(b\"nonce\");\n\n    let mut nonce_arr = [0; 64];\n    nonce_arr.copy_from_slice(nonce.as_ref());\n\n    let nonce_ref: &mut [u8] = nonce.as_mut();\n    nonce_ref.zeroize();\n    let nonce_ref: &[u8] = nonce.as_ref();\n    assert_eq!(nonce_ref, [0; 64].as_ref());\n\n    let nonce =\n      Zeroizing::new(<Ristretto as Ciphersuite>::F::from_bytes_mod_order_wide(&nonce_arr));\n    nonce_arr.zeroize();\n\n    assert!(!bool::from(nonce.ct_eq(&<Ristretto as Ciphersuite>::F::ZERO)));\n\n    let challenge = challenge(\n      self.genesis,\n      (Ristretto::generator() * self.key.deref()).to_bytes(),\n      (Ristretto::generator() * nonce.deref()).to_bytes().as_ref(),\n      msg,\n    );\n\n    let sig = SchnorrSignature::<Ristretto>::sign(&self.key, nonce, challenge).serialize();\n\n    let mut res = [0; 64];\n    res.copy_from_slice(&sig);\n    res\n  }\n}\n\n#[derive(Clone, PartialEq, Eq, Debug)]\npub struct Validators {\n  genesis: [u8; 32],\n  total_weight: u64,\n  weights: HashMap<[u8; 32], u64>,\n  robin: Vec<[u8; 32]>,\n}\n\nimpl Validators {\n  pub(crate) fn new(\n    genesis: [u8; 32],\n    validators: Vec<(<Ristretto as Ciphersuite>::G, u64)>,\n  ) -> Option<Validators> {\n    let mut total_weight = 0;\n   
 let mut weights = HashMap::new();\n\n    let mut transcript = RecommendedTranscript::new(b\"Round Robin Randomization\");\n    let mut robin = vec![];\n    for (validator, weight) in validators {\n      let validator = validator.to_bytes();\n      if weight == 0 {\n        return None;\n      }\n      total_weight += weight;\n      weights.insert(validator, weight);\n\n      transcript.append_message(b\"validator\", validator);\n      transcript.append_message(b\"weight\", weight.to_le_bytes());\n      robin.extend(vec![validator; usize::try_from(weight).unwrap()]);\n    }\n    robin.shuffle(&mut ChaCha12Rng::from_seed(transcript.rng_seed(b\"robin\")));\n\n    Some(Validators { genesis, total_weight, weights, robin })\n  }\n}\n\nimpl SignatureScheme for Validators {\n  type ValidatorId = [u8; 32];\n  type Signature = [u8; 64];\n  type AggregateSignature = Vec<u8>;\n  type Signer = Arc<Signer>;\n\n  #[must_use]\n  fn verify(&self, validator: Self::ValidatorId, msg: &[u8], sig: &Self::Signature) -> bool {\n    if !self.weights.contains_key(&validator) {\n      return false;\n    }\n    let Ok(validator_point) = Ristretto::read_G::<&[u8]>(&mut validator.as_ref()) else {\n      return false;\n    };\n    let Ok(actual_sig) = SchnorrSignature::<Ristretto>::read::<&[u8]>(&mut sig.as_ref()) else {\n      return false;\n    };\n    actual_sig.verify(validator_point, challenge(self.genesis, validator, &sig[.. 
32], msg))\n  }\n\n  fn aggregate(\n    &self,\n    validators: &[Self::ValidatorId],\n    msg: &[u8],\n    sigs: &[Self::Signature],\n  ) -> Self::AggregateSignature {\n    assert_eq!(validators.len(), sigs.len());\n\n    let mut aggregator = SchnorrAggregator::<Ristretto>::new(DST);\n    for (key, sig) in validators.iter().zip(sigs) {\n      let actual_sig = SchnorrSignature::<Ristretto>::read::<&[u8]>(&mut sig.as_ref()).unwrap();\n      let challenge = challenge(self.genesis, *key, actual_sig.R.to_bytes().as_ref(), msg);\n      aggregator.aggregate(challenge, actual_sig);\n    }\n\n    let aggregate = aggregator.complete().unwrap();\n    aggregate.serialize()\n  }\n\n  #[must_use]\n  fn verify_aggregate(\n    &self,\n    signers: &[Self::ValidatorId],\n    msg: &[u8],\n    sig: &Self::AggregateSignature,\n  ) -> bool {\n    let Ok(aggregate) = SchnorrAggregate::<Ristretto>::read::<&[u8]>(&mut sig.as_slice()) else {\n      return false;\n    };\n\n    if signers.len() != aggregate.Rs().len() {\n      return false;\n    }\n\n    let mut challenges = vec![];\n    for (key, nonce) in signers.iter().zip(aggregate.Rs()) {\n      challenges.push(challenge(self.genesis, *key, nonce.to_bytes().as_ref(), msg));\n    }\n\n    aggregate.verify(\n      DST,\n      signers\n        .iter()\n        .zip(challenges)\n        .map(|(s, c)| (<Ristretto as Ciphersuite>::read_G(&mut s.as_slice()).unwrap(), c))\n        .collect::<Vec<_>>()\n        .as_slice(),\n    )\n  }\n}\n\nimpl Weights for Validators {\n  type ValidatorId = [u8; 32];\n\n  fn total_weight(&self) -> u64 {\n    self.total_weight\n  }\n  fn weight(&self, validator: Self::ValidatorId) -> u64 {\n    self.weights[&validator]\n  }\n  fn proposer(&self, block: BlockNumber, round: RoundNumber) -> Self::ValidatorId {\n    let block = usize::try_from(block.0).unwrap();\n    let round = usize::try_from(round.0).unwrap();\n    // If multiple rounds are used, a naive block + round would cause the same index to be chosen\n  
  // in quick succession.\n    // Accordingly, if we use additional rounds, jump halfway around.\n    // While this is still game-able, it's not explicitly reusing indexes immediately after each\n    // other.\n    self.robin\n      [(block + (if round == 0 { 0 } else { round + (self.robin.len() / 2) })) % self.robin.len()]\n  }\n}\n\n#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)]\npub struct TendermintBlock(pub Vec<u8>);\nimpl BlockTrait for TendermintBlock {\n  type Id = [u8; 32];\n  fn id(&self) -> Self::Id {\n    BlockHeader::read::<&[u8]>(&mut self.0.as_ref()).unwrap().hash()\n  }\n}\n\n#[derive(Clone, Debug)]\npub struct TendermintNetwork<D: Db, T: TransactionTrait, P: P2p> {\n  pub(crate) genesis: [u8; 32],\n\n  pub(crate) signer: Arc<Signer>,\n  pub(crate) validators: Arc<Validators>,\n  pub(crate) blockchain: Arc<RwLock<Blockchain<D, T>>>,\n\n  pub(crate) p2p: P,\n}\n\npub const BLOCK_PROCESSING_TIME: u32 = 999;\npub const LATENCY_TIME: u32 = 1667;\npub const TARGET_BLOCK_TIME: u32 = BLOCK_PROCESSING_TIME + (3 * LATENCY_TIME);\n\n#[async_trait]\nimpl<D: Db, T: TransactionTrait, P: P2p> Network for TendermintNetwork<D, T, P> {\n  type Db = D;\n\n  type ValidatorId = [u8; 32];\n  type SignatureScheme = Arc<Validators>;\n  type Weights = Arc<Validators>;\n  type Block = TendermintBlock;\n\n  // These are in milliseconds and create a six-second block time.\n  // The block time is the latency on message delivery (where a message is some piece of data\n  // embedded in a transaction) times three plus the block processing time, hence why it should be\n  // kept low.\n  const BLOCK_PROCESSING_TIME: u32 = BLOCK_PROCESSING_TIME;\n  const LATENCY_TIME: u32 = LATENCY_TIME;\n\n  fn signer(&self) -> Arc<Signer> {\n    self.signer.clone()\n  }\n  fn signature_scheme(&self) -> Arc<Validators> {\n    self.validators.clone()\n  }\n  fn weights(&self) -> Arc<Validators> {\n    self.validators.clone()\n  }\n\n  async fn broadcast(&mut self, msg: 
SignedMessageFor<Self>) {\n    let mut to_broadcast = vec![TENDERMINT_MESSAGE];\n    to_broadcast.extend(msg.encode());\n    self.p2p.broadcast(self.genesis, to_broadcast).await\n  }\n\n  async fn slash(&mut self, validator: Self::ValidatorId, slash_event: SlashEvent) {\n    log::error!(\n      \"validator {} triggered a slash event on tributary {} (with evidence: {})\",\n      hex::encode(validator),\n      hex::encode(self.genesis),\n      matches!(slash_event, SlashEvent::WithEvidence(_)),\n    );\n\n    let signer = self.signer();\n    let Some(tx) = (match slash_event {\n      SlashEvent::WithEvidence(evidence) => {\n        // create an unsigned evidence tx\n        Some(TendermintTx::SlashEvidence(evidence))\n      }\n      SlashEvent::Id(_reason, _block, _round) => {\n        // TODO: Increase locally observed slash points\n        None\n      }\n    }) else {\n      return;\n    };\n\n    // add tx to blockchain and broadcast to peers\n    let mut to_broadcast = vec![TRANSACTION_MESSAGE];\n    tx.write(&mut to_broadcast).unwrap();\n    if self.blockchain.write().await.add_transaction::<Self>(\n      true,\n      Transaction::Tendermint(tx),\n      &self.signature_scheme(),\n    ) == Ok(true)\n    {\n      self.p2p.broadcast(signer.genesis, to_broadcast).await;\n    }\n  }\n\n  async fn validate(&self, block: &Self::Block) -> Result<(), TendermintBlockError> {\n    let block =\n      Block::read::<&[u8]>(&mut block.0.as_ref()).map_err(|_| TendermintBlockError::Fatal)?;\n    self\n      .blockchain\n      .read()\n      .await\n      .verify_block::<Self>(&block, &self.signature_scheme(), false)\n      .map_err(|e| match e {\n        BlockError::NonLocalProvided(_) => TendermintBlockError::Temporal,\n        _ => {\n          log::warn!(\"Tributary Tendermint validate returning BlockError::Fatal due to {e:?}\");\n          TendermintBlockError::Fatal\n        }\n      })\n  }\n\n  async fn add_block(\n    &mut self,\n    serialized_block: Self::Block,\n    
commit: Commit<Self::SignatureScheme>,\n  ) -> Option<Self::Block> {\n    let invalid_block = || {\n      // There's a fatal flaw in the code, it's behind a hard fork, or the validators turned\n      // malicious\n      // All justify a halt to then achieve social consensus from\n      // TODO: Under multiple validator sets, a small validator set turning malicious knocks\n      // off the entire network. That's an unacceptable DoS.\n      panic!(\"validators added invalid block to tributary {}\", hex::encode(self.genesis));\n    };\n\n    // Tendermint should only produce valid commits\n    assert!(self.verify_commit(serialized_block.id(), &commit));\n\n    let Ok(block) = Block::read::<&[u8]>(&mut serialized_block.0.as_ref()) else {\n      return invalid_block();\n    };\n\n    let encoded_commit = commit.encode();\n    loop {\n      let block_res = self.blockchain.write().await.add_block::<Self>(\n        &block,\n        encoded_commit.clone(),\n        &self.signature_scheme(),\n      );\n      match block_res {\n        Ok(()) => {\n          // If we successfully added this block, break\n          break;\n        }\n        Err(BlockError::NonLocalProvided(hash)) => {\n          log::error!(\n            \"missing provided transaction {} which other validators on tributary {} had\",\n            hex::encode(hash),\n            hex::encode(self.genesis)\n          );\n          tokio::time::sleep(core::time::Duration::from_secs(5)).await;\n        }\n        _ => return invalid_block(),\n      }\n    }\n\n    Some(TendermintBlock(\n      self.blockchain.write().await.build_block::<Self>(&self.signature_scheme()).serialize(),\n    ))\n  }\n}\n"
  },
  {
    "path": "coordinator/tributary/src/tendermint/tx.rs",
    "content": "use std::io;\n\nuse scale::{Encode, Decode, IoReader};\n\nuse blake2::{Digest, Blake2s256};\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::Ciphersuite;\n\nuse crate::{\n  transaction::{Transaction, TransactionKind, TransactionError},\n  ReadWrite,\n};\n\nuse tendermint::{\n  verify_tendermint_evidence,\n  ext::{Network, Commit},\n};\n\npub use tendermint::{Evidence, decode_signed_message};\n\n#[allow(clippy::large_enum_variant)]\n#[derive(Clone, PartialEq, Eq, Debug)]\npub enum TendermintTx {\n  SlashEvidence(Evidence),\n}\n\nimpl ReadWrite for TendermintTx {\n  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {\n    Evidence::decode(&mut IoReader(reader))\n      .map(TendermintTx::SlashEvidence)\n      .map_err(|_| io::Error::new(io::ErrorKind::InvalidData, \"invalid evidence format\"))\n  }\n\n  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {\n    match self {\n      TendermintTx::SlashEvidence(ev) => writer.write_all(&ev.encode()),\n    }\n  }\n}\n\nimpl Transaction for TendermintTx {\n  fn kind(&self) -> TransactionKind<'_> {\n    // There's an assert elsewhere in the codebase expecting this behavior\n    // If we do want to add Provided/Signed TendermintTxs, review the implications carefully\n    TransactionKind::Unsigned\n  }\n\n  fn hash(&self) -> [u8; 32] {\n    Blake2s256::digest(self.serialize()).into()\n  }\n\n  fn sig_hash(&self, _genesis: [u8; 32]) -> <Ristretto as Ciphersuite>::F {\n    match self {\n      TendermintTx::SlashEvidence(_) => panic!(\"sig_hash called on slash evidence transaction\"),\n    }\n  }\n\n  fn verify(&self) -> Result<(), TransactionError> {\n    Ok(())\n  }\n}\n\npub(crate) fn verify_tendermint_tx<N: Network>(\n  tx: &TendermintTx,\n  schema: &N::SignatureScheme,\n  commit: impl Fn(u64) -> Option<Commit<N::SignatureScheme>>,\n) -> Result<(), TransactionError> {\n  tx.verify()?;\n\n  match tx {\n    TendermintTx::SlashEvidence(ev) => verify_tendermint_evidence::<N>(ev, schema, 
commit)\n      .map_err(|_| TransactionError::InvalidContent)?,\n  }\n\n  Ok(())\n}\n"
  },
  {
    "path": "coordinator/tributary/src/tests/block.rs",
    "content": "use std::{sync::Arc, io, collections::HashMap, fmt::Debug};\n\nuse blake2::{Digest, Blake2s256};\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::{\n  group::{ff::Field, Group},\n  Ciphersuite,\n};\nuse schnorr::SchnorrSignature;\n\nuse serai_db::MemDb;\nuse tendermint::ext::Commit;\n\nuse crate::{\n  ReadWrite, BlockError, Block, Transaction,\n  tests::p2p::DummyP2p,\n  transaction::{TransactionError, Signed, TransactionKind, Transaction as TransactionTrait},\n  tendermint::{TendermintNetwork, Validators},\n};\n\ntype N = TendermintNetwork<MemDb, NonceTransaction, DummyP2p>;\n\n// A transaction solely defined by its nonce and a distinguisher (to allow creating distinct TXs\n// sharing a nonce).\n#[derive(Clone, PartialEq, Eq, Debug)]\nstruct NonceTransaction(u32, u8, Signed);\n\nimpl NonceTransaction {\n  fn new(nonce: u32, distinguisher: u8) -> Self {\n    NonceTransaction(\n      nonce,\n      distinguisher,\n      Signed {\n        signer: <Ristretto as Ciphersuite>::G::identity(),\n        nonce,\n        signature: SchnorrSignature::<Ristretto> {\n          R: <Ristretto as Ciphersuite>::G::identity(),\n          s: <Ristretto as Ciphersuite>::F::ZERO,\n        },\n      },\n    )\n  }\n}\n\nimpl ReadWrite for NonceTransaction {\n  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {\n    let mut nonce = [0; 4];\n    reader.read_exact(&mut nonce)?;\n    let nonce = u32::from_le_bytes(nonce);\n\n    let mut distinguisher = [0];\n    reader.read_exact(&mut distinguisher)?;\n\n    Ok(NonceTransaction::new(nonce, distinguisher[0]))\n  }\n\n  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {\n    writer.write_all(&self.0.to_le_bytes())?;\n    writer.write_all(&[self.1])\n  }\n}\n\nimpl TransactionTrait for NonceTransaction {\n  fn kind(&self) -> TransactionKind<'_> {\n    TransactionKind::Signed(vec![], &self.2)\n  }\n\n  fn hash(&self) -> [u8; 32] {\n    Blake2s256::digest([self.0.to_le_bytes().as_ref(), 
&[self.1]].concat()).into()\n  }\n\n  fn verify(&self) -> Result<(), TransactionError> {\n    Ok(())\n  }\n}\n\n#[test]\nfn empty_block() {\n  const GENESIS: [u8; 32] = [0xff; 32];\n  const LAST: [u8; 32] = [0x01; 32];\n  let validators = Arc::new(Validators::new(GENESIS, vec![]).unwrap());\n  let commit = |_: u64| -> Option<Commit<Arc<Validators>>> {\n    Some(Commit::<Arc<Validators>> { end_time: 0, validators: vec![], signature: vec![] })\n  };\n  let provided_or_unsigned_in_chain = |_: [u8; 32]| false;\n  Block::<NonceTransaction>::new(LAST, vec![], vec![])\n    .verify::<N, _>(\n      GENESIS,\n      LAST,\n      HashMap::new(),\n      &mut |_, _| None,\n      &validators,\n      commit,\n      provided_or_unsigned_in_chain,\n      false,\n    )\n    .unwrap();\n}\n\n#[test]\nfn duplicate_nonces() {\n  const GENESIS: [u8; 32] = [0xff; 32];\n  const LAST: [u8; 32] = [0x01; 32];\n\n  let validators = Arc::new(Validators::new(GENESIS, vec![]).unwrap());\n\n  // Run once without duplicating a nonce, and once with, so that's confirmed to be the faulty\n  // component\n  for i in [1, 0] {\n    let mut mempool = vec![];\n    let mut insert = |tx: NonceTransaction| mempool.push(Transaction::Application(tx));\n    insert(NonceTransaction::new(0, 0));\n    insert(NonceTransaction::new(i, 1));\n\n    let commit = |_: u64| -> Option<Commit<Arc<Validators>>> {\n      Some(Commit::<Arc<Validators>> { end_time: 0, validators: vec![], signature: vec![] })\n    };\n    let provided_or_unsigned_in_chain = |_: [u8; 32]| false;\n\n    let mut last_nonce = 0;\n    let res = Block::new(LAST, vec![], mempool).verify::<N, _>(\n      GENESIS,\n      LAST,\n      HashMap::new(),\n      &mut |_, _| {\n        let res = last_nonce;\n        last_nonce += 1;\n        Some(res)\n      },\n      &validators,\n      commit,\n      provided_or_unsigned_in_chain,\n      false,\n    );\n    if i == 1 {\n      res.unwrap();\n    } else {\n      assert_eq!(res, 
Err(BlockError::TransactionError(TransactionError::InvalidNonce)));\n    }\n  }\n}\n"
  },
  {
    "path": "coordinator/tributary/src/tests/blockchain.rs",
    "content": "use core::ops::Deref;\nuse std::{\n  collections::{VecDeque, HashMap},\n  sync::Arc,\n  io,\n};\n\nuse zeroize::Zeroizing;\nuse rand::rngs::OsRng;\n\nuse blake2::{Digest, Blake2s256};\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::{group::ff::Field, Ciphersuite};\n\nuse serai_db::{DbTxn, Db, MemDb};\n\nuse crate::{\n  ReadWrite, TransactionKind,\n  transaction::Transaction as TransactionTrait,\n  TransactionError, Transaction, ProvidedError, ProvidedTransactions, merkle, BlockError, Block,\n  Blockchain,\n  tendermint::{TendermintNetwork, Validators, Signer, TendermintBlock},\n  tests::{\n    ProvidedTransaction, SignedTransaction, random_provided_transaction, p2p::DummyP2p,\n    new_genesis, random_evidence_tx,\n  },\n};\n\ntype N = TendermintNetwork<MemDb, SignedTransaction, DummyP2p>;\n\nfn new_blockchain<T: TransactionTrait>(\n  genesis: [u8; 32],\n  participants: &[<Ristretto as Ciphersuite>::G],\n) -> (MemDb, Blockchain<MemDb, T>) {\n  let db = MemDb::new();\n  let blockchain = Blockchain::new(db.clone(), genesis, participants);\n  assert_eq!(blockchain.tip(), genesis);\n  assert_eq!(blockchain.block_number(), 0);\n  (db, blockchain)\n}\n\n#[test]\nfn block_addition() {\n  let genesis = new_genesis();\n  let validators = Arc::new(Validators::new(genesis, vec![]).unwrap());\n  let (db, mut blockchain) = new_blockchain::<SignedTransaction>(genesis, &[]);\n  let block = blockchain.build_block::<N>(&validators);\n\n  assert_eq!(block.header.parent, genesis);\n  assert_eq!(block.header.transactions, [0; 32]);\n  blockchain.verify_block::<N>(&block, &validators, false).unwrap();\n  assert!(blockchain.add_block::<N>(&block, vec![], &validators).is_ok());\n  assert_eq!(blockchain.tip(), block.hash());\n  assert_eq!(blockchain.block_number(), 1);\n  assert_eq!(\n    Blockchain::<MemDb, SignedTransaction>::block_after(&db, genesis, &block.parent()).unwrap(),\n    block.hash()\n  );\n}\n\n#[test]\nfn invalid_block() {\n  let genesis = 
new_genesis();\n  let validators = Arc::new(Validators::new(genesis, vec![]).unwrap());\n  let (_, mut blockchain) = new_blockchain::<SignedTransaction>(genesis, &[]);\n\n  let block = blockchain.build_block::<N>(&validators);\n\n  // Mutate parent\n  {\n    #[allow(clippy::redundant_clone)] // False positive\n    let mut block = block.clone();\n    block.header.parent = Blake2s256::digest(block.header.parent).into();\n    assert!(blockchain.verify_block::<N>(&block, &validators, false).is_err());\n  }\n\n  // Mutate transactions merkle\n  {\n    let mut block = block;\n    block.header.transactions = Blake2s256::digest(block.header.transactions).into();\n    assert!(blockchain.verify_block::<N>(&block, &validators, false).is_err());\n  }\n\n  let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));\n  let tx = crate::tests::signed_transaction(&mut OsRng, genesis, &key, 0);\n\n  // Not a participant\n  {\n    // Manually create the block to bypass build_block's checks\n    let block = Block::new(blockchain.tip(), vec![], vec![Transaction::Application(tx.clone())]);\n    assert_eq!(block.header.transactions, merkle(&[tx.hash()]));\n    assert!(blockchain.verify_block::<N>(&block, &validators, false).is_err());\n  }\n\n  // Run the rest of the tests with them as a participant\n  let (_, blockchain) = new_blockchain(genesis, &[tx.1.signer]);\n\n  // Re-run the not a participant block to make sure it now works\n  {\n    let block = Block::new(blockchain.tip(), vec![], vec![Transaction::Application(tx.clone())]);\n    assert_eq!(block.header.transactions, merkle(&[tx.hash()]));\n    blockchain.verify_block::<N>(&block, &validators, false).unwrap();\n  }\n\n  {\n    // Add a valid transaction\n    let (_, mut blockchain) = new_blockchain(genesis, &[tx.1.signer]);\n    blockchain\n      .add_transaction::<N>(true, Transaction::Application(tx.clone()), &validators)\n      .unwrap();\n    let mut block = blockchain.build_block::<N>(&validators);\n    
assert_eq!(block.header.transactions, merkle(&[tx.hash()]));\n    blockchain.verify_block::<N>(&block, &validators, false).unwrap();\n\n    // And verify mutating the transactions merkle now causes a failure\n    block.header.transactions = merkle(&[]);\n    assert!(blockchain.verify_block::<N>(&block, &validators, false).is_err());\n  }\n\n  {\n    // Invalid nonce\n    let tx = crate::tests::signed_transaction(&mut OsRng, genesis, &key, 5);\n    // Manually create the block to bypass build_block's checks\n    let block = Block::new(blockchain.tip(), vec![], vec![Transaction::Application(tx)]);\n    assert!(blockchain.verify_block::<N>(&block, &validators, false).is_err());\n  }\n\n  {\n    // Invalid signature\n    let (_, mut blockchain) = new_blockchain(genesis, &[tx.1.signer]);\n    blockchain.add_transaction::<N>(true, Transaction::Application(tx), &validators).unwrap();\n    let mut block = blockchain.build_block::<N>(&validators);\n    blockchain.verify_block::<N>(&block, &validators, false).unwrap();\n    match &mut block.transactions[0] {\n      Transaction::Application(tx) => {\n        tx.1.signature.s += <Ristretto as Ciphersuite>::F::ONE;\n      }\n      _ => panic!(\"non-signed tx found\"),\n    }\n    assert!(blockchain.verify_block::<N>(&block, &validators, false).is_err());\n\n    // Make sure this isn't because the merkle changed due to the transaction hash including the\n    // signature (which it explicitly isn't allowed to anyways)\n    assert_eq!(block.header.transactions, merkle(&[block.transactions[0].hash()]));\n  }\n}\n\n#[test]\nfn signed_transaction() {\n  let genesis = new_genesis();\n  let validators = Arc::new(Validators::new(genesis, vec![]).unwrap());\n  let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));\n  let tx = crate::tests::signed_transaction(&mut OsRng, genesis, &key, 0);\n  let signer = tx.1.signer;\n\n  let (_, mut blockchain) = new_blockchain::<SignedTransaction>(genesis, &[signer]);\n  
assert_eq!(blockchain.next_nonce(&signer, &[]), Some(0));\n\n  let test = |blockchain: &mut Blockchain<MemDb, SignedTransaction>,\n              mempool: Vec<Transaction<SignedTransaction>>| {\n    let tip = blockchain.tip();\n    for tx in mempool.clone() {\n      let Transaction::Application(tx) = tx else {\n        panic!(\"tendermint tx found\");\n      };\n      let next_nonce = blockchain.next_nonce(&signer, &[]).unwrap();\n      blockchain.add_transaction::<N>(true, Transaction::Application(tx), &validators).unwrap();\n      assert_eq!(next_nonce + 1, blockchain.next_nonce(&signer, &[]).unwrap());\n    }\n    let block = blockchain.build_block::<N>(&validators);\n    assert_eq!(block, Block::new(blockchain.tip(), vec![], mempool.clone()));\n    assert_eq!(blockchain.tip(), tip);\n    assert_eq!(block.header.parent, tip);\n\n    // Make sure all transactions were included\n    assert_eq!(block.transactions, mempool);\n    // Make sure the merkle was correct\n    assert_eq!(\n      block.header.transactions,\n      merkle(&mempool.iter().map(Transaction::hash).collect::<Vec<_>>())\n    );\n\n    // Verify and add the block\n    blockchain.verify_block::<N>(&block, &validators, false).unwrap();\n    assert!(blockchain.add_block::<N>(&block, vec![], &validators).is_ok());\n    assert_eq!(blockchain.tip(), block.hash());\n  };\n\n  // Test with a single nonce\n  test(&mut blockchain, vec![Transaction::Application(tx)]);\n  assert_eq!(blockchain.next_nonce(&signer, &[]), Some(1));\n\n  // Test with a flood of nonces\n  let mut mempool = vec![];\n  for nonce in 1 .. 
64 {\n    mempool.push(Transaction::Application(crate::tests::signed_transaction(\n      &mut OsRng, genesis, &key, nonce,\n    )));\n  }\n  test(&mut blockchain, mempool);\n  assert_eq!(blockchain.next_nonce(&signer, &[]), Some(64));\n}\n\n#[test]\nfn provided_transaction() {\n  let genesis = new_genesis();\n  let validators = Arc::new(Validators::new(genesis, vec![]).unwrap());\n  let (db, mut blockchain) = new_blockchain::<ProvidedTransaction>(genesis, &[]);\n\n  let tx = random_provided_transaction(&mut OsRng, \"order1\");\n\n  // This should be providable\n  let mut temp_db = MemDb::new();\n  let mut txs = ProvidedTransactions::<_, ProvidedTransaction>::new(temp_db.clone(), genesis);\n  txs.provide(tx.clone()).unwrap();\n  assert_eq!(txs.provide(tx.clone()), Err(ProvidedError::AlreadyProvided));\n  assert_eq!(\n    ProvidedTransactions::<_, ProvidedTransaction>::new(temp_db.clone(), genesis).transactions,\n    HashMap::from([(\"order1\", VecDeque::from([tx.clone()]))]),\n  );\n  let mut txn = temp_db.txn();\n  txs.complete(&mut txn, \"order1\", [0u8; 32], tx.hash());\n  txn.commit();\n  assert!(ProvidedTransactions::<_, ProvidedTransaction>::new(db.clone(), genesis)\n    .transactions\n    .is_empty());\n\n  // case we have the block's provided txs in our local as well\n  {\n    // Non-provided transactions should fail verification because we don't have them locally.\n    let block = Block::new(blockchain.tip(), vec![tx.clone()], vec![]);\n    assert!(blockchain.verify_block::<N>(&block, &validators, false).is_err());\n\n    // Provided transactions should pass verification\n    blockchain.provide_transaction(tx.clone()).unwrap();\n    blockchain.verify_block::<N>(&block, &validators, false).unwrap();\n\n    // add_block should work for verified blocks\n    assert!(blockchain.add_block::<N>(&block, vec![], &validators).is_ok());\n\n    let block = Block::new(blockchain.tip(), vec![tx.clone()], vec![]);\n\n    // The provided transaction should no longer 
considered provided but added to chain,\n    // causing this error\n    assert_eq!(\n      blockchain.verify_block::<N>(&block, &validators, false),\n      Err(BlockError::ProvidedAlreadyIncluded)\n    );\n  }\n\n  // case we don't have the block's provided txs in our local\n  {\n    let tx1 = random_provided_transaction(&mut OsRng, \"order1\");\n    let tx2 = random_provided_transaction(&mut OsRng, \"order1\");\n    let tx3 = random_provided_transaction(&mut OsRng, \"order2\");\n    let tx4 = random_provided_transaction(&mut OsRng, \"order2\");\n\n    // add_block DOES NOT fail for unverified provided transactions if told to add them,\n    // since now we can have them later.\n    let block1 = Block::new(blockchain.tip(), vec![tx1.clone(), tx3.clone()], vec![]);\n    assert!(blockchain.add_block::<N>(&block1, vec![], &validators).is_ok());\n\n    // in fact, we can have many blocks that have provided txs that we don't have locally.\n    let block2 = Block::new(blockchain.tip(), vec![tx2.clone(), tx4.clone()], vec![]);\n    assert!(blockchain.add_block::<N>(&block2, vec![], &validators).is_ok());\n\n    // make sure we won't return ok for the block before we actually got the txs\n    let TransactionKind::Provided(order) = tx1.kind() else { panic!(\"tx wasn't provided\") };\n    assert!(!Blockchain::<MemDb, ProvidedTransaction>::locally_provided_txs_in_block(\n      &db,\n      &genesis,\n      &block1.hash(),\n      order\n    ));\n    // provide the first tx\n    blockchain.provide_transaction(tx1).unwrap();\n    // it should be ok for this order now, since the second tx has different order.\n    assert!(Blockchain::<MemDb, ProvidedTransaction>::locally_provided_txs_in_block(\n      &db,\n      &genesis,\n      &block1.hash(),\n      order\n    ));\n\n    // give the second tx\n    let TransactionKind::Provided(order) = tx3.kind() else { panic!(\"tx wasn't provided\") };\n    assert!(!Blockchain::<MemDb, ProvidedTransaction>::locally_provided_txs_in_block(\n      
&db,\n      &genesis,\n      &block1.hash(),\n      order\n    ));\n    blockchain.provide_transaction(tx3).unwrap();\n    // it should be ok now for the first block\n    assert!(Blockchain::<MemDb, ProvidedTransaction>::locally_provided_txs_in_block(\n      &db,\n      &genesis,\n      &block1.hash(),\n      order\n    ));\n\n    // provide the second block txs\n    let TransactionKind::Provided(order) = tx4.kind() else { panic!(\"tx wasn't provided\") };\n    // not ok yet\n    assert!(!Blockchain::<MemDb, ProvidedTransaction>::locally_provided_txs_in_block(\n      &db,\n      &genesis,\n      &block2.hash(),\n      order\n    ));\n    blockchain.provide_transaction(tx4).unwrap();\n    // ok now\n    assert!(Blockchain::<MemDb, ProvidedTransaction>::locally_provided_txs_in_block(\n      &db,\n      &genesis,\n      &block2.hash(),\n      order\n    ));\n\n    // provide the second block txs\n    let TransactionKind::Provided(order) = tx2.kind() else { panic!(\"tx wasn't provided\") };\n    assert!(!Blockchain::<MemDb, ProvidedTransaction>::locally_provided_txs_in_block(\n      &db,\n      &genesis,\n      &block2.hash(),\n      order\n    ));\n    blockchain.provide_transaction(tx2).unwrap();\n    assert!(Blockchain::<MemDb, ProvidedTransaction>::locally_provided_txs_in_block(\n      &db,\n      &genesis,\n      &block2.hash(),\n      order\n    ));\n  }\n}\n\n#[tokio::test]\nasync fn tendermint_evidence_tx() {\n  let genesis = new_genesis();\n  let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));\n  let signer = Signer::new(genesis, key.clone());\n  let signer_id = Ristretto::generator() * key.deref();\n  let validators = Arc::new(Validators::new(genesis, vec![(signer_id, 1)]).unwrap());\n\n  let (_, mut blockchain) = new_blockchain::<SignedTransaction>(genesis, &[]);\n\n  let test = |blockchain: &mut Blockchain<MemDb, SignedTransaction>,\n              mempool: Vec<Transaction<SignedTransaction>>,\n              validators: 
Arc<Validators>| {\n    let tip = blockchain.tip();\n    for tx in mempool.clone() {\n      let Transaction::Tendermint(tx) = tx else {\n        panic!(\"non-tendermint tx found\");\n      };\n      blockchain.add_transaction::<N>(true, Transaction::Tendermint(tx), &validators).unwrap();\n    }\n    let block = blockchain.build_block::<N>(&validators);\n    assert_eq!(blockchain.tip(), tip);\n    assert_eq!(block.header.parent, tip);\n\n    // Make sure all transactions were included\n    for bt in &block.transactions {\n      assert!(mempool.contains(bt));\n    }\n\n    // Verify and add the block\n    blockchain.verify_block::<N>(&block, &validators, false).unwrap();\n    assert!(blockchain.add_block::<N>(&block, vec![], &validators).is_ok());\n    assert_eq!(blockchain.tip(), block.hash());\n  };\n\n  // test with single tx\n  let tx = random_evidence_tx::<N>(signer.into(), TendermintBlock(vec![0x12])).await;\n  test(&mut blockchain, vec![Transaction::Tendermint(tx)], validators);\n\n  // test with multiple txs\n  let mut mempool: Vec<Transaction<SignedTransaction>> = vec![];\n  let mut signers = vec![];\n  for _ in 0 .. 
5 {\n    let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));\n    let signer = Signer::new(genesis, key.clone());\n    let signer_id = Ristretto::generator() * key.deref();\n    signers.push((signer_id, 1));\n    mempool.push(Transaction::Tendermint(\n      random_evidence_tx::<N>(signer.into(), TendermintBlock(vec![0x12])).await,\n    ));\n  }\n\n  // update validators\n  let validators = Arc::new(Validators::new(genesis, signers).unwrap());\n  test(&mut blockchain, mempool, validators);\n}\n\n#[tokio::test]\nasync fn block_tx_ordering() {\n  #[derive(Debug, PartialEq, Eq, Clone)]\n  enum SignedTx {\n    Signed(Box<SignedTransaction>),\n    Provided(Box<ProvidedTransaction>),\n  }\n  impl ReadWrite for SignedTx {\n    fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {\n      let mut kind = [0];\n      reader.read_exact(&mut kind)?;\n      match kind[0] {\n        0 => Ok(SignedTx::Signed(Box::new(SignedTransaction::read(reader)?))),\n        1 => Ok(SignedTx::Provided(Box::new(ProvidedTransaction::read(reader)?))),\n        _ => Err(io::Error::other(\"invalid transaction type\")),\n      }\n    }\n\n    fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {\n      match self {\n        SignedTx::Signed(signed) => {\n          writer.write_all(&[0])?;\n          signed.write(writer)\n        }\n        SignedTx::Provided(pro) => {\n          writer.write_all(&[1])?;\n          pro.write(writer)\n        }\n      }\n    }\n  }\n\n  impl TransactionTrait for SignedTx {\n    fn kind(&self) -> TransactionKind<'_> {\n      match self {\n        SignedTx::Signed(signed) => signed.kind(),\n        SignedTx::Provided(pro) => pro.kind(),\n      }\n    }\n\n    fn hash(&self) -> [u8; 32] {\n      match self {\n        SignedTx::Signed(signed) => signed.hash(),\n        SignedTx::Provided(pro) => pro.hash(),\n      }\n    }\n\n    fn verify(&self) -> Result<(), TransactionError> {\n      Ok(())\n    }\n  }\n\n  let genesis = 
new_genesis();\n  let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));\n\n  // signer\n  let signer = crate::tests::signed_transaction(&mut OsRng, genesis, &key, 0).1.signer;\n  let validators = Arc::new(Validators::new(genesis, vec![(signer, 1)]).unwrap());\n\n  let (_, mut blockchain) = new_blockchain::<SignedTx>(genesis, &[signer]);\n  let tip = blockchain.tip();\n\n  // add txs\n  let mut mempool = vec![];\n  let mut provided_txs = vec![];\n  for i in 0 .. 128 {\n    let signed_tx = Transaction::Application(SignedTx::Signed(Box::new(\n      crate::tests::signed_transaction(&mut OsRng, genesis, &key, i),\n    )));\n    blockchain.add_transaction::<N>(true, signed_tx.clone(), &validators).unwrap();\n    mempool.push(signed_tx);\n\n    let unsigned_tx = Transaction::Tendermint(\n      random_evidence_tx::<N>(\n        Signer::new(genesis, key.clone()).into(),\n        TendermintBlock(vec![u8::try_from(i).unwrap()]),\n      )\n      .await,\n    );\n    blockchain.add_transaction::<N>(true, unsigned_tx.clone(), &validators).unwrap();\n    mempool.push(unsigned_tx);\n\n    let provided_tx =\n      SignedTx::Provided(Box::new(random_provided_transaction(&mut OsRng, \"order1\")));\n    blockchain.provide_transaction(provided_tx.clone()).unwrap();\n    provided_txs.push(provided_tx);\n  }\n  let block = blockchain.build_block::<N>(&validators);\n\n  assert_eq!(blockchain.tip(), tip);\n  assert_eq!(block.header.parent, tip);\n\n  // Make sure all transactions were included\n  assert_eq!(block.transactions.len(), 3 * 128);\n  for bt in &block.transactions[128 ..] 
{\n    assert!(mempool.contains(bt));\n  }\n\n  // check the tx order\n  let txs = &block.transactions;\n  for tx in txs.iter().take(128) {\n    assert!(matches!(tx.kind(), TransactionKind::Provided(..)));\n  }\n  for tx in txs.iter().take(128).skip(128) {\n    assert!(matches!(tx.kind(), TransactionKind::Unsigned));\n  }\n  for tx in txs.iter().take(128).skip(256) {\n    assert!(matches!(tx.kind(), TransactionKind::Signed(..)));\n  }\n\n  // should be a valid block\n  blockchain.verify_block::<N>(&block, &validators, false).unwrap();\n\n  // Unsigned before Provided\n  {\n    let mut block = block.clone();\n    // Doesn't use swap to preserve the order of Provided, as that's checked before kind ordering\n    let unsigned = block.transactions.remove(128);\n    block.transactions.insert(0, unsigned);\n    assert_eq!(\n      blockchain.verify_block::<N>(&block, &validators, false).unwrap_err(),\n      BlockError::WrongTransactionOrder\n    );\n  }\n\n  // Signed before Provided\n  {\n    let mut block = block.clone();\n    let signed = block.transactions.remove(256);\n    block.transactions.insert(0, signed);\n    assert_eq!(\n      blockchain.verify_block::<N>(&block, &validators, false).unwrap_err(),\n      BlockError::WrongTransactionOrder\n    );\n  }\n\n  // Signed before Unsigned\n  {\n    let mut block = block;\n    block.transactions.swap(128, 256);\n    assert_eq!(\n      blockchain.verify_block::<N>(&block, &validators, false).unwrap_err(),\n      BlockError::WrongTransactionOrder\n    );\n  }\n}\n"
  },
  {
    "path": "coordinator/tributary/src/tests/mempool.rs",
    "content": "use std::{sync::Arc, collections::HashMap};\n\nuse zeroize::Zeroizing;\nuse rand::{RngCore, rngs::OsRng};\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::{group::ff::Field, Ciphersuite};\n\nuse tendermint::ext::Commit;\n\nuse serai_db::MemDb;\n\nuse crate::{\n  transaction::{TransactionError, Transaction as TransactionTrait},\n  tendermint::{TendermintBlock, Validators, Signer, TendermintNetwork},\n  ACCOUNT_MEMPOOL_LIMIT, Transaction, Mempool,\n  tests::{SignedTransaction, signed_transaction, p2p::DummyP2p, random_evidence_tx},\n};\n\ntype N = TendermintNetwork<MemDb, SignedTransaction, DummyP2p>;\n\nfn new_mempool<T: TransactionTrait>() -> ([u8; 32], MemDb, Mempool<MemDb, T>) {\n  let mut genesis = [0; 32];\n  OsRng.fill_bytes(&mut genesis);\n  let db = MemDb::new();\n  (genesis, db.clone(), Mempool::new(db, genesis))\n}\n\n#[tokio::test]\nasync fn mempool_addition() {\n  let (genesis, db, mut mempool) = new_mempool::<SignedTransaction>();\n  let commit = |_: u64| -> Option<Commit<Arc<Validators>>> {\n    Some(Commit::<Arc<Validators>> { end_time: 0, validators: vec![], signature: vec![] })\n  };\n  let unsigned_in_chain = |_: [u8; 32]| false;\n  let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));\n\n  let first_tx = signed_transaction(&mut OsRng, genesis, &key, 0);\n  let signer = first_tx.1.signer;\n  assert_eq!(mempool.next_nonce_in_mempool(&signer, vec![]), None);\n\n  // validators\n  let validators = Arc::new(Validators::new(genesis, vec![(signer, 1)]).unwrap());\n\n  // Add TX 0\n  assert!(mempool\n    .add::<N, _>(\n      &|_, _| Some(0),\n      true,\n      Transaction::Application(first_tx.clone()),\n      &validators,\n      unsigned_in_chain,\n      commit,\n    )\n    .unwrap());\n  assert_eq!(mempool.next_nonce_in_mempool(&signer, vec![]), Some(1));\n\n  // add a tendermint evidence tx\n  let evidence_tx =\n    random_evidence_tx::<N>(Signer::new(genesis, key.clone()).into(), TendermintBlock(vec![]))\n    
  .await;\n  assert!(mempool\n    .add::<N, _>(\n      &|_, _| None,\n      true,\n      Transaction::Tendermint(evidence_tx.clone()),\n      &validators,\n      unsigned_in_chain,\n      commit,\n    )\n    .unwrap());\n\n  // Test reloading works\n  assert_eq!(mempool, Mempool::new(db, genesis));\n\n  // Adding them again should fail\n  assert_eq!(\n    mempool.add::<N, _>(\n      &|_, _| Some(0),\n      true,\n      Transaction::Application(first_tx.clone()),\n      &validators,\n      unsigned_in_chain,\n      commit,\n    ),\n    Err(TransactionError::InvalidNonce)\n  );\n  assert_eq!(\n    mempool.add::<N, _>(\n      &|_, _| None,\n      true,\n      Transaction::Tendermint(evidence_tx.clone()),\n      &validators,\n      unsigned_in_chain,\n      commit,\n    ),\n    Ok(false)\n  );\n\n  // Do the same with the next nonce\n  let second_tx = signed_transaction(&mut OsRng, genesis, &key, 1);\n  assert_eq!(\n    mempool.add::<N, _>(\n      &|_, _| Some(0),\n      true,\n      Transaction::Application(second_tx.clone()),\n      &validators,\n      unsigned_in_chain,\n      commit,\n    ),\n    Ok(true)\n  );\n  assert_eq!(mempool.next_nonce_in_mempool(&signer, vec![]), Some(2));\n  assert_eq!(\n    mempool.add::<N, _>(\n      &|_, _| Some(0),\n      true,\n      Transaction::Application(second_tx.clone()),\n      &validators,\n      unsigned_in_chain,\n      commit,\n    ),\n    Err(TransactionError::InvalidNonce)\n  );\n\n  // If the mempool doesn't have a nonce for an account, it should successfully use the\n  // blockchain's\n  let second_key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));\n  let tx = signed_transaction(&mut OsRng, genesis, &second_key, 2);\n  let second_signer = tx.1.signer;\n  assert_eq!(mempool.next_nonce_in_mempool(&second_signer, vec![]), None);\n  assert!(mempool\n    .add::<N, _>(\n      &|_, _| Some(2),\n      true,\n      Transaction::Application(tx.clone()),\n      &validators,\n      unsigned_in_chain,\n      
commit\n    )\n    .unwrap());\n  assert_eq!(mempool.next_nonce_in_mempool(&second_signer, vec![]), Some(3));\n\n  // Getting a block should work\n  assert_eq!(mempool.block().len(), 4);\n\n  // Removing should successfully prune\n  mempool.remove(&tx.hash());\n\n  assert_eq!(\n    mempool.txs(),\n    &HashMap::from([\n      (first_tx.hash(), Transaction::Application(first_tx)),\n      (second_tx.hash(), Transaction::Application(second_tx)),\n      (evidence_tx.hash(), Transaction::Tendermint(evidence_tx))\n    ])\n  );\n}\n\n#[test]\nfn too_many_mempool() {\n  let (genesis, _, mut mempool) = new_mempool::<SignedTransaction>();\n  let validators = Arc::new(Validators::new(genesis, vec![]).unwrap());\n  let commit = |_: u64| -> Option<Commit<Arc<Validators>>> {\n    Some(Commit::<Arc<Validators>> { end_time: 0, validators: vec![], signature: vec![] })\n  };\n  let unsigned_in_chain = |_: [u8; 32]| false;\n  let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));\n\n  // We should be able to add transactions up to the limit\n  for i in 0 .. ACCOUNT_MEMPOOL_LIMIT {\n    assert!(mempool\n      .add::<N, _>(\n        &|_, _| Some(0),\n        false,\n        Transaction::Application(signed_transaction(&mut OsRng, genesis, &key, i)),\n        &validators,\n        unsigned_in_chain,\n        commit,\n      )\n      .unwrap());\n  }\n  // Yet adding more should fail\n  assert_eq!(\n    mempool.add::<N, _>(\n      &|_, _| Some(0),\n      false,\n      Transaction::Application(signed_transaction(\n        &mut OsRng,\n        genesis,\n        &key,\n        ACCOUNT_MEMPOOL_LIMIT\n      )),\n      &validators,\n      unsigned_in_chain,\n      commit,\n    ),\n    Err(TransactionError::TooManyInMempool)\n  );\n}\n"
  },
  {
    "path": "coordinator/tributary/src/tests/merkle.rs",
    "content": "use std::collections::HashSet;\n\nuse rand::{RngCore, rngs::OsRng};\n\n#[test]\nfn merkle() {\n  let mut used = HashSet::new();\n  // Test this produces a unique root\n  let mut test = |hashes: &[[u8; 32]]| {\n    let hash = crate::merkle(hashes);\n    assert!(!used.contains(&hash));\n    used.insert(hash);\n  };\n\n  // Zero should be a special case which returns 0\n  assert_eq!(crate::merkle(&[]), [0; 32]);\n  test(&[]);\n\n  let mut one = [0; 32];\n  OsRng.fill_bytes(&mut one);\n  let mut two = [0; 32];\n  OsRng.fill_bytes(&mut two);\n  let mut three = [0; 32];\n  OsRng.fill_bytes(&mut three);\n\n  // Make sure it's deterministic\n  assert_eq!(crate::merkle(&[one]), crate::merkle(&[one]));\n\n  // Test a few basic structures\n  test(&[one]);\n  test(&[one, two]);\n  test(&[one, two, three]);\n  test(&[one, three]);\n}\n"
  },
  {
    "path": "coordinator/tributary/src/tests/mod.rs",
    "content": "#[cfg(test)]\nmod tendermint;\n\nmod transaction;\npub use transaction::*;\n\n#[cfg(test)]\nmod merkle;\n\n#[cfg(test)]\nmod block;\n#[cfg(test)]\nmod blockchain;\n#[cfg(test)]\nmod mempool;\n#[cfg(test)]\nmod p2p;\n"
  },
  {
    "path": "coordinator/tributary/src/tests/p2p.rs",
    "content": "pub use crate::P2p;\n\n#[derive(Clone, Debug)]\npub struct DummyP2p;\n\n#[async_trait::async_trait]\nimpl P2p for DummyP2p {\n  async fn broadcast(&self, _: [u8; 32], _: Vec<u8>) {\n    unimplemented!()\n  }\n}\n"
  },
  {
    "path": "coordinator/tributary/src/tests/tendermint.rs",
    "content": "use tendermint::ext::Network;\nuse crate::{\n  P2p, TendermintTx,\n  tendermint::{TARGET_BLOCK_TIME, TendermintNetwork},\n};\n\n#[test]\nfn assert_target_block_time() {\n  use serai_db::MemDb;\n\n  #[derive(Clone, Debug)]\n  pub struct DummyP2p;\n\n  #[async_trait::async_trait]\n  impl P2p for DummyP2p {\n    async fn broadcast(&self, _: [u8; 32], _: Vec<u8>) {\n      unimplemented!()\n    }\n  }\n\n  // Type parameters don't matter here since we only need to call the block_time()\n  // and it only relies on the constants of the trait implementation. block_time() is in seconds,\n  // TARGET_BLOCK_TIME is in milliseconds.\n  assert_eq!(\n    <TendermintNetwork<MemDb, TendermintTx, DummyP2p> as Network>::block_time(),\n    TARGET_BLOCK_TIME / 1000\n  )\n}\n"
  },
  {
    "path": "coordinator/tributary/src/tests/transaction/mod.rs",
    "content": "use core::ops::Deref;\nuse std::{sync::Arc, io};\n\nuse zeroize::Zeroizing;\nuse rand::{RngCore, CryptoRng, rngs::OsRng};\n\nuse blake2::{Digest, Blake2s256};\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::{\n  group::{ff::Field, Group},\n  Ciphersuite,\n};\nuse schnorr::SchnorrSignature;\n\nuse scale::Encode;\n\nuse ::tendermint::{\n  ext::{Network, Signer as SignerTrait, SignatureScheme, BlockNumber, RoundNumber},\n  SignedMessageFor, DataFor, Message, SignedMessage, Data, Evidence,\n};\n\nuse crate::{\n  transaction::{Signed, TransactionError, TransactionKind, Transaction, verify_transaction},\n  ReadWrite,\n  tendermint::{tx::TendermintTx, Validators, Signer},\n};\n\n#[cfg(test)]\nmod signed;\n\n#[cfg(test)]\nmod tendermint;\n\npub fn random_signed<R: RngCore + CryptoRng>(rng: &mut R) -> Signed {\n  Signed {\n    signer: <Ristretto as Ciphersuite>::G::random(&mut *rng),\n    nonce: u32::try_from(rng.next_u64() >> 32 >> 1).unwrap(),\n    signature: SchnorrSignature::<Ristretto> {\n      R: <Ristretto as Ciphersuite>::G::random(&mut *rng),\n      s: <Ristretto as Ciphersuite>::F::random(rng),\n    },\n  }\n}\n\npub fn random_signed_with_nonce<R: RngCore + CryptoRng>(rng: &mut R, nonce: u32) -> Signed {\n  let mut signed = random_signed(rng);\n  signed.nonce = nonce;\n  signed\n}\n\n#[derive(Clone, PartialEq, Eq, Debug)]\npub struct ProvidedTransaction(pub Vec<u8>);\n\nimpl ReadWrite for ProvidedTransaction {\n  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {\n    let mut len = [0; 4];\n    reader.read_exact(&mut len)?;\n    let mut data = vec![0; usize::try_from(u32::from_le_bytes(len)).unwrap()];\n    reader.read_exact(&mut data)?;\n    Ok(ProvidedTransaction(data))\n  }\n\n  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {\n    writer.write_all(&u32::try_from(self.0.len()).unwrap().to_le_bytes())?;\n    writer.write_all(&self.0)\n  }\n}\n\nimpl Transaction for ProvidedTransaction {\n  fn kind(&self) -> 
TransactionKind<'_> {\n    match self.0[0] {\n      1 => TransactionKind::Provided(\"order1\"),\n      2 => TransactionKind::Provided(\"order2\"),\n      _ => panic!(\"unknown order\"),\n    }\n  }\n\n  fn hash(&self) -> [u8; 32] {\n    Blake2s256::digest(self.serialize()).into()\n  }\n\n  fn verify(&self) -> Result<(), TransactionError> {\n    Ok(())\n  }\n}\n\npub fn random_provided_transaction<R: RngCore + CryptoRng>(\n  rng: &mut R,\n  order: &str,\n) -> ProvidedTransaction {\n  let mut data = vec![0; 512];\n  rng.fill_bytes(&mut data);\n  data[0] = match order {\n    \"order1\" => 1,\n    \"order2\" => 2,\n    _ => panic!(\"unknown order\"),\n  };\n  ProvidedTransaction(data)\n}\n\n#[derive(Clone, PartialEq, Eq, Debug)]\npub struct SignedTransaction(pub Vec<u8>, pub Signed);\n\nimpl ReadWrite for SignedTransaction {\n  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {\n    let mut len = [0; 4];\n    reader.read_exact(&mut len)?;\n    let mut data = vec![0; usize::try_from(u32::from_le_bytes(len)).unwrap()];\n    reader.read_exact(&mut data)?;\n\n    Ok(SignedTransaction(data, Signed::read(reader)?))\n  }\n\n  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {\n    writer.write_all(&u32::try_from(self.0.len()).unwrap().to_le_bytes())?;\n    writer.write_all(&self.0)?;\n    self.1.write(writer)\n  }\n}\n\nimpl Transaction for SignedTransaction {\n  fn kind(&self) -> TransactionKind<'_> {\n    TransactionKind::Signed(vec![], &self.1)\n  }\n\n  fn hash(&self) -> [u8; 32] {\n    let serialized = self.serialize();\n    Blake2s256::digest(&serialized[.. 
(serialized.len() - 64)]).into()\n  }\n\n  fn verify(&self) -> Result<(), TransactionError> {\n    Ok(())\n  }\n}\n\npub fn signed_transaction<R: RngCore + CryptoRng>(\n  rng: &mut R,\n  genesis: [u8; 32],\n  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,\n  nonce: u32,\n) -> SignedTransaction {\n  let mut data = vec![0; 512];\n  rng.fill_bytes(&mut data);\n\n  let signer = <Ristretto as Ciphersuite>::generator() * **key;\n\n  let mut tx =\n    SignedTransaction(data, Signed { signer, nonce, signature: random_signed(rng).signature });\n\n  let sig_nonce = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(rng));\n  tx.1.signature.R = Ristretto::generator() * sig_nonce.deref();\n  tx.1.signature = SchnorrSignature::sign(key, sig_nonce, tx.sig_hash(genesis));\n\n  verify_transaction(&tx, genesis, &mut |_, _| Some(tx.1.nonce)).unwrap();\n\n  tx\n}\n\npub fn random_signed_transaction<R: RngCore + CryptoRng>(\n  rng: &mut R,\n) -> ([u8; 32], SignedTransaction) {\n  let mut genesis = [0; 32];\n  rng.fill_bytes(&mut genesis);\n\n  let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut *rng));\n  // Shift over an additional bit to ensure it won't overflow when incremented\n  let nonce = u32::try_from(rng.next_u64() >> 32 >> 1).unwrap();\n\n  (genesis, signed_transaction(rng, genesis, &key, nonce))\n}\n\npub fn new_genesis() -> [u8; 32] {\n  let mut genesis = [0; 32];\n  OsRng.fill_bytes(&mut genesis);\n  genesis\n}\n\npub async fn tendermint_meta() -> ([u8; 32], Signer, [u8; 32], Arc<Validators>) {\n  // signer\n  let genesis = new_genesis();\n  let signer =\n    Signer::new(genesis, Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng)));\n  let validator_id = signer.validator_id().await.unwrap();\n\n  // schema\n  let signer_pub =\n    <Ristretto as Ciphersuite>::read_G::<&[u8]>(&mut validator_id.as_slice()).unwrap();\n  let validators = Arc::new(Validators::new(genesis, vec![(signer_pub, 1)]).unwrap());\n\n  (genesis, signer, validator_id, 
validators)\n}\n\npub async fn signed_from_data<N: Network>(\n  signer: <N::SignatureScheme as SignatureScheme>::Signer,\n  signer_id: N::ValidatorId,\n  block_number: u64,\n  round_number: u32,\n  data: DataFor<N>,\n) -> SignedMessageFor<N> {\n  let msg = Message {\n    sender: signer_id,\n    block: BlockNumber(block_number),\n    round: RoundNumber(round_number),\n    data,\n  };\n  let sig = signer.sign(&msg.encode()).await;\n  SignedMessage { msg, sig }\n}\n\npub async fn random_evidence_tx<N: Network>(\n  signer: <N::SignatureScheme as SignatureScheme>::Signer,\n  b: N::Block,\n) -> TendermintTx {\n  // Creates a TX with an invalid valid round number\n  // TODO: Use a random failure reason\n  let data = Data::Proposal(Some(RoundNumber(0)), b);\n  let signer_id = signer.validator_id().await.unwrap();\n  let signed = signed_from_data::<N>(signer, signer_id, 0, 0, data).await;\n  TendermintTx::SlashEvidence(Evidence::InvalidValidRound(signed.encode()))\n}\n"
  },
  {
    "path": "coordinator/tributary/src/tests/transaction/signed.rs",
    "content": "use rand::rngs::OsRng;\n\nuse blake2::{Digest, Blake2s256};\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::{group::ff::Field, Ciphersuite};\n\nuse crate::{\n  ReadWrite,\n  transaction::{Signed, Transaction, verify_transaction},\n  tests::{random_signed, random_signed_transaction},\n};\n\n#[test]\nfn serialize_signed() {\n  let signed = random_signed(&mut rand::rngs::OsRng);\n  assert_eq!(Signed::read::<&[u8]>(&mut signed.serialize().as_ref()).unwrap(), signed);\n}\n\n#[test]\nfn sig_hash() {\n  let (genesis, tx1) = random_signed_transaction(&mut OsRng);\n  assert!(tx1.sig_hash(genesis) != tx1.sig_hash(Blake2s256::digest(genesis).into()));\n\n  let (_, tx2) = random_signed_transaction(&mut OsRng);\n  assert!(tx1.hash() != tx2.hash());\n  assert!(tx1.sig_hash(genesis) != tx2.sig_hash(genesis));\n}\n\n#[test]\nfn signed_transaction() {\n  let (genesis, tx) = random_signed_transaction(&mut OsRng);\n\n  // Mutate various properties and verify it no longer works\n\n  // Different genesis\n  assert!(verify_transaction(&tx, Blake2s256::digest(genesis).into(), &mut |_, _| Some(\n    tx.1.nonce\n  ))\n  .is_err());\n\n  // Different data\n  {\n    let mut tx = tx.clone();\n    tx.0 = Blake2s256::digest(tx.0).to_vec();\n    assert!(verify_transaction(&tx, genesis, &mut |_, _| Some(tx.1.nonce)).is_err());\n  }\n\n  // Different signer\n  {\n    let mut tx = tx.clone();\n    tx.1.signer += Ristretto::generator();\n    assert!(verify_transaction(&tx, genesis, &mut |_, _| Some(tx.1.nonce)).is_err());\n  }\n\n  // Different nonce\n  {\n    #[allow(clippy::redundant_clone)] // False positive?\n    let mut tx = tx.clone();\n    tx.1.nonce = tx.1.nonce.wrapping_add(1);\n    assert!(verify_transaction(&tx, genesis, &mut |_, _| Some(tx.1.nonce)).is_err());\n  }\n\n  // Different signature\n  {\n    let mut tx = tx.clone();\n    tx.1.signature.R += Ristretto::generator();\n    assert!(verify_transaction(&tx, genesis, &mut |_, _| Some(tx.1.nonce)).is_err());\n  }\n  
{\n    let mut tx = tx.clone();\n    tx.1.signature.s += <Ristretto as Ciphersuite>::F::ONE;\n    assert!(verify_transaction(&tx, genesis, &mut |_, _| Some(tx.1.nonce)).is_err());\n  }\n\n  // Sanity check the original TX was never mutated and is valid\n  verify_transaction(&tx, genesis, &mut |_, _| Some(tx.1.nonce)).unwrap();\n}\n\n#[test]\nfn invalid_nonce() {\n  let (genesis, tx) = random_signed_transaction(&mut OsRng);\n\n  assert!(verify_transaction(&tx, genesis, &mut |_, _| Some(tx.1.nonce.wrapping_add(1)),).is_err());\n}\n"
  },
  {
    "path": "coordinator/tributary/src/tests/transaction/tendermint.rs",
    "content": "use std::sync::Arc;\n\nuse zeroize::Zeroizing;\nuse rand::{RngCore, rngs::OsRng};\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::{Ciphersuite, group::ff::Field};\n\nuse scale::Encode;\n\nuse tendermint::{\n  time::CanonicalInstant,\n  round::RoundData,\n  Data, commit_msg, Evidence,\n  ext::{RoundNumber, Commit, Signer as SignerTrait},\n};\n\nuse serai_db::MemDb;\n\nuse crate::{\n  ReadWrite,\n  tendermint::{\n    tx::{TendermintTx, verify_tendermint_tx},\n    TendermintBlock, Signer, Validators, TendermintNetwork,\n  },\n  tests::{\n    p2p::DummyP2p, SignedTransaction, random_evidence_tx, tendermint_meta, signed_from_data,\n  },\n};\n\ntype N = TendermintNetwork<MemDb, SignedTransaction, DummyP2p>;\n\n#[tokio::test]\nasync fn serialize_tendermint() {\n  // make a tendermint tx with random evidence\n  let (_, signer, _, _) = tendermint_meta().await;\n  let tx = random_evidence_tx::<N>(signer.into(), TendermintBlock(vec![])).await;\n  let res = TendermintTx::read::<&[u8]>(&mut tx.serialize().as_ref()).unwrap();\n  assert_eq!(res, tx);\n}\n\n#[tokio::test]\nasync fn invalid_valid_round() {\n  // signer\n  let (_, signer, signer_id, validators) = tendermint_meta().await;\n  let commit = |_: u64| -> Option<Commit<Arc<Validators>>> {\n    Some(Commit::<Arc<Validators>> { end_time: 0, validators: vec![], signature: vec![] })\n  };\n\n  let valid_round_tx = |valid_round| {\n    let signer = signer.clone();\n    async move {\n      let data = Data::Proposal(valid_round, TendermintBlock(vec![]));\n      let signed = signed_from_data::<N>(signer.clone().into(), signer_id, 0, 0, data).await;\n      (signed.clone(), TendermintTx::SlashEvidence(Evidence::InvalidValidRound(signed.encode())))\n    }\n  };\n\n  // This should be invalid evidence if a valid valid round is specified\n  let (_, tx) = valid_round_tx(None).await;\n  assert!(verify_tendermint_tx::<N>(&tx, &validators, commit).is_err());\n\n  // If an invalid valid round is specified (>= current), 
this should be invalid evidence\n  let (mut signed, tx) = valid_round_tx(Some(RoundNumber(0))).await;\n\n  // should pass\n  verify_tendermint_tx::<N>(&tx, &validators, commit).unwrap();\n\n  // change the signature\n  let mut random_sig = [0u8; 64];\n  OsRng.fill_bytes(&mut random_sig);\n  signed.sig = random_sig;\n  let tx = TendermintTx::SlashEvidence(Evidence::InvalidValidRound(signed.encode()));\n\n  // should fail\n  assert!(verify_tendermint_tx::<N>(&tx, &validators, commit).is_err());\n}\n\n#[tokio::test]\nasync fn invalid_precommit_signature() {\n  let (_, signer, signer_id, validators) = tendermint_meta().await;\n  let commit = |i: u64| -> Option<Commit<Arc<Validators>>> {\n    assert_eq!(i, 0);\n    Some(Commit::<Arc<Validators>> { end_time: 0, validators: vec![], signature: vec![] })\n  };\n\n  let precommit = |precommit| {\n    let signer = signer.clone();\n    async move {\n      let signed =\n        signed_from_data::<N>(signer.clone().into(), signer_id, 1, 0, Data::Precommit(precommit))\n          .await;\n      (signed.clone(), TendermintTx::SlashEvidence(Evidence::InvalidPrecommit(signed.encode())))\n    }\n  };\n\n  // Empty Precommit should fail.\n  assert!(verify_tendermint_tx::<N>(&precommit(None).await.1, &validators, commit).is_err());\n\n  // valid precommit signature should fail.\n  let block_id = [0x22u8; 32];\n  let last_end_time =\n    RoundData::<N>::new(RoundNumber(0), CanonicalInstant::new(commit(0).unwrap().end_time))\n      .end_time();\n  let commit_msg = commit_msg(last_end_time.canonical(), block_id.as_ref());\n\n  assert!(verify_tendermint_tx::<N>(\n    &precommit(Some((block_id, signer.clone().sign(&commit_msg).await))).await.1,\n    &validators,\n    commit\n  )\n  .is_err());\n\n  // any other signature can be used as evidence.\n  {\n    let (mut signed, tx) = precommit(Some((block_id, signer.sign(&[]).await))).await;\n    verify_tendermint_tx::<N>(&tx, &validators, commit).unwrap();\n\n    // So long as we can authenticate 
where it came from\n    let mut random_sig = [0u8; 64];\n    OsRng.fill_bytes(&mut random_sig);\n    signed.sig = random_sig;\n    let tx = TendermintTx::SlashEvidence(Evidence::InvalidPrecommit(signed.encode()));\n    assert!(verify_tendermint_tx::<N>(&tx, &validators, commit).is_err());\n  }\n}\n\n#[tokio::test]\nasync fn evidence_with_prevote() {\n  let (_, signer, signer_id, validators) = tendermint_meta().await;\n  let commit = |_: u64| -> Option<Commit<Arc<Validators>>> {\n    Some(Commit::<Arc<Validators>> { end_time: 0, validators: vec![], signature: vec![] })\n  };\n\n  let prevote = |block_id| {\n    let signer = signer.clone();\n    async move {\n      // it should fail for all reasons.\n      let mut txs = vec![];\n      txs.push(TendermintTx::SlashEvidence(Evidence::InvalidPrecommit(\n        signed_from_data::<N>(signer.clone().into(), signer_id, 0, 0, Data::Prevote(block_id))\n          .await\n          .encode(),\n      )));\n      txs.push(TendermintTx::SlashEvidence(Evidence::InvalidValidRound(\n        signed_from_data::<N>(signer.clone().into(), signer_id, 0, 0, Data::Prevote(block_id))\n          .await\n          .encode(),\n      )));\n      // Since these require a second message, provide this one again\n      // ConflictingMessages can be fired for actually conflicting Prevotes however\n      txs.push(TendermintTx::SlashEvidence(Evidence::ConflictingMessages(\n        signed_from_data::<N>(signer.clone().into(), signer_id, 0, 0, Data::Prevote(block_id))\n          .await\n          .encode(),\n        signed_from_data::<N>(signer.clone().into(), signer_id, 0, 0, Data::Prevote(block_id))\n          .await\n          .encode(),\n      )));\n      txs\n    }\n  };\n\n  // No prevote message alone should be valid as slash evidence at this time\n  for prevote in prevote(None).await {\n    assert!(verify_tendermint_tx::<N>(&prevote, &validators, commit).is_err());\n  }\n  for prevote in prevote(Some([0x22u8; 32])).await {\n    
assert!(verify_tendermint_tx::<N>(&prevote, &validators, commit).is_err());\n  }\n}\n\n#[tokio::test]\nasync fn conflicting_msgs_evidence_tx() {\n  let (genesis, signer, signer_id, validators) = tendermint_meta().await;\n  let commit = |i: u64| -> Option<Commit<Arc<Validators>>> {\n    assert_eq!(i, 0);\n    Some(Commit::<Arc<Validators>> { end_time: 0, validators: vec![], signature: vec![] })\n  };\n\n  // Block b, round n\n  let signed_for_b_r = |block, round, data| {\n    let signer = signer.clone();\n    async move { signed_from_data::<N>(signer.clone().into(), signer_id, block, round, data).await }\n  };\n\n  // Proposal\n  {\n    // non-conflicting data should fail\n    let signed_1 = signed_for_b_r(0, 0, Data::Proposal(None, TendermintBlock(vec![0x11]))).await;\n    let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages(\n      signed_1.encode(),\n      signed_1.encode(),\n    ));\n    assert!(verify_tendermint_tx::<N>(&tx, &validators, commit).is_err());\n\n    // conflicting data should pass\n    let signed_2 = signed_for_b_r(0, 0, Data::Proposal(None, TendermintBlock(vec![0x22]))).await;\n    let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages(\n      signed_1.encode(),\n      signed_2.encode(),\n    ));\n    verify_tendermint_tx::<N>(&tx, &validators, commit).unwrap();\n\n    // Except if it has a distinct round number, as we don't check cross-round conflicts\n    // (except for Precommit)\n    let signed_2 = signed_for_b_r(0, 1, Data::Proposal(None, TendermintBlock(vec![0x22]))).await;\n    let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages(\n      signed_1.encode(),\n      signed_2.encode(),\n    ));\n    verify_tendermint_tx::<N>(&tx, &validators, commit).unwrap_err();\n\n    // Proposals for different block numbers should also fail as evidence\n    let signed_2 = signed_for_b_r(1, 0, Data::Proposal(None, TendermintBlock(vec![0x22]))).await;\n    let tx = 
TendermintTx::SlashEvidence(Evidence::ConflictingMessages(\n      signed_1.encode(),\n      signed_2.encode(),\n    ));\n    verify_tendermint_tx::<N>(&tx, &validators, commit).unwrap_err();\n  }\n\n  // Prevote\n  {\n    // non-conflicting data should fail\n    let signed_1 = signed_for_b_r(0, 0, Data::Prevote(Some([0x11; 32]))).await;\n    let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages(\n      signed_1.encode(),\n      signed_1.encode(),\n    ));\n    assert!(verify_tendermint_tx::<N>(&tx, &validators, commit).is_err());\n\n    // conflicting data should pass\n    let signed_2 = signed_for_b_r(0, 0, Data::Prevote(Some([0x22; 32]))).await;\n    let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages(\n      signed_1.encode(),\n      signed_2.encode(),\n    ));\n    verify_tendermint_tx::<N>(&tx, &validators, commit).unwrap();\n\n    // Except if it has a distinct round number, as we don't check cross-round conflicts\n    // (except for Precommit)\n    let signed_2 = signed_for_b_r(0, 1, Data::Prevote(Some([0x22; 32]))).await;\n    let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages(\n      signed_1.encode(),\n      signed_2.encode(),\n    ));\n    verify_tendermint_tx::<N>(&tx, &validators, commit).unwrap_err();\n\n    // Proposals for different block numbers should also fail as evidence\n    let signed_2 = signed_for_b_r(1, 0, Data::Prevote(Some([0x22; 32]))).await;\n    let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages(\n      signed_1.encode(),\n      signed_2.encode(),\n    ));\n    verify_tendermint_tx::<N>(&tx, &validators, commit).unwrap_err();\n  }\n\n  // msgs from different senders should fail\n  {\n    let signed_1 = signed_for_b_r(0, 0, Data::Proposal(None, TendermintBlock(vec![0x11]))).await;\n\n    let signer_2 =\n      Signer::new(genesis, Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng)));\n    let signed_id_2 = signer_2.validator_id().await.unwrap();\n    let signed_2 = 
signed_from_data::<N>(\n      signer_2.into(),\n      signed_id_2,\n      0,\n      0,\n      Data::Proposal(None, TendermintBlock(vec![0x22])),\n    )\n    .await;\n\n    let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages(\n      signed_1.encode(),\n      signed_2.encode(),\n    ));\n\n    // update schema so that we don't fail due to invalid signature\n    let signer_pub =\n      <Ristretto as Ciphersuite>::read_G::<&[u8]>(&mut signer_id.as_slice()).unwrap();\n    let signer_pub_2 =\n      <Ristretto as Ciphersuite>::read_G::<&[u8]>(&mut signed_id_2.as_slice()).unwrap();\n    let validators =\n      Arc::new(Validators::new(genesis, vec![(signer_pub, 1), (signer_pub_2, 1)]).unwrap());\n\n    assert!(verify_tendermint_tx::<N>(&tx, &validators, commit).is_err());\n  }\n\n  // msgs with different steps should fail\n  {\n    let signed_1 = signed_for_b_r(0, 0, Data::Proposal(None, TendermintBlock(vec![]))).await;\n    let signed_2 = signed_for_b_r(0, 0, Data::Prevote(None)).await;\n    let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages(\n      signed_1.encode(),\n      signed_2.encode(),\n    ));\n    assert!(verify_tendermint_tx::<N>(&tx, &validators, commit).is_err());\n  }\n}\n"
  },
  {
    "path": "coordinator/tributary/src/transaction.rs",
    "content": "use core::fmt::Debug;\nuse std::io;\n\nuse zeroize::Zeroize;\nuse thiserror::Error;\n\nuse blake2::{Digest, Blake2b512};\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::{\n  group::{Group, GroupEncoding},\n  Ciphersuite,\n};\nuse schnorr::SchnorrSignature;\n\nuse crate::{TRANSACTION_SIZE_LIMIT, ReadWrite};\n\n#[derive(Clone, PartialEq, Eq, Debug, Error)]\npub enum TransactionError {\n  /// Transaction exceeded the size limit.\n  #[error(\"transaction is too large\")]\n  TooLargeTransaction,\n  /// Transaction's signer isn't a participant.\n  #[error(\"invalid signer\")]\n  InvalidSigner,\n  /// Transaction's nonce isn't the prior nonce plus one.\n  #[error(\"invalid nonce\")]\n  InvalidNonce,\n  /// Transaction's signature is invalid.\n  #[error(\"invalid signature\")]\n  InvalidSignature,\n  /// Transaction's content is invalid.\n  #[error(\"transaction content is invalid\")]\n  InvalidContent,\n  /// Transaction's signer has too many transactions in the mempool.\n  #[error(\"signer has too many transactions in the mempool\")]\n  TooManyInMempool,\n  /// Provided Transaction added to mempool.\n  #[error(\"provided transaction added to mempool\")]\n  ProvidedAddedToMempool,\n}\n\n/// Data for a signed transaction.\n#[derive(Clone, PartialEq, Eq, Debug)]\npub struct Signed {\n  pub signer: <Ristretto as Ciphersuite>::G,\n  pub nonce: u32,\n  pub signature: SchnorrSignature<Ristretto>,\n}\n\nimpl ReadWrite for Signed {\n  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {\n    let signer = Ristretto::read_G(reader)?;\n\n    let mut nonce = [0; 4];\n    reader.read_exact(&mut nonce)?;\n    let nonce = u32::from_le_bytes(nonce);\n    if nonce >= (u32::MAX - 1) {\n      Err(io::Error::other(\"nonce exceeded limit\"))?;\n    }\n\n    let mut signature = SchnorrSignature::<Ristretto>::read(reader)?;\n    if signature.R.is_identity().into() {\n      // Anyone malicious could remove this and try to find zero signatures\n      // We should never 
produce zero signatures though meaning this should never come up\n      // If it does somehow come up, this is a decent courtesy\n      signature.zeroize();\n      Err(io::Error::other(\"signature nonce was identity\"))?;\n    }\n\n    Ok(Signed { signer, nonce, signature })\n  }\n\n  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {\n    // This is either an invalid signature or a private key leak\n    if self.signature.R.is_identity().into() {\n      Err(io::Error::other(\"signature nonce was identity\"))?;\n    }\n    writer.write_all(&self.signer.to_bytes())?;\n    writer.write_all(&self.nonce.to_le_bytes())?;\n    self.signature.write(writer)\n  }\n}\n\nimpl Signed {\n  pub fn read_without_nonce<R: io::Read>(reader: &mut R, nonce: u32) -> io::Result<Self> {\n    let signer = Ristretto::read_G(reader)?;\n\n    let mut signature = SchnorrSignature::<Ristretto>::read(reader)?;\n    if signature.R.is_identity().into() {\n      // Anyone malicious could remove this and try to find zero signatures\n      // We should never produce zero signatures though meaning this should never come up\n      // If it does somehow come up, this is a decent courtesy\n      signature.zeroize();\n      Err(io::Error::other(\"signature nonce was identity\"))?;\n    }\n\n    Ok(Signed { signer, nonce, signature })\n  }\n\n  pub fn write_without_nonce<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {\n    // This is either an invalid signature or a private key leak\n    if self.signature.R.is_identity().into() {\n      Err(io::Error::other(\"signature nonce was identity\"))?;\n    }\n    writer.write_all(&self.signer.to_bytes())?;\n    self.signature.write(writer)\n  }\n}\n\n#[allow(clippy::large_enum_variant)]\n#[derive(Clone, PartialEq, Eq, Debug)]\npub enum TransactionKind<'a> {\n  /// This transaction should be provided by every validator, in an exact order.\n  ///\n  /// The contained static string names the orderer to use. 
This allows two distinct provided\n  /// transaction kinds, without a synchronized order, to be ordered within their own kind without\n  /// requiring ordering with each other.\n  ///\n  /// The only malleability is in when this transaction appears on chain. The block producer will\n  /// include it when they have it. Block verification will fail for validators without it.\n  ///\n  /// If a supermajority of validators produce a commit for a block with a provided transaction\n  /// which isn't locally held, the block will be added to the local chain. When the transaction is\n  /// locally provided, it will be compared for correctness to the on-chain version\n  ///\n  /// In order to ensure TXs aren't accidentally provided multiple times, all provided transactions\n  /// must have a unique hash which is also unique to all Unsigned transactions.\n  Provided(&'static str),\n\n  /// An unsigned transaction, only able to be included by the block producer.\n  ///\n  /// Once an Unsigned transaction is included on-chain, it may not be included again. 
In order to\n  /// have multiple Unsigned transactions with the same values included on-chain, some distinct\n  /// nonce must be included in order to cause a distinct hash.\n  ///\n  /// The hash must also be unique with all Provided transactions.\n  Unsigned,\n\n  /// A signed transaction.\n  Signed(Vec<u8>, &'a Signed),\n}\n\n// TODO: Should this be renamed TransactionTrait now that a literal Transaction exists?\n// Or should the literal Transaction be renamed to Event?\npub trait Transaction: 'static + Send + Sync + Clone + Eq + Debug + ReadWrite {\n  /// Return what type of transaction this is.\n  fn kind(&self) -> TransactionKind<'_>;\n\n  /// Return the hash of this transaction.\n  ///\n  /// The hash must NOT commit to the signature.\n  fn hash(&self) -> [u8; 32];\n\n  /// Perform transaction-specific verification.\n  fn verify(&self) -> Result<(), TransactionError>;\n\n  /// Obtain the challenge for this transaction's signature.\n  ///\n  /// Do not override this unless you know what you're doing.\n  ///\n  /// Panics if called on non-signed transactions.\n  fn sig_hash(&self, genesis: [u8; 32]) -> <Ristretto as Ciphersuite>::F {\n    match self.kind() {\n      TransactionKind::Signed(order, Signed { signature, .. 
}) => {\n        <Ristretto as Ciphersuite>::F::from_bytes_mod_order_wide(\n          &Blake2b512::digest(\n            [\n              b\"Tributary Signed Transaction\",\n              genesis.as_ref(),\n              &self.hash(),\n              order.as_ref(),\n              signature.R.to_bytes().as_ref(),\n            ]\n            .concat(),\n          )\n          .into(),\n        )\n      }\n      _ => panic!(\"sig_hash called on non-signed transaction\"),\n    }\n  }\n}\n\npub trait GAIN: FnMut(&<Ristretto as Ciphersuite>::G, &[u8]) -> Option<u32> {}\nimpl<F: FnMut(&<Ristretto as Ciphersuite>::G, &[u8]) -> Option<u32>> GAIN for F {}\n\npub(crate) fn verify_transaction<F: GAIN, T: Transaction>(\n  tx: &T,\n  genesis: [u8; 32],\n  get_and_increment_nonce: &mut F,\n) -> Result<(), TransactionError> {\n  if tx.serialize().len() > TRANSACTION_SIZE_LIMIT {\n    Err(TransactionError::TooLargeTransaction)?;\n  }\n\n  tx.verify()?;\n\n  match tx.kind() {\n    TransactionKind::Provided(_) | TransactionKind::Unsigned => {}\n    TransactionKind::Signed(order, Signed { signer, nonce, signature }) => {\n      if let Some(next_nonce) = get_and_increment_nonce(signer, &order) {\n        if *nonce != next_nonce {\n          Err(TransactionError::InvalidNonce)?;\n        }\n      } else {\n        // Not a participant\n        Err(TransactionError::InvalidSigner)?;\n      }\n\n      // TODO: Use a batch verification here\n      if !signature.verify(*signer, tx.sig_hash(genesis)) {\n        Err(TransactionError::InvalidSignature)?;\n      }\n    }\n  }\n\n  Ok(())\n}\n"
  },
  {
    "path": "coordinator/tributary/tendermint/Cargo.toml",
    "content": "[package]\nname = \"tendermint-machine\"\nversion = \"0.2.0\"\ndescription = \"An implementation of the Tendermint state machine in Rust\"\nlicense = \"MIT\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/coordinator/tendermint\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nedition = \"2021\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nasync-trait = { version = \"0.1\", default-features = false }\nthiserror = { version = \"1\", default-features = false }\n\nhex = { version = \"0.4\", default-features = false, features = [\"std\"] }\nlog = { version = \"0.4\", default-features = false, features = [\"std\"] }\n\nparity-scale-codec = { version = \"3\", default-features = false, features = [\"std\", \"derive\"] }\n\nfutures-util = { version = \"0.3\", default-features = false, features = [\"std\", \"async-await-macro\", \"sink\", \"channel\"] }\nfutures-channel = { version = \"0.3\", default-features = false, features = [\"std\", \"sink\"] }\npatchable-async-sleep = { version = \"0.1\", path = \"../../../common/patchable-async-sleep\", default-features = false }\n\nserai-db = { path = \"../../../common/db\", version = \"0.1\", default-features = false }\n\n[dev-dependencies]\ntokio = { version = \"1\", features = [\"sync\", \"rt-multi-thread\", \"macros\"] }\n"
  },
  {
    "path": "coordinator/tributary/tendermint/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2022-2023 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "coordinator/tributary/tendermint/README.md",
    "content": "# Tendermint\n\nAn implementation of the Tendermint state machine in Rust.\n\nThis is solely the state machine, intended to be mapped to any arbitrary system.\nIt supports an arbitrary signature scheme, weighting, and block definition\naccordingly. It is not intended to work with the Cosmos SDK, solely to be an\nimplementation of the [academic protocol](https://arxiv.org/pdf/1807.04938.pdf).\n\n### Caveats\n\n- Only SCALE serialization is supported currently. Ideally, everything from\n  SCALE to borsh to bincode would be supported. SCALE was chosen due to this\n  being under Serai, which uses Substrate, which uses SCALE. Accordingly, when\n  deciding which of the three (mutually incompatible) options to support...\n\n- The only supported runtime is tokio due to requiring a `sleep` implementation.\n  Ideally, the runtime choice will be moved to a feature in the future.\n\n- It is possible for `add_block` to be called on a block which failed (or never\n  went through in the first place) validation. This is a break from the paper\n  which is accepted here. This is for two reasons.\n\n  1) Serai needing this functionality.\n  2) If a block is committed which is invalid, either there's a malicious\n     majority now defining consensus OR the local node is malicious by virtue of\n     being faulty. Considering how either represents a fatal circumstance,\n     except with regards to system like Serai which have their own logic for\n     pseudo-valid blocks, it is accepted as a possible behavior with the caveat\n     any consumers must be aware of it. No machine will vote nor precommit to a\n     block it considers invalid, so for a network with an honest majority, this\n     is a non-issue.\n\n### Paper\n\nThe [paper](https://arxiv.org/abs/1807.04938) describes the algorithm with\npseudocode on page 6. This pseudocode isn't directly implementable, nor does it\nspecify faulty behavior. 
Instead, it's solely a series of conditions which\ntrigger events in order to successfully achieve consensus.\n\nThe included pseudocode segments can be minimally described as follows:\n\n```\n01-09 Init\n10-10 StartRound(0)\n11-21 StartRound\n22-27 Fresh proposal\n28-33 Proposal building off a valid round with prevotes\n34-35 2f+1 prevote -> schedule timeout prevote\n36-43 First proposal with prevotes -> precommit Some\n44-46 2f+1 nil prevote -> precommit nil\n47-48 2f+1 precommit -> schedule timeout precommit\n49-54 First proposal with precommits -> finalize\n55-56 f+1 round > local round, jump\n57-60 on timeout propose\n61-64 on timeout prevote\n65-67 on timeout precommit\n```\n\nThe corresponding Rust code implementing these tasks are marked with their\nrelated line numbers.\n"
  },
  {
    "path": "coordinator/tributary/tendermint/src/block.rs",
    "content": "use std::{\n  sync::Arc,\n  collections::{HashSet, HashMap},\n};\n\nuse serai_db::{Get, DbTxn, Db};\n\nuse crate::{\n  time::CanonicalInstant,\n  ext::{RoundNumber, BlockNumber, Block, Network},\n  round::RoundData,\n  message_log::MessageLog,\n  Step, Data, DataFor, Message, MessageFor,\n};\n\npub(crate) struct BlockData<N: Network> {\n  db: N::Db,\n  genesis: [u8; 32],\n\n  pub(crate) number: BlockNumber,\n  pub(crate) validator_id: Option<N::ValidatorId>,\n  pub(crate) our_proposal: Option<N::Block>,\n\n  pub(crate) log: MessageLog<N>,\n  pub(crate) slashes: HashSet<N::ValidatorId>,\n  // We track the end times of each round for two reasons:\n  // 1) Knowing the start time of the next round\n  // 2) Validating precommits, which include the end time of the round which produced it\n  // This HashMap contains the end time of the round we're currently in and every round prior\n  pub(crate) end_time: HashMap<RoundNumber, CanonicalInstant>,\n\n  pub(crate) round: Option<RoundData<N>>,\n\n  pub(crate) locked: Option<(RoundNumber, <N::Block as Block>::Id)>,\n  pub(crate) valid: Option<(RoundNumber, N::Block)>,\n}\n\nimpl<N: Network> BlockData<N> {\n  pub(crate) fn new(\n    db: N::Db,\n    genesis: [u8; 32],\n    weights: Arc<N::Weights>,\n    number: BlockNumber,\n    validator_id: Option<N::ValidatorId>,\n    our_proposal: Option<N::Block>,\n  ) -> BlockData<N> {\n    BlockData {\n      db,\n      genesis,\n\n      number,\n      validator_id,\n      our_proposal,\n\n      log: MessageLog::new(weights),\n      slashes: HashSet::new(),\n      end_time: HashMap::new(),\n\n      // The caller of BlockData::new is expected to be populated after by the caller\n      round: None,\n\n      locked: None,\n      valid: None,\n    }\n  }\n\n  pub(crate) fn round(&self) -> &RoundData<N> {\n    self.round.as_ref().unwrap()\n  }\n\n  pub(crate) fn round_mut(&mut self) -> &mut RoundData<N> {\n    self.round.as_mut().unwrap()\n  }\n\n  // Populate the end time up to 
the specified round\n  // This is generally used when moving to the next round, where this will only populate one time,\n  // yet is also used when jumping rounds (when 33% of the validators are on a round ahead of us)\n  pub(crate) fn populate_end_time(&mut self, round: RoundNumber) {\n    // Starts from the current round since we only start the current round once we have all\n    // the prior time data\n    for r in (self.round().number.0 + 1) ..= round.0 {\n      self.end_time.insert(\n        RoundNumber(r),\n        RoundData::<N>::new(RoundNumber(r), self.end_time[&RoundNumber(r - 1)]).end_time(),\n      );\n    }\n  }\n\n  // Start a new round. Optionally takes in the time for when this is the first round, and the time\n  // isn't simply the time of the prior round (yet rather the prior block). Returns the proposal\n  // data, if we are the proposer.\n  pub(crate) fn new_round(\n    &mut self,\n    round: RoundNumber,\n    proposer: N::ValidatorId,\n    time: Option<CanonicalInstant>,\n  ) -> Option<DataFor<N>> {\n    debug_assert_eq!(round.0 == 0, time.is_some());\n\n    // If this is the first round, we don't have a prior round's end time to use as the start\n    // We use the passed in time instead\n    // If this isn't the first round, ensure we have the prior round's end time by populating the\n    // map with all rounds till this round\n    // This can happen when we jump from round x to round x+n, where n != 1\n    // The paper says to do so whenever you observe a sufficient amount of peers on a higher round\n    if round.0 != 0 {\n      self.populate_end_time(round);\n    }\n\n    // L11-13\n    self.round = Some(RoundData::<N>::new(\n      round,\n      time.unwrap_or_else(|| self.end_time[&RoundNumber(round.0 - 1)]),\n    ));\n    self.end_time.insert(round, self.round().end_time());\n\n    // L14-21\n    if Some(proposer) == self.validator_id {\n      let (round, block) = self.valid.clone().unzip();\n      block.or_else(|| 
self.our_proposal.clone()).map(|block| Data::Proposal(round, block))\n    } else {\n      self.round_mut().set_timeout(Step::Propose);\n      None\n    }\n  }\n\n  // Transform Data into an actual Message, using the contextual data from this block\n  pub(crate) fn message(&mut self, data: DataFor<N>) -> Option<MessageFor<N>> {\n    debug_assert_eq!(\n      self.round().step,\n      match data.step() {\n        Step::Propose | Step::Prevote => Step::Propose,\n        Step::Precommit => Step::Prevote,\n      },\n    );\n    // Tendermint always sets the round's step to whatever it just broadcasted\n    // Consolidate all of those here to ensure they aren't missed by an oversight\n    // 27, 33, 41, 46, 60, 64\n    self.round_mut().step = data.step();\n\n    // Only return a message if we're actually a current validator\n    let round_number = self.round().number;\n    let res = self.validator_id.map(|validator_id| Message {\n      sender: validator_id,\n      block: self.number,\n      round: round_number,\n      data,\n    });\n\n    if let Some(res) = res.as_ref() {\n      const LATEST_BLOCK_KEY: &[u8] = b\"tendermint-machine-sent_block\";\n      const LATEST_ROUND_KEY: &[u8] = b\"tendermint-machine-sent_round\";\n      const PROPOSE_KEY: &[u8] = b\"tendermint-machine-sent_propose\";\n      const PEVOTE_KEY: &[u8] = b\"tendermint-machine-sent_prevote\";\n      const PRECOMMIT_KEY: &[u8] = b\"tendermint-machine-sent_commit\";\n\n      let genesis = self.genesis;\n      let key = |prefix: &[u8]| [prefix, &genesis].concat();\n\n      let mut txn = self.db.txn();\n\n      // Ensure we haven't prior sent a message for a future block/round\n      let last_block_or_round = |txn: &mut <N::Db as Db>::Transaction<'_>, prefix, current| {\n        let key = key(prefix);\n        let latest =\n          u64::from_le_bytes(txn.get(key.as_slice()).unwrap_or(vec![0; 8]).try_into().unwrap());\n        if latest > current {\n          None?;\n        }\n        if current > 
latest {\n          txn.put(&key, current.to_le_bytes());\n          return Some(true);\n        }\n        Some(false)\n      };\n      let new_block = last_block_or_round(&mut txn, LATEST_BLOCK_KEY, self.number.0)?;\n      if new_block {\n        // Delete the latest round key\n        txn.del(key(LATEST_ROUND_KEY));\n      }\n      let new_round = last_block_or_round(&mut txn, LATEST_ROUND_KEY, round_number.0.into())?;\n      if new_block || new_round {\n        // Delete the messages for the old round\n        txn.del(key(PROPOSE_KEY));\n        txn.del(key(PEVOTE_KEY));\n        txn.del(key(PRECOMMIT_KEY));\n      }\n\n      // Check we haven't sent this message within this round\n      let msg_key = key(match res.data.step() {\n        Step::Propose => PROPOSE_KEY,\n        Step::Prevote => PEVOTE_KEY,\n        Step::Precommit => PRECOMMIT_KEY,\n      });\n      if txn.get(&msg_key).is_some() {\n        assert!(!new_block);\n        assert!(!new_round);\n        None?;\n      }\n      // Put that we're sending this message to the DB\n      txn.put(&msg_key, []);\n\n      txn.commit();\n    }\n\n    res\n  }\n}\n"
  },
  {
    "path": "coordinator/tributary/tendermint/src/ext.rs",
    "content": "use core::{hash::Hash, fmt::Debug};\nuse std::{sync::Arc, collections::HashSet};\n\nuse async_trait::async_trait;\nuse thiserror::Error;\n\nuse parity_scale_codec::{Encode, Decode};\n\nuse crate::{SignedMessageFor, SlashEvent, commit_msg};\n\n/// An alias for a series of traits required for a type to be usable as a validator ID,\n/// automatically implemented for all types satisfying those traits.\npub trait ValidatorId:\n  Send + Sync + Clone + Copy + PartialEq + Eq + Hash + Debug + Encode + Decode\n{\n}\nimpl<V: Send + Sync + Clone + Copy + PartialEq + Eq + Hash + Debug + Encode + Decode> ValidatorId\n  for V\n{\n}\n\n/// An alias for a series of traits required for a type to be usable as a signature,\n/// automatically implemented for all types satisfying those traits.\npub trait Signature: Send + Sync + Clone + PartialEq + Eq + Debug + Encode + Decode {}\nimpl<S: Send + Sync + Clone + PartialEq + Eq + Debug + Encode + Decode> Signature for S {}\n\n// Type aliases which are distinct according to the type system\n\n/// A struct containing a Block Number, wrapped to have a distinct type.\n#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode)]\npub struct BlockNumber(pub u64);\n/// A struct containing a round number, wrapped to have a distinct type.\n#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode)]\npub struct RoundNumber(pub u32);\n\n/// A signer for a validator.\n#[async_trait]\npub trait Signer: Send + Sync {\n  // Type used to identify validators.\n  type ValidatorId: ValidatorId;\n  /// Signature type.\n  type Signature: Signature;\n\n  /// Returns the validator's current ID. 
Returns None if they aren't a current validator.\n  async fn validator_id(&self) -> Option<Self::ValidatorId>;\n  /// Sign a signature with the current validator's private key.\n  async fn sign(&self, msg: &[u8]) -> Self::Signature;\n}\n\n#[async_trait]\nimpl<S: Signer> Signer for Arc<S> {\n  type ValidatorId = S::ValidatorId;\n  type Signature = S::Signature;\n\n  async fn validator_id(&self) -> Option<Self::ValidatorId> {\n    self.as_ref().validator_id().await\n  }\n\n  async fn sign(&self, msg: &[u8]) -> Self::Signature {\n    self.as_ref().sign(msg).await\n  }\n}\n\n/// A signature scheme used by validators.\npub trait SignatureScheme: Send + Sync + Clone {\n  // Type used to identify validators.\n  type ValidatorId: ValidatorId;\n  /// Signature type.\n  type Signature: Signature;\n  /// Type representing an aggregate signature. This would presumably be a BLS signature,\n  /// yet even with Schnorr signatures\n  /// [half-aggregation is possible](https://eprint.iacr.org/2021/350).\n  /// It could even be a threshold signature scheme, though that's currently unexpected.\n  type AggregateSignature: Signature;\n\n  /// Type representing a signer of this scheme.\n  type Signer: Signer<ValidatorId = Self::ValidatorId, Signature = Self::Signature>;\n\n  /// Verify a signature from the validator in question.\n  #[must_use]\n  fn verify(&self, validator: Self::ValidatorId, msg: &[u8], sig: &Self::Signature) -> bool;\n\n  /// Aggregate signatures.\n  /// It may panic if corrupted data passed in.\n  fn aggregate(\n    &self,\n    validators: &[Self::ValidatorId],\n    msg: &[u8],\n    sigs: &[Self::Signature],\n  ) -> Self::AggregateSignature;\n  /// Verify an aggregate signature for the list of signers.\n  #[must_use]\n  fn verify_aggregate(\n    &self,\n    signers: &[Self::ValidatorId],\n    msg: &[u8],\n    sig: &Self::AggregateSignature,\n  ) -> bool;\n}\n\nimpl<S: SignatureScheme> SignatureScheme for Arc<S> {\n  type ValidatorId = S::ValidatorId;\n  type 
Signature = S::Signature;\n  type AggregateSignature = S::AggregateSignature;\n  type Signer = S::Signer;\n\n  fn verify(&self, validator: Self::ValidatorId, msg: &[u8], sig: &Self::Signature) -> bool {\n    self.as_ref().verify(validator, msg, sig)\n  }\n\n  fn aggregate(\n    &self,\n    validators: &[Self::ValidatorId],\n    msg: &[u8],\n    sigs: &[Self::Signature],\n  ) -> Self::AggregateSignature {\n    self.as_ref().aggregate(validators, msg, sigs)\n  }\n\n  #[must_use]\n  fn verify_aggregate(\n    &self,\n    signers: &[Self::ValidatorId],\n    msg: &[u8],\n    sig: &Self::AggregateSignature,\n  ) -> bool {\n    self.as_ref().verify_aggregate(signers, msg, sig)\n  }\n}\n\n/// A commit for a specific block.\n///\n/// The list of validators have weight exceeding the threshold for a valid commit.\n#[derive(PartialEq, Debug, Encode, Decode)]\npub struct Commit<S: SignatureScheme> {\n  /// End time of the round which created this commit, used as the start time of the next block.\n  pub end_time: u64,\n  /// Validators participating in the signature.\n  pub validators: Vec<S::ValidatorId>,\n  /// Aggregate signature.\n  pub signature: S::AggregateSignature,\n}\n\nimpl<S: SignatureScheme> Clone for Commit<S> {\n  fn clone(&self) -> Self {\n    Self {\n      end_time: self.end_time,\n      validators: self.validators.clone(),\n      signature: self.signature.clone(),\n    }\n  }\n}\n\n/// Weights for the validators present.\npub trait Weights: Send + Sync {\n  type ValidatorId: ValidatorId;\n\n  /// Total weight of all validators.\n  fn total_weight(&self) -> u64;\n  /// Weight for a specific validator.\n  fn weight(&self, validator: Self::ValidatorId) -> u64;\n  /// Threshold needed for BFT consensus.\n  fn threshold(&self) -> u64 {\n    ((self.total_weight() * 2) / 3) + 1\n  }\n  /// Threshold preventing BFT consensus.\n  fn fault_threshold(&self) -> u64 {\n    (self.total_weight() - self.threshold()) + 1\n  }\n\n  /// Weighted round robin function.\n  fn 
proposer(&self, block: BlockNumber, round: RoundNumber) -> Self::ValidatorId;\n}\n\nimpl<W: Weights> Weights for Arc<W> {\n  type ValidatorId = W::ValidatorId;\n\n  fn total_weight(&self) -> u64 {\n    self.as_ref().total_weight()\n  }\n\n  fn weight(&self, validator: Self::ValidatorId) -> u64 {\n    self.as_ref().weight(validator)\n  }\n\n  fn proposer(&self, block: BlockNumber, round: RoundNumber) -> Self::ValidatorId {\n    self.as_ref().proposer(block, round)\n  }\n}\n\n/// Simplified error enum representing a block's validity.\n#[derive(Clone, Copy, PartialEq, Eq, Debug, Error, Encode, Decode)]\npub enum BlockError {\n  /// Malformed block which is wholly invalid.\n  #[error(\"invalid block\")]\n  Fatal,\n  /// Valid block by syntax, with semantics which may or may not be valid yet are locally\n  /// considered invalid. If a block fails to validate with this, a slash will not be triggered.\n  #[error(\"invalid block under local view\")]\n  Temporal,\n}\n\n/// Trait representing a Block.\npub trait Block: Send + Sync + Clone + PartialEq + Eq + Debug + Encode + Decode {\n  // Type used to identify blocks. 
Presumably a cryptographic hash of the block.\n  type Id: Send + Sync + Copy + Clone + PartialEq + Eq + AsRef<[u8]> + Debug + Encode + Decode;\n\n  /// Return the deterministic, unique ID for this block.\n  fn id(&self) -> Self::Id;\n}\n\n/// Trait representing the distributed system Tendermint is providing consensus over.\n#[async_trait]\npub trait Network: Sized + Send + Sync {\n  /// The database used to back this.\n  type Db: serai_db::Db;\n\n  // Type used to identify validators.\n  type ValidatorId: ValidatorId;\n  /// Signature scheme used by validators.\n  type SignatureScheme: SignatureScheme<ValidatorId = Self::ValidatorId>;\n  /// Object representing the weights of validators.\n  type Weights: Weights<ValidatorId = Self::ValidatorId>;\n  /// Type used for ordered blocks of information.\n  type Block: Block;\n\n  /// Maximum block processing time in milliseconds.\n  ///\n  /// This should include both the time to download the block and the actual processing time.\n  ///\n  /// BLOCK_PROCESSING_TIME + (3 * LATENCY_TIME) must be divisible by 1000.\n  const BLOCK_PROCESSING_TIME: u32;\n  /// Network latency time in milliseconds.\n  ///\n  /// BLOCK_PROCESSING_TIME + (3 * LATENCY_TIME) must be divisible by 1000.\n  const LATENCY_TIME: u32;\n\n  /// The block time, in seconds. 
Defined as the processing time plus three times the latency.\n  fn block_time() -> u32 {\n    let raw = Self::BLOCK_PROCESSING_TIME + (3 * Self::LATENCY_TIME);\n    let res = raw / 1000;\n    assert_eq!(res * 1000, raw);\n    res\n  }\n\n  /// Return a handle on the signer in use, usable for the entire lifetime of the machine.\n  fn signer(&self) -> <Self::SignatureScheme as SignatureScheme>::Signer;\n  /// Return a handle on the signing scheme in use, usable for the entire lifetime of the machine.\n  fn signature_scheme(&self) -> Self::SignatureScheme;\n  /// Return a handle on the validators' weights, usable for the entire lifetime of the machine.\n  fn weights(&self) -> Self::Weights;\n\n  /// Verify a commit for a given block. Intended for use when syncing or when not an active\n  /// validator.\n  #[must_use]\n  fn verify_commit(\n    &self,\n    id: <Self::Block as Block>::Id,\n    commit: &Commit<Self::SignatureScheme>,\n  ) -> bool {\n    if commit.validators.iter().collect::<HashSet<_>>().len() != commit.validators.len() {\n      return false;\n    }\n\n    if !self.signature_scheme().verify_aggregate(\n      &commit.validators,\n      &commit_msg(commit.end_time, id.as_ref()),\n      &commit.signature,\n    ) {\n      return false;\n    }\n\n    let weights = self.weights();\n    commit.validators.iter().map(|v| weights.weight(*v)).sum::<u64>() >= weights.threshold()\n  }\n\n  /// Broadcast a message to the other validators.\n  ///\n  /// If authenticated channels have already been established, this will double-authenticate.\n  /// Switching to unauthenticated channels in a system already providing authenticated channels is\n  /// not recommended as this is a minor, temporal inefficiency, while downgrading channels may\n  /// have wider implications.\n  async fn broadcast(&mut self, msg: SignedMessageFor<Self>);\n\n  /// Trigger a slash for the validator in question who was definitively malicious.\n  ///\n  /// The exact process of triggering a slash is 
undefined and left to the network as a whole.\n  async fn slash(&mut self, validator: Self::ValidatorId, slash_event: SlashEvent);\n\n  /// Validate a block.\n  async fn validate(&self, block: &Self::Block) -> Result<(), BlockError>;\n\n  /// Add a block, returning the proposal for the next one.\n  ///\n  /// It's possible a block, which was never validated or even failed validation, may be passed\n  /// here if a supermajority of validators did consider it valid and created a commit for it.\n  ///\n  /// This deviates from the paper which will have a local node refuse to decide on a block it\n  /// considers invalid. This library acknowledges the network did decide on it, leaving handling\n  /// of it to the network, and outside of this scope.\n  async fn add_block(\n    &mut self,\n    block: Self::Block,\n    commit: Commit<Self::SignatureScheme>,\n  ) -> Option<Self::Block>;\n}\n"
  },
  {
    "path": "coordinator/tributary/tendermint/src/lib.rs",
    "content": "#![expect(clippy::cast_possible_truncation)]\n\nuse core::fmt::Debug;\n\nuse std::{\n  sync::Arc,\n  time::{SystemTime, Instant, Duration},\n  collections::{VecDeque, HashMap},\n};\n\nuse parity_scale_codec::{Encode, Decode, IoReader};\n\nuse futures_channel::mpsc;\nuse futures_util::{\n  FutureExt, StreamExt, SinkExt,\n  future::{self, Fuse},\n};\nuse patchable_async_sleep::sleep;\n\nuse serai_db::{Get, DbTxn, Db};\n\npub mod time;\nuse time::{sys_time, CanonicalInstant};\n\npub mod round;\nuse round::RoundData;\n\nmod block;\nuse block::BlockData;\n\npub(crate) mod message_log;\n\n/// Traits and types of the external network being integrated with to provide consensus over.\npub mod ext;\nuse ext::*;\n\nconst MESSAGE_TAPE_KEY: &[u8] = b\"tendermint-machine-message_tape\";\nfn message_tape_key(genesis: [u8; 32]) -> Vec<u8> {\n  [MESSAGE_TAPE_KEY, &genesis].concat()\n}\n\npub fn commit_msg(end_time: u64, id: &[u8]) -> Vec<u8> {\n  [&end_time.to_le_bytes(), id].concat()\n}\n\n#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode)]\npub enum Step {\n  Propose,\n  Prevote,\n  Precommit,\n}\n\n#[derive(Clone, Eq, Debug, Encode, Decode)]\npub enum Data<B: Block, S: Signature> {\n  Proposal(Option<RoundNumber>, B),\n  Prevote(Option<B::Id>),\n  Precommit(Option<(B::Id, S)>),\n}\n\nimpl<B: Block, S: Signature> PartialEq for Data<B, S> {\n  fn eq(&self, other: &Data<B, S>) -> bool {\n    match (self, other) {\n      (Data::Proposal(valid_round, block), Data::Proposal(valid_round2, block2)) => {\n        (valid_round == valid_round2) && (block == block2)\n      }\n      (Data::Prevote(id), Data::Prevote(id2)) => id == id2,\n      (Data::Precommit(None), Data::Precommit(None)) => true,\n      (Data::Precommit(Some((id, _))), Data::Precommit(Some((id2, _)))) => id == id2,\n      _ => false,\n    }\n  }\n}\n\nimpl<B: Block, S: Signature> core::hash::Hash for Data<B, S> {\n  fn hash<H: core::hash::Hasher>(&self, state: &mut H) {\n    match self {\n     
 Data::Proposal(valid_round, block) => (0, valid_round, block.id().as_ref()).hash(state),\n      Data::Prevote(id) => (1, id.as_ref().map(AsRef::<[u8]>::as_ref)).hash(state),\n      Data::Precommit(None) => (2, 0).hash(state),\n      Data::Precommit(Some((id, _))) => (2, 1, id.as_ref()).hash(state),\n    }\n  }\n}\n\nimpl<B: Block, S: Signature> Data<B, S> {\n  pub fn step(&self) -> Step {\n    match self {\n      Data::Proposal(..) => Step::Propose,\n      Data::Prevote(..) => Step::Prevote,\n      Data::Precommit(..) => Step::Precommit,\n    }\n  }\n}\n\n#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)]\npub struct Message<V: ValidatorId, B: Block, S: Signature> {\n  pub sender: V,\n  pub block: BlockNumber,\n  pub round: RoundNumber,\n\n  pub data: Data<B, S>,\n}\n\n/// A signed Tendermint consensus message to be broadcast to the other validators.\n#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)]\npub struct SignedMessage<V: ValidatorId, B: Block, S: Signature> {\n  pub msg: Message<V, B, S>,\n  pub sig: S,\n}\n\nimpl<V: ValidatorId, B: Block, S: Signature> SignedMessage<V, B, S> {\n  /// Number of the block this message is attempting to add to the chain.\n  pub fn block(&self) -> BlockNumber {\n    self.msg.block\n  }\n\n  #[must_use]\n  pub fn verify_signature<Scheme: SignatureScheme<ValidatorId = V, Signature = S>>(\n    &self,\n    signer: &Scheme,\n  ) -> bool {\n    signer.verify(self.msg.sender, &self.msg.encode(), &self.sig)\n  }\n}\n\n#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, Decode)]\npub enum SlashReason {\n  FailToPropose,\n  InvalidBlock,\n  InvalidProposer,\n}\n\n#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)]\npub enum Evidence {\n  ConflictingMessages(Vec<u8>, Vec<u8>),\n  InvalidPrecommit(Vec<u8>),\n  InvalidValidRound(Vec<u8>),\n}\n\n#[derive(Clone, PartialEq, Eq, Debug)]\npub enum TendermintError {\n  Malicious,\n  Temporal,\n  AlreadyHandled,\n  InvalidEvidence,\n}\n\n// Type aliases to abstract over generic 
hell\npub type DataFor<N> =\n  Data<<N as Network>::Block, <<N as Network>::SignatureScheme as SignatureScheme>::Signature>;\npub(crate) type MessageFor<N> = Message<\n  <N as Network>::ValidatorId,\n  <N as Network>::Block,\n  <<N as Network>::SignatureScheme as SignatureScheme>::Signature,\n>;\n/// Type alias to the SignedMessage type for a given Network\npub type SignedMessageFor<N> = SignedMessage<\n  <N as Network>::ValidatorId,\n  <N as Network>::Block,\n  <<N as Network>::SignatureScheme as SignatureScheme>::Signature,\n>;\n\npub fn decode_signed_message<N: Network>(mut data: &[u8]) -> Option<SignedMessageFor<N>> {\n  SignedMessageFor::<N>::decode(&mut data).ok()\n}\n\nfn decode_and_verify_signed_message<N: Network>(\n  data: &[u8],\n  schema: &N::SignatureScheme,\n) -> Result<SignedMessageFor<N>, TendermintError> {\n  let msg = decode_signed_message::<N>(data).ok_or(TendermintError::InvalidEvidence)?;\n\n  // verify that evidence messages are signed correctly\n  if !msg.verify_signature(schema) {\n    Err(TendermintError::InvalidEvidence)?;\n  }\n\n  Ok(msg)\n}\n\npub fn verify_tendermint_evidence<N: Network>(\n  evidence: &Evidence,\n  schema: &N::SignatureScheme,\n  commit: impl Fn(u64) -> Option<Commit<N::SignatureScheme>>,\n) -> Result<(), TendermintError> {\n  match evidence {\n    Evidence::ConflictingMessages(first, second) => {\n      let first = decode_and_verify_signed_message::<N>(first, schema)?.msg;\n      let second = decode_and_verify_signed_message::<N>(second, schema)?.msg;\n\n      // Make sure they're distinct messages, from the same sender, within the same block\n      if (first == second) || (first.sender != second.sender) || (first.block != second.block) {\n        Err(TendermintError::InvalidEvidence)?;\n      }\n\n      // Distinct messages within the same step\n      if !((first.round == second.round) && (first.data.step() == second.data.step())) {\n        Err(TendermintError::InvalidEvidence)?;\n      }\n    }\n    
Evidence::InvalidPrecommit(msg) => {\n      let msg = decode_and_verify_signed_message::<N>(msg, schema)?.msg;\n\n      let Data::Precommit(Some((id, sig))) = &msg.data else {\n        Err(TendermintError::InvalidEvidence)?\n      };\n      // TODO: We need to be passed in the genesis time to handle this edge case\n      if msg.block.0 == 0 {\n        Err(TendermintError::InvalidEvidence)?\n        // todo!(\"invalid precommit signature on first block\")\n      }\n\n      // get the last commit\n      let prior_commit = match commit(msg.block.0 - 1) {\n        Some(c) => c,\n        // If we have yet to sync the block in question, we will return InvalidEvidence based\n        // on our own temporal ambiguity\n        // This will also cause an InvalidEvidence for anything using a non-existent block,\n        // yet that's valid behavior\n        // TODO: Double check the ramifications of this\n        _ => Err(TendermintError::InvalidEvidence)?,\n      };\n\n      // calculate the end time till the msg round\n      let mut last_end_time = CanonicalInstant::new(prior_commit.end_time);\n      for r in 0 ..= msg.round.0 {\n        last_end_time = RoundData::<N>::new(RoundNumber(r), last_end_time).end_time();\n      }\n\n      // verify that the commit was actually invalid\n      if schema.verify(msg.sender, &commit_msg(last_end_time.canonical(), id.as_ref()), sig) {\n        Err(TendermintError::InvalidEvidence)?\n      }\n    }\n    Evidence::InvalidValidRound(msg) => {\n      let msg = decode_and_verify_signed_message::<N>(msg, schema)?.msg;\n\n      let Data::Proposal(Some(vr), _) = &msg.data else { Err(TendermintError::InvalidEvidence)? 
};\n      if vr.0 < msg.round.0 {\n        Err(TendermintError::InvalidEvidence)?\n      }\n    }\n  }\n  Ok(())\n}\n\n#[derive(Clone, PartialEq, Eq, Debug)]\npub enum SlashEvent {\n  Id(SlashReason, u64, u32),\n  WithEvidence(Evidence),\n}\n\n// Struct for if various upon handlers have been triggered to ensure they don't trigger multiple\n// times.\n#[derive(Clone, PartialEq, Eq, Debug)]\nstruct Upons {\n  upon_prevotes: bool,\n  upon_successful_current_round_prevotes: bool,\n  upon_negative_current_round_prevotes: bool,\n  upon_precommits: bool,\n}\n\n/// A machine executing the Tendermint protocol.\npub struct TendermintMachine<N: Network> {\n  db: N::Db,\n  genesis: [u8; 32],\n\n  network: N,\n  signer: <N::SignatureScheme as SignatureScheme>::Signer,\n  validators: N::SignatureScheme,\n  weights: Arc<N::Weights>,\n\n  queue: VecDeque<MessageFor<N>>,\n  msg_recv: mpsc::UnboundedReceiver<SignedMessageFor<N>>,\n  synced_block_recv: mpsc::UnboundedReceiver<SyncedBlock<N>>,\n  synced_block_result_send: mpsc::UnboundedSender<bool>,\n\n  block: BlockData<N>,\n  // TODO: Move this into the Block struct\n  round_proposals: HashMap<RoundNumber, (Option<RoundNumber>, N::Block)>,\n  // TODO: Move this into the Round struct\n  upons: Upons,\n}\n\npub struct SyncedBlock<N: Network> {\n  pub number: BlockNumber,\n  pub block: <N as Network>::Block,\n  pub commit: Commit<<N as Network>::SignatureScheme>,\n}\n\npub type SyncedBlockSender<N> = mpsc::UnboundedSender<SyncedBlock<N>>;\npub type SyncedBlockResultReceiver = mpsc::UnboundedReceiver<bool>;\n\npub type MessageSender<N> = mpsc::UnboundedSender<SignedMessageFor<N>>;\n\n/// A Tendermint machine and its channel to receive messages from the gossip layer over.\npub struct TendermintHandle<N: Network> {\n  /// Channel to trigger the machine to move to the next block.\n  /// Takes in the previous block's commit, along with the new proposal.\n  pub synced_block: SyncedBlockSender<N>,\n  /// A channel to communicate the 
result of a synced_block message.\n  pub synced_block_result: SyncedBlockResultReceiver,\n  /// Channel to send messages received from the P2P layer.\n  pub messages: MessageSender<N>,\n  /// Tendermint machine to be run on an asynchronous task.\n  pub machine: TendermintMachine<N>,\n}\n\nimpl<N: Network + 'static> TendermintMachine<N> {\n  // Broadcast the given piece of data\n  // Tendermint messages always specify their block/round, yet Tendermint only ever broadcasts for\n  // the current block/round. Accordingly, instead of manually fetching those at every call-site,\n  // this function can simply pass the data to the block which can contextualize it\n  fn broadcast(&mut self, data: DataFor<N>) {\n    if let Some(msg) = self.block.message(data) {\n      // Push it on to the queue. This is done so we only handle one message at a time, and so we\n      // can handle our own message before broadcasting it. That way, we fail before before\n      // becoming malicious\n      self.queue.push_back(msg);\n    }\n  }\n\n  // Start a new round. 
Returns true if we were the proposer\n  fn round(&mut self, round: RoundNumber, time: Option<CanonicalInstant>) -> bool {\n    // Clear upons\n    self.upons = Upons {\n      upon_prevotes: false,\n      upon_successful_current_round_prevotes: false,\n      upon_negative_current_round_prevotes: false,\n      upon_precommits: false,\n    };\n\n    let proposer = self.weights.proposer(self.block.number, round);\n    let res = if let Some(data) = self.block.new_round(round, proposer, time) {\n      self.broadcast(data);\n      true\n    } else {\n      false\n    };\n    log::debug!(\n      target: \"tendermint\",\n      \"proposer for block {}, round {round:?} was {} (me: {res})\",\n      self.block.number.0,\n      hex::encode(proposer.encode()),\n    );\n    res\n  }\n\n  // 53-54\n  async fn reset(&mut self, end_round: RoundNumber, proposal: Option<N::Block>) {\n    // Ensure we have the end time data for the last round\n    self.block.populate_end_time(end_round);\n\n    // Sleep until this round ends\n    let round_end = self.block.end_time[&end_round];\n    let time_until_round_end = round_end.instant().saturating_duration_since(Instant::now());\n    if time_until_round_end == Duration::ZERO {\n      log::trace!(\n        target: \"tendermint\",\n        \"resetting when prior round ended {}ms ago\",\n        Instant::now().saturating_duration_since(round_end.instant()).as_millis(),\n      );\n    }\n    log::trace!(\n      target: \"tendermint\",\n      \"sleeping until round ends in {}ms\",\n      time_until_round_end.as_millis(),\n    );\n    sleep(time_until_round_end).await;\n\n    // Clear the message tape\n    {\n      let mut txn = self.db.txn();\n      txn.del(message_tape_key(self.genesis));\n      txn.commit();\n    }\n\n    // Clear our outbound message queue\n    self.queue = VecDeque::new();\n\n    // Create the new block\n    self.block = BlockData::new(\n      self.db.clone(),\n      self.genesis,\n      self.weights.clone(),\n      
BlockNumber(self.block.number.0 + 1),\n      self.signer.validator_id().await,\n      proposal,\n    );\n\n    // Reset the round proposals\n    self.round_proposals = HashMap::new();\n\n    // Start the first round\n    self.round(RoundNumber(0), Some(round_end));\n  }\n\n  async fn reset_by_commit(\n    &mut self,\n    commit: Commit<N::SignatureScheme>,\n    proposal: Option<N::Block>,\n  ) {\n    let mut round = self.block.round().number;\n    // If this commit is for a round we don't have, jump up to it\n    while self.block.end_time[&round].canonical() < commit.end_time {\n      round.0 += 1;\n      self.block.populate_end_time(round);\n    }\n    // If this commit is for a prior round, find it\n    while self.block.end_time[&round].canonical() > commit.end_time {\n      if round.0 == 0 {\n        panic!(\"commit isn't for this machine's next block\");\n      }\n      round.0 -= 1;\n    }\n    debug_assert_eq!(self.block.end_time[&round].canonical(), commit.end_time);\n\n    self.reset(round, proposal).await;\n  }\n\n  async fn slash(&mut self, validator: N::ValidatorId, slash_event: SlashEvent) {\n    // TODO: If the new slash event has evidence, emit to prevent a low-importance slash from\n    // cancelling emission of high-importance slashes\n    if !self.block.slashes.contains(&validator) {\n      log::info!(target: \"tendermint\", \"Slashing validator {}\", hex::encode(validator.encode()));\n      self.block.slashes.insert(validator);\n      self.network.slash(validator, slash_event).await;\n    }\n  }\n\n  fn proposal_for_round(&self, round: RoundNumber) -> Option<(Option<RoundNumber>, &N::Block)> {\n    self.round_proposals.get(&round).map(|(round, block)| (*round, block))\n  }\n\n  // L22-27\n  fn upon_proposal_without_valid_round(&mut self) {\n    if self.block.round().step != Step::Propose {\n      return;\n    }\n\n    // If we have the proposal message...\n    let Some((None, block)) = self.proposal_for_round(self.block.round().number) else {\n    
  return;\n    };\n\n    // There either needs to not be a locked value or it must be equivalent\n    #[allow(clippy::map_unwrap_or)]\n    if self\n      .block\n      .locked\n      .as_ref()\n      .map(|(_round, locked_block)| block.id() == *locked_block)\n      .unwrap_or(true)\n    {\n      self.broadcast(Data::Prevote(Some(block.id())));\n    } else {\n      self.broadcast(Data::Prevote(None));\n    }\n  }\n\n  // L28-33\n  fn upon_proposal_with_valid_round(&mut self) {\n    if self.block.round().step != Step::Propose {\n      return;\n    }\n\n    // If we have the proposal message...\n    let Some((Some(proposal_valid_round), block)) =\n      self.proposal_for_round(self.block.round().number)\n    else {\n      return;\n    };\n\n    // Check we have the necessary prevotes\n    if !self.block.log.has_consensus(proposal_valid_round, &Data::Prevote(Some(block.id()))) {\n      return;\n    }\n\n    // We don't check valid round < current round as the `message` function does\n\n    // If locked is None, lockedRoundp is -1 and less than valid round\n    #[allow(clippy::map_unwrap_or)]\n    let locked_clause_1 = self\n      .block\n      .locked\n      .as_ref()\n      .map(|(locked_round, _block)| locked_round.0 <= proposal_valid_round.0)\n      .unwrap_or(true);\n    // The second clause is if the locked values are equivalent. 
If no value is locked, they aren't\n    #[allow(clippy::map_unwrap_or)]\n    let locked_clause_2 = self\n      .block\n      .locked\n      .as_ref()\n      .map(|(_round, locked_block)| block.id() == *locked_block)\n      .unwrap_or(false);\n\n    if locked_clause_1 || locked_clause_2 {\n      self.broadcast(Data::Prevote(Some(block.id())));\n    } else {\n      self.broadcast(Data::Prevote(None));\n    }\n  }\n\n  // L34-35\n  fn upon_prevotes(&mut self) {\n    if self.upons.upon_prevotes || (self.block.round().step != Step::Prevote) {\n      return;\n    }\n\n    if self.block.log.has_participation(self.block.round().number, Step::Prevote) {\n      self.block.round_mut().set_timeout(Step::Prevote);\n      self.upons.upon_prevotes = true;\n    }\n  }\n\n  // L36-43\n  async fn upon_successful_current_round_prevotes(&mut self) {\n    // Returning if `self.step == Step::Propose` is equivalent to guarding `step >= prevote`\n    if self.upons.upon_successful_current_round_prevotes ||\n      (self.block.round().step == Step::Propose)\n    {\n      return;\n    }\n\n    // If we have the proposal message...\n    let Some((_, block)) = self.proposal_for_round(self.block.round().number) else {\n      return;\n    };\n\n    // Check we have the necessary prevotes\n    if !self.block.log.has_consensus(self.block.round().number, &Data::Prevote(Some(block.id()))) {\n      return;\n    }\n\n    let block = block.clone();\n    self.upons.upon_successful_current_round_prevotes = true;\n\n    if self.block.round().step == Step::Prevote {\n      self.block.locked = Some((self.block.round().number, block.id()));\n      let signature = self\n        .signer\n        .sign(&commit_msg(\n          self.block.end_time[&self.block.round().number].canonical(),\n          block.id().as_ref(),\n        ))\n        .await;\n      self.broadcast(Data::Precommit(Some((block.id(), signature))));\n    }\n    self.block.valid = Some((self.block.round().number, block));\n  }\n\n  // L44-46\n  fn 
upon_negative_current_round_prevotes(&mut self) {\n    if self.upons.upon_negative_current_round_prevotes || (self.block.round().step != Step::Prevote)\n    {\n      return;\n    }\n\n    if self.block.log.has_consensus(self.block.round().number, &Data::Prevote(None)) {\n      self.broadcast(Data::Precommit(None));\n    }\n\n    self.upons.upon_negative_current_round_prevotes = true;\n  }\n\n  // L47-48\n  fn upon_precommits(&mut self) {\n    if self.upons.upon_precommits {\n      return;\n    }\n\n    if self.block.log.has_participation(self.block.round().number, Step::Precommit) {\n      self.block.round_mut().set_timeout(Step::Precommit);\n      self.upons.upon_precommits = true;\n    }\n  }\n\n  // L22-48\n  async fn all_current_round_upons(&mut self) {\n    self.upon_proposal_without_valid_round();\n    self.upon_proposal_with_valid_round();\n    self.upon_prevotes();\n    self.upon_successful_current_round_prevotes().await;\n    self.upon_negative_current_round_prevotes();\n    self.upon_precommits();\n  }\n\n  // L49-54\n  async fn upon_successful_precommits(&mut self, round: RoundNumber) -> bool {\n    // If we have the proposal message...\n    let Some((_, block)) = self.proposal_for_round(round) else { return false };\n\n    // Check we have the necessary precommits\n    // The precommit we check we have consensus upon uses a junk signature since message equality\n    // disregards the signature\n    if !self\n      .block\n      .log\n      .has_consensus(round, &Data::Precommit(Some((block.id(), self.signer.sign(&[]).await))))\n    {\n      return false;\n    }\n\n    // Get all participants in this commit\n    let mut validators = vec![];\n    let mut sigs = vec![];\n    // Get all precommits for this round\n    for (validator, msgs) in &self.block.log.log[&round] {\n      if let Some(signed) = msgs.get(&Step::Precommit) {\n        if let Data::Precommit(Some((id, sig))) = &signed.msg.data {\n          // If this precommit was for this block, include 
it\n          if *id == block.id() {\n            validators.push(*validator);\n            sigs.push(sig.clone());\n          }\n        }\n      }\n    }\n\n    // Form the commit itself\n    let commit_msg = commit_msg(self.block.end_time[&round].canonical(), block.id().as_ref());\n    let commit = Commit {\n      end_time: self.block.end_time[&round].canonical(),\n      validators: validators.clone(),\n      signature: self.network.signature_scheme().aggregate(&validators, &commit_msg, &sigs),\n    };\n    debug_assert!(self.network.verify_commit(block.id(), &commit));\n\n    // Add the block and reset the machine\n    log::info!(\n      target: \"tendermint\",\n      \"TendermintMachine produced block {}\",\n      hex::encode(block.id().as_ref()),\n    );\n    let id = block.id();\n    let proposal = self.network.add_block(block.clone(), commit).await;\n    log::trace!(\n      target: \"tendermint\",\n      \"added block {} (produced by machine)\",\n      hex::encode(id.as_ref()),\n    );\n    self.reset(round, proposal).await;\n\n    true\n  }\n\n  // L49-54\n  async fn all_any_round_upons(&mut self, round: RoundNumber) -> bool {\n    self.upon_successful_precommits(round).await\n  }\n\n  // Returns Ok(true) if this was a Precommit which had either no signature or its signature\n  // validated\n  // Returns Ok(false) if it wasn't a Precommit or the signature wasn't validated yet\n  // Returns Err if the signature was invalid\n  async fn verify_precommit_signature(\n    &mut self,\n    signed: &SignedMessageFor<N>,\n  ) -> Result<bool, TendermintError> {\n    let msg = &signed.msg;\n    if let Data::Precommit(precommit) = &msg.data {\n      let Some((id, sig)) = precommit else { return Ok(true) };\n      // Also verify the end_time of the commit\n      // Only perform this verification if we already have the end_time\n      // Else, there's a DoS where we receive a precommit for some round infinitely in the future\n      // which forces us to calculate every 
end time\n      if let Some(end_time) = self.block.end_time.get(&msg.round) {\n        if !self.validators.verify(msg.sender, &commit_msg(end_time.canonical(), id.as_ref()), sig)\n        {\n          log::warn!(target: \"tendermint\", \"validator produced an invalid commit signature\");\n          self\n            .slash(\n              msg.sender,\n              SlashEvent::WithEvidence(Evidence::InvalidPrecommit(signed.encode())),\n            )\n            .await;\n          Err(TendermintError::Malicious)?;\n        }\n        return Ok(true);\n      }\n    }\n    Ok(false)\n  }\n\n  async fn message(&mut self, signed: &SignedMessageFor<N>) -> Result<(), TendermintError> {\n    let msg = &signed.msg;\n    if msg.block != self.block.number {\n      Err(TendermintError::Temporal)?;\n    }\n\n    // If this is a precommit, verify its signature\n    self.verify_precommit_signature(signed).await?;\n\n    // Only let the proposer propose\n    if matches!(msg.data, Data::Proposal(..)) &&\n      (msg.sender != self.weights.proposer(msg.block, msg.round))\n    {\n      log::warn!(target: \"tendermint\", \"validator who wasn't the proposer proposed\");\n      // TODO: This should have evidence\n      self\n        .slash(msg.sender, SlashEvent::Id(SlashReason::InvalidProposer, msg.block.0, msg.round.0))\n        .await;\n      Err(TendermintError::Malicious)?;\n    };\n\n    // If this is a proposal, verify the block\n    // If the block is invalid, drop the message, letting the timeout cover it\n    // This prevents needing to check if valid inside every `upon` block\n    if let Data::Proposal(_, block) = &msg.data {\n      match self.network.validate(block).await {\n        Ok(()) => {}\n        Err(BlockError::Temporal) => {\n          if self.block.round().step == Step::Propose {\n            self.broadcast(Data::Prevote(None));\n          }\n          Err(TendermintError::Temporal)?;\n        }\n        Err(BlockError::Fatal) => {\n          log::warn!(target: 
\"tendermint\", \"validator proposed a fatally invalid block\");\n          if self.block.round().step == Step::Propose {\n            self.broadcast(Data::Prevote(None));\n          }\n          self\n            .slash(\n              msg.sender,\n              SlashEvent::Id(SlashReason::InvalidBlock, self.block.number.0, msg.round.0),\n            )\n            .await;\n          Err(TendermintError::Malicious)?;\n        }\n      };\n    }\n\n    // If this is a proposal, verify the valid round isn't fundamentally invalid\n    if let Data::Proposal(Some(valid_round), _) = msg.data {\n      if valid_round.0 >= msg.round.0 {\n        log::warn!(\n          target: \"tendermint\",\n          \"proposed proposed with a syntactically invalid valid round\",\n        );\n        if self.block.round().step == Step::Propose {\n          self.broadcast(Data::Prevote(None));\n        }\n        self\n          .slash(msg.sender, SlashEvent::WithEvidence(Evidence::InvalidValidRound(msg.encode())))\n          .await;\n        Err(TendermintError::Malicious)?;\n      }\n    }\n\n    // Add it to the log, returning if it was already handled\n    match self.block.log.log(signed.clone()) {\n      Ok(true) => {}\n      Ok(false) => Err(TendermintError::AlreadyHandled)?,\n      Err(evidence) => {\n        self.slash(msg.sender, SlashEvent::WithEvidence(evidence)).await;\n        Err(TendermintError::Malicious)?;\n      }\n    }\n    log::debug!(\n      target: \"tendermint\",\n      \"received new tendermint message (block: {}, round: {}, step: {:?})\",\n      msg.block.0,\n      msg.round.0,\n      msg.data.step(),\n    );\n\n    // If this is a proposal, insert it\n    if let Data::Proposal(vr, block) = &msg.data {\n      self.round_proposals.insert(msg.round, (*vr, block.clone()));\n    }\n\n    // L55-56\n    // Jump ahead if we should\n    if (msg.round.0 > self.block.round().number.0) &&\n      (self.block.log.round_participation(msg.round) >= 
self.weights.fault_threshold())\n    {\n      log::debug!(\n        target: \"tendermint\",\n        \"jumping from round {} to round {}\",\n        self.block.round().number.0,\n        msg.round.0,\n      );\n\n      // Jump to the new round.\n      let old_round = self.block.round().number;\n      self.round(msg.round, None);\n\n      // If any jumped over/to round already has precommit messages, verify their signatures\n      for jumped in (old_round.0 + 1) ..= msg.round.0 {\n        let jumped = RoundNumber(jumped);\n        let round_msgs = self.block.log.log.get(&jumped).cloned().unwrap_or_default();\n        for (validator, msgs) in &round_msgs {\n          if let Some(existing) = msgs.get(&Step::Precommit) {\n            if let Ok(res) = self.verify_precommit_signature(existing).await {\n              // Ensure this actually verified the signature instead of believing it shouldn't yet\n              assert!(res);\n            } else {\n              // Remove the message so it isn't counted towards forming a commit/included in one\n              // This won't remove the fact they precommitted for this block hash in the MessageLog\n              // TODO: Don't even log these in the first place until we jump, preventing needing\n              // to do this in the first place\n              self\n                .block\n                .log\n                .log\n                .get_mut(&jumped)\n                .unwrap()\n                .get_mut(validator)\n                .unwrap()\n                .remove(&Step::Precommit)\n                .unwrap();\n            }\n          }\n        }\n      }\n    }\n\n    // Now that we've jumped, and:\n    // 1) If this is a message for an old round, verified the precommit signatures\n    // 2) If this is a message for what was the current round, verified the precommit signatures\n    // 3) If this is a message for what was a future round, verified the precommit signatures if it\n    //    has 34+% participation\n 
   // Run all `upons` run for any round, which may produce a Commit if it has 67+% participation\n    // (returning true if it does, letting us return now)\n    // It's necessary to verify the precommit signatures before Commit production is allowed, hence\n    // this specific flow\n    if self.all_any_round_upons(msg.round).await {\n      return Ok(());\n    }\n\n    // If this is a historic round, or a future round without sufficient participation, return\n    if msg.round.0 != self.block.round().number.0 {\n      return Ok(());\n    }\n    // msg.round is now guaranteed to be equal to self.block.round().number\n    debug_assert_eq!(msg.round, self.block.round().number);\n\n    // Run all `upons` run for the current round\n    self.all_current_round_upons().await;\n\n    Ok(())\n  }\n\n  /// Create a new Tendermint machine, from the specified point, with the specified block as the\n  /// one to propose next. This will return a channel to send messages from the gossip layer and\n  /// the machine itself. 
The machine should have `run` called from an asynchronous task.\n  #[allow(clippy::new_ret_no_self)]\n  pub async fn new(\n    db: N::Db,\n    network: N,\n    genesis: [u8; 32],\n    last_block: BlockNumber,\n    last_time: u64,\n    proposal: N::Block,\n  ) -> TendermintHandle<N> {\n    let (msg_send, msg_recv) = mpsc::unbounded();\n    let (synced_block_send, synced_block_recv) = mpsc::unbounded();\n    let (synced_block_result_send, synced_block_result_recv) = mpsc::unbounded();\n    TendermintHandle {\n      synced_block: synced_block_send,\n      synced_block_result: synced_block_result_recv,\n      messages: msg_send,\n      machine: {\n        let now = SystemTime::now();\n        let sys_time = sys_time(last_time);\n        let mut negative = false;\n        let time_until = sys_time.duration_since(now).unwrap_or_else(|_| {\n          negative = true;\n          now.duration_since(sys_time).unwrap_or(Duration::ZERO)\n        });\n        log::info!(\n          target: \"tendermint\",\n          \"new TendermintMachine building off block {} is scheduled to start in {}{}s\",\n          last_block.0,\n          if negative { \"-\" } else { \"\" },\n          time_until.as_secs(),\n        );\n\n        // If the last block hasn't ended yet, sleep until it has\n        if !negative {\n          sleep(time_until).await;\n        }\n\n        let signer = network.signer();\n        let validators = network.signature_scheme();\n        let weights = Arc::new(network.weights());\n        let validator_id = signer.validator_id().await;\n        // L01-10\n        let mut machine = TendermintMachine {\n          db: db.clone(),\n          genesis,\n\n          network,\n          signer,\n          validators,\n          weights: weights.clone(),\n\n          queue: VecDeque::new(),\n          msg_recv,\n          synced_block_recv,\n          synced_block_result_send,\n\n          block: BlockData::new(\n            db,\n            genesis,\n            weights,\n 
           BlockNumber(last_block.0 + 1),\n            validator_id,\n            Some(proposal),\n          ),\n\n          round_proposals: HashMap::new(),\n\n          upons: Upons {\n            upon_prevotes: false,\n            upon_successful_current_round_prevotes: false,\n            upon_negative_current_round_prevotes: false,\n            upon_precommits: false,\n          },\n        };\n\n        // The end time of the last block is the start time for this one\n        // The Commit explicitly contains the end time, so loading the last commit will provide\n        // this. The only exception is for the genesis block, which doesn't have a commit\n        // Using the genesis time in place will cause this block to be created immediately\n        // after it, without the standard amount of separation (so their times will be\n        // equivalent or minimally offset)\n        // For callers wishing to avoid this, they should pass (0, GENESIS + N::block_time())\n        machine.round(RoundNumber(0), Some(CanonicalInstant::new(last_time)));\n        machine\n      },\n    }\n  }\n\n  pub async fn run(mut self) {\n    log::debug!(target: \"tendermint\", \"running TendermintMachine\");\n\n    let mut rebroadcast_future = Box::pin(sleep(Duration::from_secs(60))).fuse();\n    loop {\n      // Also create a future for if the queue has a message\n      // Does not pop_front as if another message has higher priority, its future will be handled\n      // instead in this loop, and the popped value would be dropped with the next iteration\n      let mut queue_future =\n        if self.queue.is_empty() { Fuse::terminated() } else { future::ready(()).fuse() };\n\n      if let Some((our_message, msg, mut sig)) = futures_util::select_biased! 
{\n        // Handle a new block occurring externally (from an external sync loop)\n        // Has the highest priority as it makes all other futures here irrelevant\n        msg = self.synced_block_recv.next() => {\n          if let Some(SyncedBlock { number, block, commit }) = msg {\n            // Commit is for a block we've already moved past\n            if number != self.block.number {\n              self.synced_block_result_send.send(false).await.unwrap();\n              continue;\n            }\n\n            // Commit is invalid\n            if !self.network.verify_commit(block.id(), &commit) {\n              self.synced_block_result_send.send(false).await.unwrap();\n              continue;\n            }\n\n            log::debug!(\n              target: \"tendermint\",\n              \"TendermintMachine received a block from the external sync loop\",\n            );\n            let proposal = self.network.add_block(block, commit.clone()).await;\n            self.reset_by_commit(commit, proposal).await;\n            self.synced_block_result_send.send(true).await.unwrap();\n            None\n          } else {\n            break;\n          }\n        },\n\n        // Handle our messages\n        () = queue_future => {\n          Some((true, self.queue.pop_front().unwrap(), None))\n        },\n\n        // L57-67\n        // Handle any timeouts\n        step = self.block.round().timeout_future().fuse() => {\n          // Remove the timeout so it doesn't persist, always being the selected future due to bias\n          // While this does enable the timeout to be entered again, the timeout setting code will\n          // never attempt to add a timeout after its timeout has expired\n          // (due to it setting an `upon` boolean)\n          self.block.round_mut().timeouts.remove(&step);\n\n          match step {\n            Step::Propose => {\n              // Only run if it's still the step in question\n              if self.block.round().step == step 
{\n                // Slash the validator for not proposing when they should've\n                log::debug!(target: \"tendermint\", \"validator didn't propose when they should have\");\n                // this slash will be voted on.\n                self.slash(\n                  self.weights.proposer(self.block.number, self.block.round().number),\n                  SlashEvent::Id(\n                    SlashReason::FailToPropose,\n                    self.block.number.0,\n                    self.block.round().number.0\n                  ),\n                ).await;\n                self.broadcast(Data::Prevote(None));\n              }\n            },\n            Step::Prevote => {\n              // Only run if it's still the step in question\n              if self.block.round().step == step {\n                self.broadcast(Data::Precommit(None))\n              }\n            },\n            Step::Precommit => {\n              self.round(RoundNumber(self.block.round().number.0 + 1), None);\n            }\n          };\n\n          // Execute the upons now that the state has changed\n          self.all_any_round_upons(self.block.round().number).await;\n          self.all_current_round_upons().await;\n\n          None\n        },\n\n        // If it's been more than 60s, rebroadcast our own messages\n        () = rebroadcast_future => {\n          log::trace!(\"rebroadcast future hit within tendermint machine\");\n          let key = message_tape_key(self.genesis);\n          let messages = self.db.get(key).unwrap_or(vec![]);\n          let mut messages = messages.as_slice();\n\n          while !messages.is_empty() {\n            self.network.broadcast(\n              SignedMessageFor::<N>::decode(&mut IoReader(&mut messages))\n                .expect(\"saved invalid message to DB\")\n            ).await;\n          }\n\n          // Reset the rebroadcast future\n          rebroadcast_future = Box::pin(sleep(core::time::Duration::from_secs(60))).fuse();\n\n       
   None\n        },\n\n        // Handle any received messages\n        msg = self.msg_recv.next() => {\n          if let Some(msg) = msg {\n            if !msg.verify_signature(&self.validators) {\n              continue;\n            }\n            Some((false, msg.msg, Some(msg.sig)))\n          } else {\n            break;\n          }\n        }\n      } {\n        if our_message {\n          assert!(sig.is_none());\n          sig = Some(self.signer.sign(&msg.encode()).await);\n        }\n        let sig = sig.unwrap();\n\n        let signed_msg = SignedMessage { msg: msg.clone(), sig: sig.clone() };\n        let res = self.message(&signed_msg).await;\n        // If this is our message, and we hit an invariant, we could be slashed.\n        // We only broadcast our message after running it ourselves, to ensure it doesn't error, to\n        // ensure we don't get slashed on invariants.\n        if res.is_err() && our_message {\n          panic!(\"honest node (ourselves) had invalid behavior\");\n        }\n\n        // Save this message to a linear tape of all our messages for this block, if ours\n        // TODO: Since we do this after we mark this message as sent to prevent equivocations, a\n        // precisely time reboot could cause this message marked as sent yet not added to the tape\n        if our_message {\n          let message_tape_key = message_tape_key(self.genesis);\n          let mut txn = self.db.txn();\n          let mut message_tape = txn.get(&message_tape_key).unwrap_or(vec![]);\n          message_tape.extend(signed_msg.encode());\n          txn.put(&message_tape_key, message_tape);\n          txn.commit();\n        }\n\n        // Re-broadcast this since it's an original consensus message worth handling\n        if res.is_ok() {\n          self.network.broadcast(signed_msg).await;\n        }\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "coordinator/tributary/tendermint/src/message_log.rs",
    "content": "use std::{sync::Arc, collections::HashMap};\n\nuse parity_scale_codec::Encode;\n\nuse crate::{ext::*, RoundNumber, Step, DataFor, SignedMessageFor, Evidence};\n\ntype RoundLog<N> = HashMap<<N as Network>::ValidatorId, HashMap<Step, SignedMessageFor<N>>>;\npub(crate) struct MessageLog<N: Network> {\n  weights: Arc<N::Weights>,\n  round_participation: HashMap<RoundNumber, u64>,\n  participation: HashMap<(RoundNumber, Step), u64>,\n  message_instances: HashMap<(RoundNumber, DataFor<N>), u64>,\n  pub(crate) log: HashMap<RoundNumber, RoundLog<N>>,\n}\n\nimpl<N: Network> MessageLog<N> {\n  pub(crate) fn new(weights: Arc<N::Weights>) -> MessageLog<N> {\n    MessageLog {\n      weights,\n      round_participation: HashMap::new(),\n      participation: HashMap::new(),\n      message_instances: HashMap::new(),\n      log: HashMap::new(),\n    }\n  }\n\n  // Returns true if it's a new message\n  pub(crate) fn log(&mut self, signed: SignedMessageFor<N>) -> Result<bool, Evidence> {\n    let msg = &signed.msg;\n    // Clarity, and safety around default != new edge cases\n    let round = self.log.entry(msg.round).or_insert_with(HashMap::new);\n    let msgs = round.entry(msg.sender).or_insert_with(HashMap::new);\n\n    // Handle message replays without issue. 
It's only multiple messages which is malicious\n    let step = msg.data.step();\n    if let Some(existing) = msgs.get(&step) {\n      if existing.msg.data != msg.data {\n        log::debug!(\n          target: \"tendermint\",\n          \"Validator sent multiple messages for the same block + round + step\"\n        );\n        Err(Evidence::ConflictingMessages(existing.encode(), signed.encode()))?;\n      }\n      return Ok(false);\n    }\n\n    // Since we have a new message, update the participation\n    let sender_weight = self.weights.weight(msg.sender);\n    if msgs.is_empty() {\n      *self.round_participation.entry(msg.round).or_insert_with(|| 0) += sender_weight;\n    }\n    *self.participation.entry((msg.round, step)).or_insert_with(|| 0) += sender_weight;\n    *self.message_instances.entry((msg.round, msg.data.clone())).or_insert_with(|| 0) +=\n      sender_weight;\n\n    msgs.insert(step, signed);\n    Ok(true)\n  }\n\n  // Get the participation in a given round\n  pub(crate) fn round_participation(&self, round: RoundNumber) -> u64 {\n    *self.round_participation.get(&round).unwrap_or(&0)\n  }\n\n  // Check if a supermajority of nodes have participated on a specific step\n  pub(crate) fn has_participation(&self, round: RoundNumber, step: Step) -> bool {\n    *self.participation.get(&(round, step)).unwrap_or(&0) >= self.weights.threshold()\n  }\n\n  // Check if consensus has been reached on a specific piece of data\n  pub(crate) fn has_consensus(&self, round: RoundNumber, data: &DataFor<N>) -> bool {\n    *self.message_instances.get(&(round, data.clone())).unwrap_or(&0) >= self.weights.threshold()\n  }\n}\n"
  },
  {
    "path": "coordinator/tributary/tendermint/src/round.rs",
    "content": "use std::{\n  marker::PhantomData,\n  time::{Duration, Instant},\n  collections::HashMap,\n};\n\nuse futures_util::{FutureExt, future};\nuse patchable_async_sleep::sleep;\n\nuse crate::{\n  time::CanonicalInstant,\n  Step,\n  ext::{RoundNumber, Network},\n};\n\npub struct RoundData<N: Network> {\n  _network: PhantomData<N>,\n  pub number: RoundNumber,\n  pub start_time: CanonicalInstant,\n  pub step: Step,\n  pub timeouts: HashMap<Step, Instant>,\n}\n\nimpl<N: Network> RoundData<N> {\n  pub fn new(number: RoundNumber, start_time: CanonicalInstant) -> Self {\n    RoundData {\n      _network: PhantomData,\n      number,\n      start_time,\n      step: Step::Propose,\n      timeouts: HashMap::new(),\n    }\n  }\n\n  fn timeout(&self, step: Step) -> CanonicalInstant {\n    let adjusted_block = N::BLOCK_PROCESSING_TIME * (self.number.0 + 1);\n    let adjusted_latency = N::LATENCY_TIME * (self.number.0 + 1);\n    let offset = Duration::from_millis(\n      (match step {\n        Step::Propose => adjusted_block + adjusted_latency,\n        Step::Prevote => adjusted_block + (2 * adjusted_latency),\n        Step::Precommit => adjusted_block + (3 * adjusted_latency),\n      })\n      .into(),\n    );\n    self.start_time + offset\n  }\n\n  pub fn end_time(&self) -> CanonicalInstant {\n    self.timeout(Step::Precommit)\n  }\n\n  pub(crate) fn set_timeout(&mut self, step: Step) {\n    let timeout = self.timeout(step).instant();\n    self.timeouts.entry(step).or_insert(timeout);\n  }\n\n  // Poll all set timeouts, returning the Step whose timeout has just expired\n  pub(crate) async fn timeout_future(&self) -> Step {\n    /*\n    let now = Instant::now();\n    log::trace!(\n      target: \"tendermint\",\n      \"getting timeout_future, from step {:?}, off timeouts: {:?}\",\n      self.step,\n      self.timeouts.iter().map(|(k, v)| (k, v.duration_since(now))).collect::<HashMap<_, _>>()\n    );\n    */\n\n    let timeout_future = |step| {\n      let timeout = 
self.timeouts.get(&step).copied();\n      (async move {\n        if let Some(timeout) = timeout {\n          sleep(timeout.saturating_duration_since(Instant::now())).await;\n        } else {\n          future::pending::<()>().await;\n        }\n        step\n      })\n      .fuse()\n    };\n    let propose_timeout = timeout_future(Step::Propose);\n    let prevote_timeout = timeout_future(Step::Prevote);\n    let precommit_timeout = timeout_future(Step::Precommit);\n    futures_util::pin_mut!(propose_timeout, prevote_timeout, precommit_timeout);\n\n    futures_util::select_biased! {\n      step = propose_timeout => step,\n      step = prevote_timeout => step,\n      step = precommit_timeout => step,\n    }\n  }\n}\n"
  },
  {
    "path": "coordinator/tributary/tendermint/src/time.rs",
    "content": "use core::ops::Add;\nuse std::time::{UNIX_EPOCH, SystemTime, Instant, Duration};\n\n#[derive(Clone, Copy, PartialEq, Eq, Debug)]\npub struct CanonicalInstant {\n  /// Time since the epoch.\n  time: u64,\n  /// An Instant synchronized with the above time.\n  instant: Instant,\n}\n\npub(crate) fn sys_time(time: u64) -> SystemTime {\n  UNIX_EPOCH + Duration::from_secs(time)\n}\n\nimpl CanonicalInstant {\n  pub fn new(time: u64) -> CanonicalInstant {\n    // This is imprecise yet should be precise enough, as it'll resolve within a few ms\n    let instant_now = Instant::now();\n    let sys_now = SystemTime::now();\n\n    // If the time is in the future, this will be off by that much time\n    let elapsed = sys_now.duration_since(sys_time(time)).unwrap_or(Duration::ZERO);\n    // Except for the fact this panics here\n    let synced_instant = instant_now.checked_sub(elapsed).unwrap();\n\n    CanonicalInstant { time, instant: synced_instant }\n  }\n\n  pub fn canonical(&self) -> u64 {\n    self.time\n  }\n\n  pub fn instant(&self) -> Instant {\n    self.instant\n  }\n}\n\nimpl Add<Duration> for CanonicalInstant {\n  type Output = CanonicalInstant;\n  fn add(self, duration: Duration) -> CanonicalInstant {\n    CanonicalInstant { time: self.time + duration.as_secs(), instant: self.instant + duration }\n  }\n}\n"
  },
  {
    "path": "coordinator/tributary/tendermint/tests/ext.rs",
    "content": "use std::{\n  sync::Arc,\n  time::{UNIX_EPOCH, SystemTime, Duration},\n};\n\nuse async_trait::async_trait;\n\nuse parity_scale_codec::{Encode, Decode};\n\nuse futures_util::sink::SinkExt;\nuse tokio::{sync::RwLock, time::sleep};\n\nuse serai_db::MemDb;\n\nuse tendermint_machine::{\n  ext::*, SignedMessageFor, SyncedBlockSender, SyncedBlockResultReceiver, MessageSender,\n  SlashEvent, TendermintMachine, TendermintHandle,\n};\n\ntype TestValidatorId = u16;\ntype TestBlockId = [u8; 4];\n\nstruct TestSigner(u16);\n#[async_trait]\nimpl Signer for TestSigner {\n  type ValidatorId = TestValidatorId;\n  type Signature = [u8; 32];\n\n  async fn validator_id(&self) -> Option<TestValidatorId> {\n    Some(self.0)\n  }\n\n  async fn sign(&self, msg: &[u8]) -> [u8; 32] {\n    let mut sig = [0; 32];\n    sig[.. 2].copy_from_slice(&self.0.to_le_bytes());\n    sig[2 .. (2 + 30.min(msg.len()))].copy_from_slice(&msg[.. 30.min(msg.len())]);\n    sig\n  }\n}\n\n#[derive(Clone)]\nstruct TestSignatureScheme;\nimpl SignatureScheme for TestSignatureScheme {\n  type ValidatorId = TestValidatorId;\n  type Signature = [u8; 32];\n  type AggregateSignature = Vec<[u8; 32]>;\n  type Signer = TestSigner;\n\n  #[must_use]\n  fn verify(&self, validator: u16, msg: &[u8], sig: &[u8; 32]) -> bool {\n    (sig[.. 2] == validator.to_le_bytes()) && (sig[2 ..] == [msg, &[0; 30]].concat()[.. 
30])\n  }\n\n  fn aggregate(\n    &self,\n    _: &[Self::ValidatorId],\n    _: &[u8],\n    sigs: &[Self::Signature],\n  ) -> Self::AggregateSignature {\n    sigs.to_vec()\n  }\n\n  #[must_use]\n  fn verify_aggregate(\n    &self,\n    signers: &[TestValidatorId],\n    msg: &[u8],\n    sigs: &Vec<[u8; 32]>,\n  ) -> bool {\n    assert_eq!(signers.len(), sigs.len());\n    for sig in signers.iter().zip(sigs.iter()) {\n      assert!(self.verify(*sig.0, msg, sig.1));\n    }\n    true\n  }\n}\n\nstruct TestWeights;\nimpl Weights for TestWeights {\n  type ValidatorId = TestValidatorId;\n\n  fn total_weight(&self) -> u64 {\n    4\n  }\n  fn weight(&self, id: TestValidatorId) -> u64 {\n    [1; 4][usize::from(id)]\n  }\n\n  fn proposer(&self, number: BlockNumber, round: RoundNumber) -> TestValidatorId {\n    TestValidatorId::try_from((number.0 + u64::from(round.0)) % 4).unwrap()\n  }\n}\n\n#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)]\nstruct TestBlock {\n  id: TestBlockId,\n  valid: Result<(), BlockError>,\n}\n\nimpl Block for TestBlock {\n  type Id = TestBlockId;\n\n  fn id(&self) -> TestBlockId {\n    self.id\n  }\n}\n\n#[allow(clippy::type_complexity)]\nstruct TestNetwork(\n  u16,\n  Arc<RwLock<Vec<(MessageSender<Self>, SyncedBlockSender<Self>, SyncedBlockResultReceiver)>>>,\n);\n\n#[async_trait]\nimpl Network for TestNetwork {\n  type Db = MemDb;\n\n  type ValidatorId = TestValidatorId;\n  type SignatureScheme = TestSignatureScheme;\n  type Weights = TestWeights;\n  type Block = TestBlock;\n\n  const BLOCK_PROCESSING_TIME: u32 = 2000;\n  const LATENCY_TIME: u32 = 1000;\n\n  fn signer(&self) -> TestSigner {\n    TestSigner(self.0)\n  }\n\n  fn signature_scheme(&self) -> TestSignatureScheme {\n    TestSignatureScheme\n  }\n\n  fn weights(&self) -> TestWeights {\n    TestWeights\n  }\n\n  async fn broadcast(&mut self, msg: SignedMessageFor<Self>) {\n    for (messages, _, _) in self.1.write().await.iter_mut() {\n      messages.send(msg.clone()).await.unwrap();\n    
}\n  }\n\n  async fn slash(&mut self, id: TestValidatorId, event: SlashEvent) {\n    println!(\"Slash for {id} due to {event:?}\");\n  }\n\n  async fn validate(&self, block: &TestBlock) -> Result<(), BlockError> {\n    block.valid\n  }\n\n  async fn add_block(\n    &mut self,\n    block: TestBlock,\n    commit: Commit<TestSignatureScheme>,\n  ) -> Option<TestBlock> {\n    println!(\"Adding {:?}\", &block);\n    assert!(block.valid.is_ok());\n    assert!(self.verify_commit(block.id(), &commit));\n    Some(TestBlock { id: (u32::from_le_bytes(block.id) + 1).to_le_bytes(), valid: Ok(()) })\n  }\n}\n\nimpl TestNetwork {\n  async fn new(\n    validators: usize,\n    start_time: u64,\n  ) -> Arc<RwLock<Vec<(MessageSender<Self>, SyncedBlockSender<Self>, SyncedBlockResultReceiver)>>>\n  {\n    let arc = Arc::new(RwLock::new(vec![]));\n    {\n      let mut write = arc.write().await;\n      for i in 0 .. validators {\n        let i = u16::try_from(i).unwrap();\n        let TendermintHandle { messages, synced_block, synced_block_result, machine } =\n          TendermintMachine::new(\n            MemDb::new(),\n            TestNetwork(i, arc.clone()),\n            [0; 32],\n            BlockNumber(1),\n            start_time,\n            TestBlock { id: 1u32.to_le_bytes(), valid: Ok(()) },\n          )\n          .await;\n        tokio::spawn(machine.run());\n        write.push((messages, synced_block, synced_block_result));\n      }\n    }\n    arc\n  }\n}\n\n#[tokio::test]\nasync fn test_machine() {\n  TestNetwork::new(4, SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs()).await;\n  sleep(Duration::from_secs(30)).await;\n}\n\n#[tokio::test]\nasync fn test_machine_with_historic_start_time() {\n  TestNetwork::new(4, SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs() - 60).await;\n  sleep(Duration::from_secs(30)).await;\n}\n"
  },
  {
    "path": "crypto/ciphersuite/Cargo.toml",
    "content": "[package]\nname = \"ciphersuite\"\nversion = \"0.4.2\"\ndescription = \"Ciphersuites built around ff/group\"\nlicense = \"MIT\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/crypto/ciphersuite\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nkeywords = [\"ciphersuite\", \"ff\", \"group\"]\nedition = \"2021\"\nrust-version = \"1.66\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nstd-shims = { path = \"../../common/std-shims\", version = \"^0.1.1\", default-features = false, optional = true }\n\nrand_core = { version = \"0.6\", default-features = false }\n\nzeroize = { version = \"^1.5\", default-features = false, features = [\"derive\"] }\nsubtle = { version = \"^2.4\", default-features = false }\n\ndigest = { version = \"0.10\", default-features = false, features = [\"core-api\"] }\ntranscript = { package = \"flexible-transcript\", path = \"../transcript\", version = \"^0.3.2\", default-features = false }\n\nff = { version = \"0.13\", default-features = false, features = [\"bits\"] }\ngroup = { version = \"0.13\", default-features = false }\n\n[dev-dependencies]\nhex = { version = \"0.4\", default-features = false, features = [\"std\"] }\n\nrand_core = { version = \"0.6\", default-features = false, features = [\"std\"] }\n\nff-group-tests = { version = \"0.13\", path = \"../ff-group-tests\" }\n\n[features]\nalloc = [\"std-shims\", \"ff/alloc\"]\nstd = [\n  \"std-shims/std\",\n\n  \"rand_core/std\",\n\n  \"zeroize/std\",\n  \"subtle/std\",\n\n  \"digest/std\",\n  \"transcript/std\",\n\n  \"ff/std\",\n]\n\ndefault = [\"std\"]\n"
  },
  {
    "path": "crypto/ciphersuite/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2021-2023 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "crypto/ciphersuite/README.md",
    "content": "# Ciphersuite\n\nCiphersuites for elliptic curves premised on ff/group.\n\nThis library, except for the not recommended Ed448 ciphersuite, was\n[audited by Cypher Stack in March 2023](https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf),\nculminating in commit\n[669d2dbffc1dafb82a09d9419ea182667115df06](https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06).\nAny subsequent changes have not undergone auditing.\n\nThis library is usable under no_std. The `alloc` and `std` features enable\nreading from the `io::Read` trait, shimmed by `std-shims` under `alloc`.\n\n### Secp256k1/P-256\n\nSecp256k1 and P-256 are offered via [k256](https://crates.io/crates/k256) and\n[p256](https://crates.io/crates/p256), two libraries maintained by\n[RustCrypto](https://github.com/RustCrypto).\n\nTheir `hash_to_F` is the\n[IETF's hash to curve](https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html),\nyet applied to their scalar field.\n\nPlease see the [`ciphersuite-kp256`](https://docs.rs/ciphersuite-kp256) crate for more info.\n\n### Ed25519/Ristretto\n\nEd25519/Ristretto are offered via\n[dalek-ff-group](https://crates.io/crates/dalek-ff-group), an ff/group wrapper\naround [curve25519-dalek](https://crates.io/crates/curve25519-dalek).\n\nTheir `hash_to_F` is the wide reduction of SHA2-512, as used in\n[RFC-8032](https://www.rfc-editor.org/rfc/rfc8032). 
This is also compliant with\nthe draft\n[RFC-RISTRETTO](https://www.ietf.org/archive/id/draft-irtf-cfrg-ristretto255-decaf448-05.html).\nThe domain-separation tag is naively prefixed to the message.\n\nPlease see the [`dalek-ff-group`](https://docs.rs/dalek-ff-group) crate for more info.\n\n### Ed448\n\nEd448 is offered via [minimal-ed448](https://crates.io/crates/minimal-ed448), an\nexplicitly not recommended, unaudited, incomplete Ed448 implementation, limited\nto its prime-order subgroup.\n\nIts `hash_to_F` is the wide reduction of SHAKE256, with a 114-byte output, as\nused in [RFC-8032](https://www.rfc-editor.org/rfc/rfc8032). The\ndomain-separation tag is naively prefixed to the message.\n\nPlease see the [`minimal-ed448`](https://docs.rs/minimal-ed448) crate for more info.\n"
  },
  {
    "path": "crypto/ciphersuite/kp256/Cargo.toml",
    "content": "[package]\nname = \"ciphersuite-kp256\"\nversion = \"0.4.0\"\ndescription = \"Ciphersuites built around ff/group\"\nlicense = \"MIT\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/crypto/ciphersuite/kp256\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nkeywords = [\"ciphersuite\", \"ff\", \"group\"]\nedition = \"2021\"\nrust-version = \"1.66\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nrand_core = { version = \"0.6\", default-features = false }\n\nzeroize = { version = \"^1.5\", default-features = false, features = [\"derive\"] }\n\nsha2 = { version = \"0.10\", default-features = false }\n\nelliptic-curve = { version = \"0.13\", default-features = false, features = [\"hash2curve\"] }\np256 = { version = \"^0.13.1\", default-features = false, features = [\"arithmetic\", \"bits\", \"hash2curve\"] }\nk256 = { version = \"^0.13.1\", default-features = false, features = [\"arithmetic\", \"bits\", \"hash2curve\"] }\n\nciphersuite = { path = \"../\", version = \"0.4\", default-features = false }\n\n[dev-dependencies]\nhex = { version = \"0.4\", default-features = false, features = [\"std\"] }\n\nrand_core = { version = \"0.6\", default-features = false, features = [\"std\"] }\n\nff-group-tests = { version = \"0.13\", path = \"../../ff-group-tests\" }\n\n[features]\nalloc = [\"ciphersuite/alloc\"]\nstd = [\n  \"rand_core/std\",\n\n  \"zeroize/std\",\n\n  \"sha2/std\",\n\n  \"elliptic-curve/std\",\n  \"p256/std\",\n  \"k256/std\",\n\n  \"ciphersuite/std\",\n]\n\ndefault = [\"std\"]\n"
  },
  {
    "path": "crypto/ciphersuite/kp256/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2021-2023 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "crypto/ciphersuite/kp256/README.md",
    "content": "# Ciphersuite {k, p}256\n\nSECP256k1 and P-256 Ciphersuites around k256 and p256.\n"
  },
  {
    "path": "crypto/ciphersuite/kp256/src/lib.rs",
    "content": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![cfg_attr(not(feature = \"std\"), no_std)]\n\nuse zeroize::Zeroize;\n\nuse sha2::Sha256;\n\nuse elliptic_curve::{\n  generic_array::GenericArray,\n  bigint::{NonZero, CheckedAdd, Encoding, U384},\n  hash2curve::{Expander, ExpandMsg, ExpandMsgXmd},\n};\n\nuse ciphersuite::{group::ff::PrimeField, Ciphersuite};\n\nmacro_rules! kp_curve {\n  (\n    $feature: literal,\n    $lib:     ident,\n\n    $Ciphersuite: ident,\n    $ID:          literal\n  ) => {\n    impl Ciphersuite for $Ciphersuite {\n      type F = $lib::Scalar;\n      type G = $lib::ProjectivePoint;\n      type H = Sha256;\n\n      const ID: &'static [u8] = $ID;\n\n      fn generator() -> Self::G {\n        $lib::ProjectivePoint::GENERATOR\n      }\n\n      fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F {\n        // While one of these two libraries does support directly hashing to the Scalar field, the\n        // other doesn't. While that's probably an oversight, this is a universally working method\n\n        // This method is from\n        // https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html\n        // Specifically, Section 5\n\n        // While that draft, overall, is intended for hashing to curves, that necessitates\n        // detailing how to hash to a finite field. 
The draft comments that its mechanism for\n        // doing so, which it uses to derive field elements, is also applicable to the scalar field\n\n        // The hash_to_field function is intended to provide unbiased values\n        // In order to do so, a wide reduction from an extra k bits is applied, minimizing bias to\n        // 2^-k\n        // k is intended to be the bits of security of the suite, which is 128 for secp256k1 and\n        // P-256\n        const K: usize = 128;\n\n        // L is the amount of bytes of material which should be used in the wide reduction\n        // The 256 is for the bit-length of the primes, rounded up to the nearest byte threshold\n        // This is a simplification of the formula from the end of section 5\n        const L: usize = (256 + K) / 8; // 48\n\n        // In order to perform this reduction, we need to use 48-byte numbers\n        // First, convert the modulus to a 48-byte number\n        // This is done by getting -1 as bytes, parsing it into a U384, and then adding back one\n        let mut modulus = [0; L];\n        // The byte repr of scalars will be 32 big-endian bytes\n        // Set the lower 32 bytes of our 48-byte array accordingly\n        modulus[16 ..].copy_from_slice(&(Self::F::ZERO - Self::F::ONE).to_bytes());\n        // Use a checked_add + unwrap since this addition cannot fail (being a 32-byte value with\n        // 48-bytes of space)\n        // While a non-panicking saturating_add/wrapping_add could be used, they'd likely be less\n        // performant\n        let modulus = U384::from_be_slice(&modulus).checked_add(&U384::ONE).unwrap();\n\n        // The defined P-256 and secp256k1 ciphersuites both use expand_message_xmd\n        let mut wide = U384::from_be_bytes({\n          let mut bytes = [0; 48];\n          ExpandMsgXmd::<Sha256>::expand_message(&[msg], &[dst], 48)\n            .unwrap()\n            .fill_bytes(&mut bytes);\n          bytes\n        })\n        
.rem(&NonZero::new(modulus).unwrap())\n        .to_be_bytes();\n\n        // Now that this has been reduced back to a 32-byte value, grab the lower 32-bytes\n        let mut array = *GenericArray::from_slice(&wide[16 ..]);\n        let res = $lib::Scalar::from_repr(array).unwrap();\n\n        // Zeroize the temp values we can due to the possibility hash_to_F is being used for nonces\n        wide.zeroize();\n        array.zeroize();\n        res\n      }\n    }\n  };\n}\n\n#[cfg(test)]\nfn test_oversize_dst<C: Ciphersuite>() {\n  use sha2::Digest;\n\n  // The draft specifies DSTs >255 bytes should be hashed into a 32-byte DST\n  let oversize_dst = [0x00; 256];\n  let actual_dst = Sha256::digest([b\"H2C-OVERSIZE-DST-\".as_ref(), &oversize_dst].concat());\n  // Test the hash_to_F function handles this\n  // If it didn't, these would return different values\n  assert_eq!(C::hash_to_F(&oversize_dst, &[]), C::hash_to_F(&actual_dst, &[]));\n}\n\n/// Ciphersuite for Secp256k1.\n///\n/// hash_to_F is implemented via the IETF draft for hash to curve's hash_to_field (v16).\n#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]\npub struct Secp256k1;\nkp_curve!(\"secp256k1\", k256, Secp256k1, b\"secp256k1\");\n#[test]\nfn test_secp256k1() {\n  ff_group_tests::group::test_prime_group_bits::<_, k256::ProjectivePoint>(&mut rand_core::OsRng);\n\n  // Ideally, a test vector from hash_to_field (not FROST) would be here\n  // Unfortunately, the IETF draft only provides vectors for field elements, not scalars\n  // Vectors have been requested in\n  // https://github.com/cfrg/draft-irtf-cfrg-hash-to-curve/issues/343\n\n  assert_eq!(\n    Secp256k1::hash_to_F(\n      b\"FROST-secp256k1-SHA256-v11nonce\",\n      &hex::decode(\n        \"\\\n80cbea5e405d169999d8c4b30b755fedb26ab07ec8198cda4873ed8ce5e16773\\\n08f89ffe80ac94dcb920c26f3f46140bfc7f95b493f8310f5fc1ea2b01f4254c\"\n      )\n      .unwrap()\n    )\n    .to_repr()\n    .iter()\n    .copied()\n    .collect::<Vec<_>>(),\n    
hex::decode(\"acc83278035223c1ba464e2d11bfacfc872b2b23e1041cf5f6130da21e4d8068\").unwrap()\n  );\n\n  test_oversize_dst::<Secp256k1>();\n}\n\n/// Ciphersuite for P-256.\n///\n/// hash_to_F is implemented via the IETF draft for hash to curve's hash_to_field (v16).\n#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]\npub struct P256;\nkp_curve!(\"p256\", p256, P256, b\"P-256\");\n#[test]\nfn test_p256() {\n  ff_group_tests::group::test_prime_group_bits::<_, p256::ProjectivePoint>(&mut rand_core::OsRng);\n\n  assert_eq!(\n    P256::hash_to_F(\n      b\"FROST-P256-SHA256-v11nonce\",\n      &hex::decode(\n        \"\\\nf4e8cf80aec3f888d997900ac7e3e349944b5a6b47649fc32186d2f1238103c6\\\n0c9c1a0fe806c184add50bbdcac913dda73e482daf95dcb9f35dbb0d8a9f7731\"\n      )\n      .unwrap()\n    )\n    .to_repr()\n    .iter()\n    .copied()\n    .collect::<Vec<_>>(),\n    hex::decode(\"f871dfcf6bcd199342651adc361b92c941cb6a0d8c8c1a3b91d79e2c1bf3722d\").unwrap()\n  );\n\n  test_oversize_dst::<P256>();\n}\n"
  },
  {
    "path": "crypto/ciphersuite/src/lib.md",
    "content": "# Ciphersuite\n\nCiphersuites for elliptic curves premised on ff/group.\n\nThis library was\n[audited by Cypher Stack in March 2023](https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf),\nculminating in commit\n[669d2dbffc1dafb82a09d9419ea182667115df06](https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06).\nAny subsequent changes have not undergone auditing.\n\nThis library is usable under no_std. The `alloc` and `std` features enable\nreading from the `io::Read` trait, shimmed by `std-shims` under `alloc`.\n"
  },
  {
    "path": "crypto/ciphersuite/src/lib.rs",
    "content": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![doc = include_str!(\"lib.md\")]\n#![cfg_attr(not(feature = \"std\"), no_std)]\n\nuse core::fmt::Debug;\n#[cfg(any(feature = \"alloc\", feature = \"std\"))]\n#[allow(unused_imports)]\nuse std_shims::prelude::*;\n#[cfg(any(feature = \"alloc\", feature = \"std\"))]\nuse std_shims::io::{self, Read};\n\nuse rand_core::{RngCore, CryptoRng};\n\nuse zeroize::Zeroize;\nuse subtle::ConstantTimeEq;\n\nuse digest::{core_api::BlockSizeUser, Digest, HashMarker};\nuse transcript::SecureDigest;\n\npub use group;\nuse group::{\n  ff::{Field, PrimeField, PrimeFieldBits},\n  Group, GroupOps,\n  prime::PrimeGroup,\n};\n#[cfg(any(feature = \"alloc\", feature = \"std\"))]\nuse group::GroupEncoding;\n\n/// Unified trait defining a ciphersuite around an elliptic curve.\npub trait Ciphersuite:\n  'static + Send + Sync + Clone + Copy + PartialEq + Eq + Debug + Zeroize\n{\n  /// Scalar field element type.\n  // This is available via G::Scalar yet `C::G::Scalar` is ambiguous, forcing horrific accesses\n  type F: PrimeField + PrimeFieldBits + Zeroize;\n  /// Group element type.\n  type G: Group<Scalar = Self::F> + GroupOps + PrimeGroup + Zeroize + ConstantTimeEq;\n  /// Hash algorithm used with this curve.\n  // Requires BlockSizeUser so it can be used within Hkdf which requires that.\n  type H: Send + Clone + BlockSizeUser + Digest + HashMarker + SecureDigest;\n\n  /// ID for this curve.\n  const ID: &'static [u8];\n\n  /// Generator for the group.\n  // While group does provide this in its API, privacy coins may want to use a custom basepoint\n  fn generator() -> Self::G;\n\n  /// Hash the provided domain-separation tag and message to a scalar. Ciphersuites MAY naively\n  /// prefix the tag to the message, enabling transposition between the two. 
Accordingly, this\n  /// function should NOT be used in any scheme where one tag is a valid substring of another\n  /// UNLESS the specific Ciphersuite is verified to handle the DST securely.\n  ///\n  /// Verifying specific ciphersuites have secure tag handling is not recommended, due to it\n  /// breaking the intended modularity of ciphersuites. Instead, component-specific tags with\n  /// further purpose tags are recommended (\"Schnorr-nonce\", \"Schnorr-chal\").\n  #[allow(non_snake_case)]\n  fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F;\n\n  /// Generate a random non-zero scalar.\n  #[allow(non_snake_case)]\n  fn random_nonzero_F<R: RngCore + CryptoRng>(rng: &mut R) -> Self::F {\n    let mut res;\n    while {\n      res = Self::F::random(&mut *rng);\n      res.ct_eq(&Self::F::ZERO).into()\n    } {}\n    res\n  }\n\n  /// Read a canonical scalar from something implementing std::io::Read.\n  #[cfg(any(feature = \"alloc\", feature = \"std\"))]\n  #[allow(non_snake_case)]\n  fn read_F<R: Read>(reader: &mut R) -> io::Result<Self::F> {\n    let mut encoding = <Self::F as PrimeField>::Repr::default();\n    reader.read_exact(encoding.as_mut())?;\n\n    // ff mandates this is canonical\n    let res = Option::<Self::F>::from(Self::F::from_repr(encoding))\n      .ok_or_else(|| io::Error::other(\"non-canonical scalar\"));\n    encoding.as_mut().zeroize();\n    res\n  }\n\n  /// Read a canonical point from something implementing std::io::Read.\n  ///\n  /// The provided implementation is safe so long as `GroupEncoding::to_bytes` always returns a\n  /// canonical serialization.\n  #[cfg(any(feature = \"alloc\", feature = \"std\"))]\n  #[allow(non_snake_case)]\n  fn read_G<R: Read>(reader: &mut R) -> io::Result<Self::G> {\n    let mut encoding = <Self::G as GroupEncoding>::Repr::default();\n    reader.read_exact(encoding.as_mut())?;\n\n    let point = Option::<Self::G>::from(Self::G::from_bytes(&encoding))\n      .ok_or_else(|| io::Error::other(\"invalid point\"))?;\n    
if point.to_bytes().as_ref() != encoding.as_ref() {\n      Err(io::Error::other(\"non-canonical point\"))?;\n    }\n    Ok(point)\n  }\n}\n"
  },
  {
    "path": "crypto/dalek-ff-group/Cargo.toml",
    "content": "[package]\nname = \"dalek-ff-group\"\nversion = \"0.4.4\"\ndescription = \"ff/group bindings around curve25519-dalek\"\nlicense = \"MIT\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/crypto/dalek-ff-group\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nkeywords = [\"curve25519\", \"ed25519\", \"ristretto\", \"dalek\", \"group\"]\nedition = \"2021\"\nrust-version = \"1.65\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nrustversion = \"1\"\n\nzeroize = { version = \"^1.5\", default-features = false, features = [\"zeroize_derive\"] }\nsubtle = { version = \"^2.4\", default-features = false }\n\nrand_core = { version = \"0.6\", default-features = false }\n\ndigest = { version = \"0.10\", default-features = false }\nsha2 = { version = \"0.10\", default-features = false }\n\nff = { version = \"0.13\", default-features = false, features = [\"bits\"] }\ngroup = { version = \"0.13\", default-features = false }\nciphersuite = { path = \"../ciphersuite\", default-features = false }\n\ncrypto-bigint = { version = \"0.5\", default-features = false, features = [\"zeroize\"] }\n\ncurve25519-dalek = { version = \">= 4.0, < 4.2\", default-features = false, features = [\"alloc\", \"zeroize\", \"digest\", \"group\", \"precomputed-tables\"] }\n\n[dev-dependencies]\nhex = \"0.4\"\nrand_core = { version = \"0.6\", default-features = false, features = [\"std\"] }\nff-group-tests = { path = \"../ff-group-tests\" }\n\n[features]\nalloc = [\"zeroize/alloc\", \"ciphersuite/alloc\"]\nstd = [\"alloc\", \"zeroize/std\", \"subtle/std\", \"rand_core/std\", \"digest/std\", \"sha2/std\", \"ciphersuite/std\"]\ndefault = [\"std\"]\n"
  },
  {
    "path": "crypto/dalek-ff-group/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2022-2023 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "crypto/dalek-ff-group/README.md",
    "content": "# Dalek FF/Group\n\nff/group bindings around curve25519-dalek with a from_hash/random function based\naround modern dependencies.\n\nThis library was\n[audited by Cypher Stack in March 2023](https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf),\nculminating in commit\n[669d2dbffc1dafb82a09d9419ea182667115df06](https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06).\nAny subsequent changes have not undergone auditing.\n\nThis library is usable under no_std.\n"
  },
  {
    "path": "crypto/dalek-ff-group/src/ciphersuite.rs",
    "content": "use zeroize::Zeroize;\n\nuse sha2::{Digest, Sha512};\n\nuse group::Group;\nuse crate::Scalar;\n\nuse ciphersuite::Ciphersuite;\n\nmacro_rules! dalek_curve {\n  (\n    $feature: literal,\n\n    $Ciphersuite: ident,\n    $Point:       ident,\n    $ID:          literal\n  ) => {\n    use crate::$Point;\n\n    impl Ciphersuite for $Ciphersuite {\n      type F = Scalar;\n      type G = $Point;\n      type H = Sha512;\n\n      const ID: &'static [u8] = $ID;\n\n      fn generator() -> Self::G {\n        $Point::generator()\n      }\n\n      fn hash_to_F(dst: &[u8], data: &[u8]) -> Self::F {\n        Scalar::from_hash(Sha512::new_with_prefix(&[dst, data].concat()))\n      }\n    }\n  };\n}\n\n/// Ciphersuite for Ristretto.\n///\n/// hash_to_F is implemented with a naive concatenation of the dst and data, allowing transposition\n/// between the two. This means `dst: b\"abc\", data: b\"def\"`, will produce the same scalar as\n/// `dst: \"abcdef\", data: b\"\"`. Please use carefully, not letting dsts be substrings of each other.\n#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]\npub struct Ristretto;\ndalek_curve!(\"ristretto\", Ristretto, RistrettoPoint, b\"ristretto\");\n#[test]\nfn test_ristretto() {\n  ff_group_tests::group::test_prime_group_bits::<_, RistrettoPoint>(&mut rand_core::OsRng);\n\n  assert_eq!(\n    Ristretto::hash_to_F(\n      b\"FROST-RISTRETTO255-SHA512-v11nonce\",\n      &hex::decode(\n        \"\\\n81800157bb554f299fe0b6bd658e4c4591d74168b5177bf55e8dceed59dc80c7\\\n5c3430d391552f6e60ecdc093ff9f6f4488756aa6cebdbad75a768010b8f830e\"\n      )\n      .unwrap()\n    )\n    .to_bytes()\n    .as_ref(),\n    &hex::decode(\"40f58e8df202b21c94f826e76e4647efdb0ea3ca7ae7e3689bc0cbe2e2f6660c\").unwrap()\n  );\n}\n\n/// Ciphersuite for Ed25519, inspired by RFC-8032.\n///\n/// hash_to_F is implemented with a naive concatenation of the dst and data, allowing transposition\n/// between the two. 
This means `dst: b\"abc\", data: b\"def\"`, will produce the same scalar as\n/// `dst: \"abcdef\", data: b\"\"`. Please use carefully, not letting dsts be substrings of each other.\n#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]\npub struct Ed25519;\ndalek_curve!(\"ed25519\", Ed25519, EdwardsPoint, b\"edwards25519\");\n#[test]\nfn test_ed25519() {\n  ff_group_tests::group::test_prime_group_bits::<_, EdwardsPoint>(&mut rand_core::OsRng);\n\n  // Ideally, a test vector from RFC-8032 (not FROST) would be here\n  // Unfortunately, the IETF draft doesn't provide any vectors for the derived challenges\n  assert_eq!(\n    Ed25519::hash_to_F(\n      b\"FROST-ED25519-SHA512-v11nonce\",\n      &hex::decode(\n        \"\\\n9d06a6381c7a4493929761a73692776772b274236fb5cfcc7d1b48ac3a9c249f\\\n929dcc590407aae7d388761cddb0c0db6f5627aea8e217f4a033f2ec83d93509\"\n      )\n      .unwrap()\n    )\n    .to_bytes()\n    .as_ref(),\n    &hex::decode(\"70652da3e8d7533a0e4b9e9104f01b48c396b5b553717784ed8d05c6a36b9609\").unwrap()\n  );\n}\n"
  },
  {
    "path": "crypto/dalek-ff-group/src/field.rs",
    "content": "use core::{\n  ops::{Add, AddAssign, Sub, SubAssign, Neg, Mul, MulAssign},\n  iter::{Sum, Product},\n};\n\nuse zeroize::Zeroize;\nuse rand_core::RngCore;\n\nuse subtle::{\n  Choice, CtOption, ConstantTimeEq, ConstantTimeLess, ConditionallyNegatable,\n  ConditionallySelectable,\n};\n\nuse crypto_bigint::{\n  Integer, NonZero, Encoding, U256, U512,\n  modular::constant_mod::{ResidueParams, Residue},\n  impl_modulus,\n};\n\nuse group::ff::{Field, PrimeField, FieldBits, PrimeFieldBits, FromUniformBytes};\n\nuse crate::{u8_from_bool, constant_time, math_op, math};\n\n// 2 ** 255 - 19\n// Uses saturating_sub because checked_sub isn't available at compile time\nconst MODULUS: U256 = U256::from_u8(1).shl_vartime(255).saturating_sub(&U256::from_u8(19));\nconst WIDE_MODULUS: U512 = U256::ZERO.concat(&MODULUS);\n\nimpl_modulus!(\n  FieldModulus,\n  U256,\n  // 2 ** 255 - 19\n  \"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffed\"\n);\ntype ResidueType = Residue<FieldModulus, { FieldModulus::LIMBS }>;\n\n/// A constant-time implementation of the Ed25519 field.\n#[derive(Clone, Copy, PartialEq, Eq, Default, Debug, Zeroize)]\n#[repr(transparent)]\npub struct FieldElement(ResidueType);\n\n// Square root of -1.\n// Formula from RFC-8032 (modp_sqrt_m1/sqrt8k5 z)\n// 2 ** ((MODULUS - 1) // 4) % MODULUS\nconst SQRT_M1: FieldElement = FieldElement(\n  ResidueType::new(&U256::from_u8(2))\n    .pow(&MODULUS.saturating_sub(&U256::ONE).wrapping_div(&U256::from_u8(4))),\n);\n\n// Constant useful in calculating square roots (RFC-8032 sqrt8k5's exponent used to calculate y)\nconst MOD_3_8: FieldElement = FieldElement(ResidueType::new(\n  &MODULUS.saturating_add(&U256::from_u8(3)).wrapping_div(&U256::from_u8(8)),\n));\n\n// Constant useful in sqrt_ratio_i (sqrt(u / v))\nconst MOD_5_8: FieldElement = FieldElement(ResidueType::sub(&MOD_3_8.0, &ResidueType::ONE));\n\nfn reduce(x: U512) -> ResidueType {\n  ResidueType::new(&U256::from_le_slice(\n    
&x.rem(&NonZero::new(WIDE_MODULUS).unwrap()).to_le_bytes()[.. 32],\n  ))\n}\n\nconstant_time!(FieldElement, ResidueType);\nmath!(\n  FieldElement,\n  FieldElement,\n  |x: ResidueType, y: ResidueType| x.add(&y),\n  |x: ResidueType, y: ResidueType| x.sub(&y),\n  |x: ResidueType, y: ResidueType| x.mul(&y)\n);\n\nmacro_rules! from_wrapper {\n  ($uint: ident) => {\n    impl From<$uint> for FieldElement {\n      fn from(a: $uint) -> FieldElement {\n        Self(ResidueType::new(&U256::from(a)))\n      }\n    }\n  };\n}\n\nfrom_wrapper!(u8);\nfrom_wrapper!(u16);\nfrom_wrapper!(u32);\nfrom_wrapper!(u64);\nfrom_wrapper!(u128);\n\nimpl Neg for FieldElement {\n  type Output = Self;\n  fn neg(self) -> Self::Output {\n    Self(self.0.neg())\n  }\n}\n\nimpl Neg for &FieldElement {\n  type Output = FieldElement;\n  fn neg(self) -> Self::Output {\n    (*self).neg()\n  }\n}\n\nimpl Field for FieldElement {\n  const ZERO: Self = Self(ResidueType::ZERO);\n  const ONE: Self = Self(ResidueType::ONE);\n\n  fn random(mut rng: impl RngCore) -> Self {\n    let mut bytes = [0; 64];\n    rng.fill_bytes(&mut bytes);\n    FieldElement(reduce(U512::from_le_bytes(bytes)))\n  }\n\n  fn square(&self) -> Self {\n    FieldElement(self.0.square())\n  }\n  fn double(&self) -> Self {\n    FieldElement(self.0.add(&self.0))\n  }\n\n  fn invert(&self) -> CtOption<Self> {\n    const NEG_2: FieldElement =\n      FieldElement(ResidueType::new(&MODULUS.saturating_sub(&U256::from_u8(2))));\n    CtOption::new(self.pow(NEG_2), !self.is_zero())\n  }\n\n  // RFC-8032 sqrt8k5\n  fn sqrt(&self) -> CtOption<Self> {\n    let tv1 = self.pow(MOD_3_8);\n    let tv2 = tv1 * SQRT_M1;\n    let candidate = Self::conditional_select(&tv2, &tv1, tv1.square().ct_eq(self));\n    CtOption::new(candidate, candidate.square().ct_eq(self))\n  }\n\n  fn sqrt_ratio(u: &FieldElement, v: &FieldElement) -> (Choice, FieldElement) {\n    let i = SQRT_M1;\n\n    let u = *u;\n    let v = *v;\n\n    let v3 = v.square() * v;\n    let v7 = 
v3.square() * v;\n    let mut r = (u * v3) * (u * v7).pow(MOD_5_8);\n\n    let check = v * r.square();\n    let correct_sign = check.ct_eq(&u);\n    let flipped_sign = check.ct_eq(&(-u));\n    let flipped_sign_i = check.ct_eq(&((-u) * i));\n\n    r.conditional_assign(&(r * i), flipped_sign | flipped_sign_i);\n\n    let r_is_negative = r.is_odd();\n    r.conditional_negate(r_is_negative);\n\n    (correct_sign | flipped_sign, r)\n  }\n}\n\nimpl PrimeField for FieldElement {\n  type Repr = [u8; 32];\n\n  // Big endian representation of the modulus\n  const MODULUS: &'static str = \"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffed\";\n\n  const NUM_BITS: u32 = 255;\n  const CAPACITY: u32 = 254;\n\n  const TWO_INV: Self = FieldElement(ResidueType::new(&U256::from_u8(2)).invert().0);\n\n  // This was calculated with the method from the ff crate docs\n  // SageMath GF(modulus).primitive_element()\n  const MULTIPLICATIVE_GENERATOR: Self = Self(ResidueType::new(&U256::from_u8(2)));\n  // This was set per the specification in the ff crate docs\n  // The number of leading zero bits in the little-endian bit representation of (modulus - 1)\n  const S: u32 = 2;\n\n  // This was calculated via the formula from the ff crate docs\n  // Self::MULTIPLICATIVE_GENERATOR ** ((modulus - 1) >> Self::S)\n  const ROOT_OF_UNITY: Self = FieldElement(ResidueType::new(&U256::from_be_hex(\n    \"2b8324804fc1df0b2b4d00993dfbd7a72f431806ad2fe478c4ee1b274a0ea0b0\",\n  )));\n  // Self::ROOT_OF_UNITY.invert()\n  const ROOT_OF_UNITY_INV: Self = FieldElement(Self::ROOT_OF_UNITY.0.invert().0);\n\n  // This was calculated via the formula from the ff crate docs\n  // Self::MULTIPLICATIVE_GENERATOR ** (2 ** Self::S)\n  const DELTA: Self = FieldElement(ResidueType::new(&U256::from_be_hex(\n    \"0000000000000000000000000000000000000000000000000000000000000010\",\n  )));\n\n  fn from_repr(bytes: [u8; 32]) -> CtOption<Self> {\n    let res = U256::from_le_bytes(bytes);\n    
CtOption::new(Self(ResidueType::new(&res)), res.ct_lt(&MODULUS))\n  }\n  fn to_repr(&self) -> [u8; 32] {\n    self.0.retrieve().to_le_bytes()\n  }\n\n  fn is_odd(&self) -> Choice {\n    self.0.retrieve().is_odd()\n  }\n\n  fn from_u128(num: u128) -> Self {\n    Self::from(num)\n  }\n}\n\nimpl PrimeFieldBits for FieldElement {\n  type ReprBits = [u8; 32];\n\n  fn to_le_bits(&self) -> FieldBits<Self::ReprBits> {\n    self.to_repr().into()\n  }\n\n  fn char_le_bits() -> FieldBits<Self::ReprBits> {\n    MODULUS.to_le_bytes().into()\n  }\n}\n\nimpl FieldElement {\n  /// Create a FieldElement from a `crypto_bigint::U256`.\n  ///\n  /// This will reduce the `U256` by the modulus, into a member of the field.\n  pub const fn from_u256(u256: &U256) -> Self {\n    FieldElement(Residue::new(u256))\n  }\n\n  /// Create a `FieldElement` from the reduction of a 512-bit number.\n  ///\n  /// The bytes are interpreted in little-endian format.\n  pub fn wide_reduce(value: [u8; 64]) -> Self {\n    FieldElement(reduce(U512::from_le_bytes(value)))\n  }\n\n  /// Perform an exponentiation.\n  pub fn pow(&self, other: FieldElement) -> FieldElement {\n    let mut table = [FieldElement::ONE; 16];\n    table[1] = *self;\n    for i in 2 .. 16 {\n      table[i] = table[i - 1] * self;\n    }\n\n    let mut res = FieldElement::ONE;\n    let mut bits = 0;\n    for (i, mut bit) in other.to_le_bits().iter_mut().rev().enumerate() {\n      bits <<= 1;\n      let mut bit = u8_from_bool(&mut bit);\n      bits |= bit;\n      bit.zeroize();\n\n      if ((i + 1) % 4) == 0 {\n        if i != 3 {\n          for _ in 0 .. 4 {\n            res *= res;\n          }\n        }\n\n        let mut scale_by = FieldElement::ONE;\n        #[allow(clippy::needless_range_loop)]\n        for i in 0 .. 16 {\n          #[allow(clippy::cast_possible_truncation)] // Safe since 0 .. 
16\n          {\n            scale_by = <_>::conditional_select(&scale_by, &table[i], bits.ct_eq(&(i as u8)));\n          }\n        }\n        res *= scale_by;\n        bits = 0;\n      }\n    }\n    res\n  }\n\n  /// The square root of u/v, as used for Ed25519 point decoding (RFC 8032 5.1.3) and within\n  /// Ristretto (5.1 Extracting an Inverse Square Root).\n  ///\n  /// The result is only a valid square root if the Choice is true.\n  /// RFC 8032 simply fails if there isn't a square root, leaving any return value undefined.\n  /// Ristretto explicitly returns 0 or sqrt((SQRT_M1 * u) / v).\n  pub fn sqrt_ratio_i(u: FieldElement, v: FieldElement) -> (Choice, FieldElement) {\n    let i = SQRT_M1;\n\n    let v3 = v.square() * v;\n    let v7 = v3.square() * v;\n    // Candidate root\n    let mut r = (u * v3) * (u * v7).pow(MOD_5_8);\n\n    // 8032 3.1\n    let check = v * r.square();\n    let correct_sign = check.ct_eq(&u);\n    // 8032 3.2 conditional\n    let neg_u = -u;\n    let flipped_sign = check.ct_eq(&neg_u);\n    // Ristretto Step 5\n    let flipped_sign_i = check.ct_eq(&(neg_u * i));\n\n    // 3.2 set\n    r.conditional_assign(&(r * i), flipped_sign | flipped_sign_i);\n\n    // Always return the even root, per Ristretto\n    // This doesn't break Ed25519 point decoding as that doesn't expect these steps to return a\n    // specific root\n    // Ed25519 points include a dedicated sign bit to determine which root to use, so at worst\n    // this is a pointless inefficiency\n    r.conditional_negate(r.is_odd());\n\n    (correct_sign | flipped_sign, r)\n  }\n}\n\nimpl FromUniformBytes<64> for FieldElement {\n  fn from_uniform_bytes(bytes: &[u8; 64]) -> Self {\n    Self::wide_reduce(*bytes)\n  }\n}\n\nimpl Sum<FieldElement> for FieldElement {\n  fn sum<I: Iterator<Item = FieldElement>>(iter: I) -> FieldElement {\n    let mut res = FieldElement::ZERO;\n    for item in iter {\n      res += item;\n    }\n    res\n  }\n}\n\nimpl<'a> Sum<&'a FieldElement> for 
FieldElement {\n  fn sum<I: Iterator<Item = &'a FieldElement>>(iter: I) -> FieldElement {\n    iter.copied().sum()\n  }\n}\n\nimpl Product<FieldElement> for FieldElement {\n  fn product<I: Iterator<Item = FieldElement>>(iter: I) -> FieldElement {\n    let mut res = FieldElement::ONE;\n    for item in iter {\n      res *= item;\n    }\n    res\n  }\n}\n\nimpl<'a> Product<&'a FieldElement> for FieldElement {\n  fn product<I: Iterator<Item = &'a FieldElement>>(iter: I) -> FieldElement {\n    iter.copied().product()\n  }\n}\n\n#[test]\nfn test_wide_modulus() {\n  let mut wide = [0; 64];\n  wide[.. 32].copy_from_slice(&MODULUS.to_le_bytes());\n  assert_eq!(wide, WIDE_MODULUS.to_le_bytes());\n}\n\n#[test]\nfn test_sqrt_m1() {\n  // Test equivalence against the known constant value\n  const SQRT_M1_MAGIC: U256 =\n    U256::from_be_hex(\"2b8324804fc1df0b2b4d00993dfbd7a72f431806ad2fe478c4ee1b274a0ea0b0\");\n  assert_eq!(SQRT_M1.0.retrieve(), SQRT_M1_MAGIC);\n\n  // Also test equivalence against the result of the formula from RFC-8032 (modp_sqrt_m1/sqrt8k5 z)\n  // 2 ** ((MODULUS - 1) // 4) % MODULUS\n  assert_eq!(\n    SQRT_M1,\n    FieldElement::from(2u8).pow(FieldElement(ResidueType::new(\n      &(FieldElement::ZERO - FieldElement::ONE).0.retrieve().wrapping_div(&U256::from(4u8))\n    )))\n  );\n}\n\n#[test]\nfn test_field() {\n  ff_group_tests::prime_field::test_prime_field_bits::<_, FieldElement>(&mut rand_core::OsRng);\n}\n"
  },
  {
    "path": "crypto/dalek-ff-group/src/lib.rs",
    "content": "#![allow(deprecated)]\n#![cfg_attr(docsrs, feature(doc_cfg))]\n#![no_std] // Prevents writing new code, in what should be a simple wrapper, which requires std\n#![doc = include_str!(\"../README.md\")]\n#![allow(clippy::redundant_closure_call)]\n\nuse core::{\n  borrow::Borrow,\n  ops::{Deref, Add, AddAssign, Sub, SubAssign, Neg, Mul, MulAssign},\n  iter::{Iterator, Sum, Product},\n  hash::{Hash, Hasher},\n};\n\nuse zeroize::Zeroize;\nuse subtle::{ConstantTimeEq, ConditionallySelectable};\n\nuse rand_core::RngCore;\nuse digest::{consts::U64, Digest, HashMarker};\n\nuse subtle::{Choice, CtOption};\n\npub use curve25519_dalek as dalek;\n\nuse dalek::{\n  constants::{self, BASEPOINT_ORDER},\n  scalar::Scalar as DScalar,\n  edwards::{EdwardsPoint as DEdwardsPoint, EdwardsBasepointTable, CompressedEdwardsY},\n  ristretto::{RistrettoPoint as DRistrettoPoint, RistrettoBasepointTable, CompressedRistretto},\n};\npub use constants::{ED25519_BASEPOINT_TABLE, RISTRETTO_BASEPOINT_TABLE};\n\nuse group::{\n  ff::{Field, PrimeField, FieldBits, PrimeFieldBits, FromUniformBytes},\n  Group, GroupEncoding,\n  prime::PrimeGroup,\n};\n\nmod field;\npub use field::FieldElement;\n\nmod ciphersuite;\npub use crate::ciphersuite::{Ed25519, Ristretto};\n\n// Use black_box when possible\n#[rustversion::since(1.66)]\nmod black_box {\n  pub(crate) fn black_box<T>(val: T) -> T {\n    #[allow(clippy::incompatible_msrv)]\n    core::hint::black_box(val)\n  }\n}\n#[rustversion::before(1.66)]\nmod black_box {\n  pub(crate) fn black_box<T>(val: T) -> T {\n    val\n  }\n}\nuse black_box::black_box;\n\nfn u8_from_bool(bit_ref: &mut bool) -> u8 {\n  let bit_ref = black_box(bit_ref);\n\n  let mut bit = black_box(*bit_ref);\n  #[allow(clippy::cast_lossless)]\n  let res = black_box(bit as u8);\n  bit.zeroize();\n  debug_assert!((res | 1) == 1);\n\n  bit_ref.zeroize();\n  res\n}\n\n// Convert a boolean to a Choice in a *presumably* constant time manner\nfn choice(mut value: bool) -> Choice {\n  
Choice::from(u8_from_bool(&mut value))\n}\n\nmacro_rules! deref_borrow {\n  ($Source: ident, $Target: ident) => {\n    impl Deref for $Source {\n      type Target = $Target;\n\n      fn deref(&self) -> &Self::Target {\n        &self.0\n      }\n    }\n\n    impl Borrow<$Target> for $Source {\n      fn borrow(&self) -> &$Target {\n        &self.0\n      }\n    }\n\n    impl Borrow<$Target> for &$Source {\n      fn borrow(&self) -> &$Target {\n        &self.0\n      }\n    }\n  };\n}\n\nmacro_rules! constant_time {\n  ($Value: ident, $Inner: ident) => {\n    impl ConstantTimeEq for $Value {\n      fn ct_eq(&self, other: &Self) -> Choice {\n        self.0.ct_eq(&other.0)\n      }\n    }\n\n    impl ConditionallySelectable for $Value {\n      fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {\n        $Value($Inner::conditional_select(&a.0, &b.0, choice))\n      }\n    }\n  };\n}\npub(crate) use constant_time;\n\nmacro_rules! math_op {\n  (\n    $Value: ident,\n    $Other: ident,\n    $Op: ident,\n    $op_fn: ident,\n    $Assign: ident,\n    $assign_fn: ident,\n    $function: expr\n  ) => {\n    impl $Op<$Other> for $Value {\n      type Output = $Value;\n      fn $op_fn(self, other: $Other) -> Self::Output {\n        Self($function(self.0, other.0))\n      }\n    }\n    impl $Assign<$Other> for $Value {\n      fn $assign_fn(&mut self, other: $Other) {\n        self.0 = $function(self.0, other.0);\n      }\n    }\n    impl<'a> $Op<&'a $Other> for $Value {\n      type Output = $Value;\n      fn $op_fn(self, other: &'a $Other) -> Self::Output {\n        Self($function(self.0, other.0))\n      }\n    }\n    impl<'a> $Assign<&'a $Other> for $Value {\n      fn $assign_fn(&mut self, other: &'a $Other) {\n        self.0 = $function(self.0, other.0);\n      }\n    }\n  };\n}\npub(crate) use math_op;\n\nmacro_rules! 
math {\n  ($Value: ident, $Factor: ident, $add: expr, $sub: expr, $mul: expr) => {\n    math_op!($Value, $Value, Add, add, AddAssign, add_assign, $add);\n    math_op!($Value, $Value, Sub, sub, SubAssign, sub_assign, $sub);\n    math_op!($Value, $Factor, Mul, mul, MulAssign, mul_assign, $mul);\n  };\n}\npub(crate) use math;\n\nmacro_rules! math_neg {\n  ($Value: ident, $Factor: ident, $add: expr, $sub: expr, $mul: expr) => {\n    math!($Value, $Factor, $add, $sub, $mul);\n\n    impl Neg for $Value {\n      type Output = Self;\n      fn neg(self) -> Self::Output {\n        Self(-self.0)\n      }\n    }\n  };\n}\n\n/// Wrapper around the dalek Scalar type.\n#[derive(Clone, Copy, PartialEq, Eq, Default, Debug, Zeroize)]\npub struct Scalar(pub DScalar);\nderef_borrow!(Scalar, DScalar);\nconstant_time!(Scalar, DScalar);\nmath_neg!(Scalar, Scalar, DScalar::add, DScalar::sub, DScalar::mul);\n\nmacro_rules! from_wrapper {\n  ($uint: ident) => {\n    impl From<$uint> for Scalar {\n      fn from(a: $uint) -> Scalar {\n        Scalar(DScalar::from(a))\n      }\n    }\n  };\n}\n\nfrom_wrapper!(u8);\nfrom_wrapper!(u16);\nfrom_wrapper!(u32);\nfrom_wrapper!(u64);\nfrom_wrapper!(u128);\n\nimpl Scalar {\n  pub fn pow(&self, other: Scalar) -> Scalar {\n    let mut table = [Scalar::ONE; 16];\n    table[1] = *self;\n    for i in 2 .. 16 {\n      table[i] = table[i - 1] * self;\n    }\n\n    let mut res = Scalar::ONE;\n    let mut bits = 0;\n    for (i, mut bit) in other.to_le_bits().iter_mut().rev().enumerate() {\n      bits <<= 1;\n      let mut bit = u8_from_bool(&mut bit);\n      bits |= bit;\n      bit.zeroize();\n\n      if ((i + 1) % 4) == 0 {\n        if i != 3 {\n          for _ in 0 .. 4 {\n            res *= res;\n          }\n        }\n\n        let mut scale_by = Scalar::ONE;\n        #[allow(clippy::needless_range_loop)]\n        for i in 0 .. 16 {\n          #[allow(clippy::cast_possible_truncation)] // Safe since 0 .. 
16\n          {\n            scale_by = <_>::conditional_select(&scale_by, &table[i], bits.ct_eq(&(i as u8)));\n          }\n        }\n        res *= scale_by;\n        bits = 0;\n      }\n    }\n    res\n  }\n\n  /// Perform wide reduction on a 64-byte array to create a Scalar without bias.\n  pub fn from_bytes_mod_order_wide(bytes: &[u8; 64]) -> Scalar {\n    Self(DScalar::from_bytes_mod_order_wide(bytes))\n  }\n\n  /// Derive a Scalar without bias from a digest via wide reduction.\n  pub fn from_hash<D: Digest<OutputSize = U64> + HashMarker>(hash: D) -> Scalar {\n    let mut output = [0u8; 64];\n    output.copy_from_slice(&hash.finalize());\n    let res = Scalar(DScalar::from_bytes_mod_order_wide(&output));\n    output.zeroize();\n    res\n  }\n}\n\nimpl Field for Scalar {\n  const ZERO: Scalar = Scalar(DScalar::ZERO);\n  const ONE: Scalar = Scalar(DScalar::ONE);\n\n  fn random(rng: impl RngCore) -> Self {\n    Self(<DScalar as Field>::random(rng))\n  }\n\n  fn square(&self) -> Self {\n    Self(self.0.square())\n  }\n  fn double(&self) -> Self {\n    Self(self.0.double())\n  }\n  fn invert(&self) -> CtOption<Self> {\n    <DScalar as Field>::invert(&self.0).map(Self)\n  }\n\n  fn sqrt(&self) -> CtOption<Self> {\n    self.0.sqrt().map(Self)\n  }\n\n  fn sqrt_ratio(num: &Self, div: &Self) -> (Choice, Self) {\n    let (choice, res) = DScalar::sqrt_ratio(num, div);\n    (choice, Self(res))\n  }\n}\n\nimpl PrimeField for Scalar {\n  type Repr = [u8; 32];\n\n  const MODULUS: &'static str = <DScalar as PrimeField>::MODULUS;\n\n  const NUM_BITS: u32 = <DScalar as PrimeField>::NUM_BITS;\n  const CAPACITY: u32 = <DScalar as PrimeField>::CAPACITY;\n\n  const TWO_INV: Scalar = Scalar(<DScalar as PrimeField>::TWO_INV);\n\n  const MULTIPLICATIVE_GENERATOR: Scalar =\n    Scalar(<DScalar as PrimeField>::MULTIPLICATIVE_GENERATOR);\n  const S: u32 = <DScalar as PrimeField>::S;\n\n  const ROOT_OF_UNITY: Scalar = Scalar(<DScalar as PrimeField>::ROOT_OF_UNITY);\n  const 
ROOT_OF_UNITY_INV: Scalar = Scalar(<DScalar as PrimeField>::ROOT_OF_UNITY_INV);\n\n  const DELTA: Scalar = Scalar(<DScalar as PrimeField>::DELTA);\n\n  fn from_repr(bytes: [u8; 32]) -> CtOption<Self> {\n    <DScalar as PrimeField>::from_repr(bytes).map(Scalar)\n  }\n  fn to_repr(&self) -> [u8; 32] {\n    self.0.to_repr()\n  }\n\n  fn is_odd(&self) -> Choice {\n    self.0.is_odd()\n  }\n\n  fn from_u128(num: u128) -> Self {\n    Scalar(DScalar::from_u128(num))\n  }\n}\n\nimpl PrimeFieldBits for Scalar {\n  type ReprBits = [u8; 32];\n\n  fn to_le_bits(&self) -> FieldBits<Self::ReprBits> {\n    self.to_repr().into()\n  }\n\n  fn char_le_bits() -> FieldBits<Self::ReprBits> {\n    BASEPOINT_ORDER.to_bytes().into()\n  }\n}\n\nimpl FromUniformBytes<64> for Scalar {\n  fn from_uniform_bytes(bytes: &[u8; 64]) -> Self {\n    Self::from_bytes_mod_order_wide(bytes)\n  }\n}\n\nimpl Sum<Scalar> for Scalar {\n  fn sum<I: Iterator<Item = Scalar>>(iter: I) -> Scalar {\n    Self(DScalar::sum(iter))\n  }\n}\n\nimpl<'a> Sum<&'a Scalar> for Scalar {\n  fn sum<I: Iterator<Item = &'a Scalar>>(iter: I) -> Scalar {\n    Self(DScalar::sum(iter))\n  }\n}\n\nimpl Product<Scalar> for Scalar {\n  fn product<I: Iterator<Item = Scalar>>(iter: I) -> Scalar {\n    Self(DScalar::product(iter))\n  }\n}\n\nimpl<'a> Product<&'a Scalar> for Scalar {\n  fn product<I: Iterator<Item = &'a Scalar>>(iter: I) -> Scalar {\n    Self(DScalar::product(iter))\n  }\n}\n\nmacro_rules! dalek_group {\n  (\n    $Point: ident,\n    $DPoint: ident,\n    $torsion_free: expr,\n\n    $Table: ident,\n\n    $DCompressed: ident,\n\n    $BASEPOINT_POINT: ident,\n    $BASEPOINT_TABLE: ident\n  ) => {\n    /// Wrapper around the dalek Point type.\n    ///\n    /// All operations will be restricted to a prime-order subgroup (equivalent to the group itself\n    /// in the case of Ristretto). 
The exposure of the internal element does allow bypassing this\n    /// however, which may lead to undefined/computationally-unsafe behavior, and is entirely at\n    /// the user's risk.\n    #[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]\n    pub struct $Point(pub $DPoint);\n    deref_borrow!($Point, $DPoint);\n    constant_time!($Point, $DPoint);\n    math_neg!($Point, Scalar, $DPoint::add, $DPoint::sub, $DPoint::mul);\n\n    /// The basepoint for this curve.\n    pub const $BASEPOINT_POINT: $Point = $Point(constants::$BASEPOINT_POINT);\n\n    impl Sum<$Point> for $Point {\n      fn sum<I: Iterator<Item = $Point>>(iter: I) -> $Point {\n        Self($DPoint::sum(iter))\n      }\n    }\n    impl<'a> Sum<&'a $Point> for $Point {\n      fn sum<I: Iterator<Item = &'a $Point>>(iter: I) -> $Point {\n        Self($DPoint::sum(iter))\n      }\n    }\n\n    impl Group for $Point {\n      type Scalar = Scalar;\n      fn random(mut rng: impl RngCore) -> Self {\n        loop {\n          let mut bytes = [0; 32];\n          rng.fill_bytes(&mut bytes);\n          let Some(point) = Option::<$Point>::from($Point::from_bytes(&bytes)) else {\n            continue;\n          };\n          // Ban identity, per the trait specification\n          if !bool::from(point.is_identity()) {\n            return point;\n          }\n        }\n      }\n      fn identity() -> Self {\n        Self($DPoint::identity())\n      }\n      fn generator() -> Self {\n        $BASEPOINT_POINT\n      }\n      fn is_identity(&self) -> Choice {\n        self.0.ct_eq(&$DPoint::identity())\n      }\n      fn double(&self) -> Self {\n        Self(self.0.double())\n      }\n    }\n\n    impl GroupEncoding for $Point {\n      type Repr = [u8; 32];\n\n      fn from_bytes(bytes: &Self::Repr) -> CtOption<Self> {\n        let decompressed = $DCompressed(*bytes).decompress();\n        // TODO: Same note on unwrap_or as above\n        let point = decompressed.unwrap_or($DPoint::identity());\n        
CtOption::new(\n          $Point(point),\n          choice(black_box(decompressed).is_some()) & choice($torsion_free(point)),\n        )\n      }\n\n      fn from_bytes_unchecked(bytes: &Self::Repr) -> CtOption<Self> {\n        $Point::from_bytes(bytes)\n      }\n\n      fn to_bytes(&self) -> Self::Repr {\n        self.0.to_bytes()\n      }\n    }\n\n    impl PrimeGroup for $Point {}\n\n    impl Mul<Scalar> for &$Table {\n      type Output = $Point;\n      fn mul(self, b: Scalar) -> $Point {\n        $Point(&b.0 * self)\n      }\n    }\n\n    // Support being used as a key in a table\n    // While it is expensive as a key, due to the field operations required, there's frequently\n    // use cases for public key -> value lookups\n    #[allow(unknown_lints, renamed_and_removed_lints)]\n    #[allow(clippy::derived_hash_with_manual_eq, clippy::derive_hash_xor_eq)]\n    impl Hash for $Point {\n      fn hash<H: Hasher>(&self, state: &mut H) {\n        self.to_bytes().hash(state);\n      }\n    }\n  };\n}\n\ndalek_group!(\n  EdwardsPoint,\n  DEdwardsPoint,\n  |point: DEdwardsPoint| point.is_torsion_free(),\n  EdwardsBasepointTable,\n  CompressedEdwardsY,\n  ED25519_BASEPOINT_POINT,\n  ED25519_BASEPOINT_TABLE\n);\n\nimpl EdwardsPoint {\n  pub fn mul_by_cofactor(&self) -> EdwardsPoint {\n    EdwardsPoint(self.0.mul_by_cofactor())\n  }\n}\n\ndalek_group!(\n  RistrettoPoint,\n  DRistrettoPoint,\n  |_| true,\n  RistrettoBasepointTable,\n  CompressedRistretto,\n  RISTRETTO_BASEPOINT_POINT,\n  RISTRETTO_BASEPOINT_TABLE\n);\n\n#[test]\nfn test_ed25519_group() {\n  ff_group_tests::group::test_prime_group_bits::<_, EdwardsPoint>(&mut rand_core::OsRng);\n}\n\n#[test]\nfn test_ristretto_group() {\n  ff_group_tests::group::test_prime_group_bits::<_, RistrettoPoint>(&mut rand_core::OsRng);\n}\n"
  },
  {
    "path": "crypto/dkg/Cargo.toml",
    "content": "[package]\nname = \"dkg\"\nversion = \"0.6.1\"\ndescription = \"Distributed key generation over ff/group\"\nlicense = \"MIT\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/crypto/dkg\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nkeywords = [\"dkg\", \"multisig\", \"threshold\", \"ff\", \"group\"]\nedition = \"2021\"\nrust-version = \"1.66\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nzeroize = { version = \"^1.5\", default-features = false, features = [\"zeroize_derive\", \"alloc\"] }\n\nthiserror = { version = \"2\", default-features = false }\n\nstd-shims = { version = \"0.1\", path = \"../../common/std-shims\", default-features = false }\n\nborsh = { version = \"1\", default-features = false, features = [\"derive\", \"de_strict_order\"], optional = true }\n\nciphersuite = { path = \"../ciphersuite\", version = \"^0.4.1\", default-features = false, features = [\"alloc\"] }\n\n[features]\nstd = [\n  \"thiserror/std\",\n\n  \"std-shims/std\",\n\n  \"borsh?/std\",\n\n  \"ciphersuite/std\",\n]\nborsh = [\"dep:borsh\"]\ndefault = [\"std\"]\n"
  },
  {
    "path": "crypto/dkg/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2021-2025 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "crypto/dkg/README.md",
    "content": "# Distributed Key Generation\n\nA crate implementing a type for keys, presumably the result of a distributed\nkey generation protocol, and utilities from there.\n\nThis crate used to host implementations of distributed key generation protocols\nas well (hence the name). Those have been smashed into their own crates, such\nas [`dkg-musig`](https://docs.rs/dkg-musig) and\n[`dkg-pedpop`](https://docs.rs/dkg-pedpop).\n\nBefore being smashed, this crate was [audited by Cypher Stack in March 2023](\n  https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf\n), culminating in commit [669d2dbffc1dafb82a09d9419ea182667115df06](\n  https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06\n). Any subsequent changes have not undergone auditing.\n"
  },
  {
    "path": "crypto/dkg/dealer/Cargo.toml",
    "content": "[package]\nname = \"dkg-dealer\"\nversion = \"0.6.0\"\ndescription = \"Produce dkg::ThresholdKeys with a dealer key generation\"\nlicense = \"MIT\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/crypto/dkg/dealer\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nkeywords = [\"dkg\", \"multisig\", \"threshold\", \"ff\", \"group\"]\nedition = \"2021\"\nrust-version = \"1.66\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nzeroize = { version = \"^1.5\", default-features = false }\nrand_core = { version = \"0.6\", default-features = false }\n\nstd-shims = { version = \"0.1\", path = \"../../../common/std-shims\", default-features = false }\n\nciphersuite = { path = \"../../ciphersuite\", version = \"^0.4.1\", default-features = false }\ndkg = { path = \"../\", version = \"0.6\", default-features = false }\n\n[features]\nstd = [\n  \"zeroize/std\",\n  \"rand_core/std\",\n  \"std-shims/std\",\n  \"ciphersuite/std\",\n  \"dkg/std\",\n]\ndefault = [\"std\"]\n"
  },
  {
    "path": "crypto/dkg/dealer/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2021-2025 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "crypto/dkg/dealer/README.md",
    "content": "# Distributed Key Generation - Dealer\n\nThis crate implements a dealer key generation protocol for the\n[`dkg`](https://docs.rs/dkg) crate's types. This provides a single point of\nfailure when the key is being generated and is NOT recommended for use outside\nof tests.\n\nThis crate was originally part of (in some form) the `dkg` crate, which was\n[audited by Cypher Stack in March 2023](\n  https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf\n), culminating in commit [669d2dbffc1dafb82a09d9419ea182667115df06](\n  https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06\n). Any subsequent changes have not undergone auditing.\n"
  },
  {
    "path": "crypto/dkg/dealer/src/lib.rs",
    "content": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![doc = include_str!(\"../README.md\")]\n#![no_std]\n\nuse core::ops::Deref;\nuse std_shims::{vec::Vec, collections::HashMap};\n\nuse zeroize::{Zeroize, Zeroizing};\nuse rand_core::{RngCore, CryptoRng};\n\nuse ciphersuite::{\n  group::ff::{Field, PrimeField},\n  Ciphersuite,\n};\npub use dkg::*;\n\n/// Create a key via a dealer key generation protocol.\npub fn key_gen<R: RngCore + CryptoRng, C: Ciphersuite>(\n  rng: &mut R,\n  threshold: u16,\n  participants: u16,\n) -> Result<HashMap<Participant, ThresholdKeys<C>>, DkgError> {\n  let mut coefficients = Vec::with_capacity(usize::from(participants));\n  // `.max(1)` so we always generate the 0th coefficient which we'll share\n  for _ in 0 .. threshold.max(1) {\n    coefficients.push(Zeroizing::new(C::F::random(&mut *rng)));\n  }\n\n  fn polynomial<F: PrimeField + Zeroize>(\n    coefficients: &[Zeroizing<F>],\n    l: Participant,\n  ) -> Zeroizing<F> {\n    let l = F::from(u64::from(u16::from(l)));\n    // This should never be reached since Participant is explicitly non-zero\n    assert!(l != F::ZERO, \"zero participant passed to polynomial\");\n    let mut share = Zeroizing::new(F::ZERO);\n    for (idx, coefficient) in coefficients.iter().rev().enumerate() {\n      *share += coefficient.deref();\n      if idx != (coefficients.len() - 1) {\n        *share *= l;\n      }\n    }\n    share\n  }\n\n  let group_key = C::generator() * coefficients[0].deref();\n  let mut secret_shares = HashMap::with_capacity(participants as usize);\n  let mut verification_shares = HashMap::with_capacity(participants as usize);\n  for i in 1 ..= participants {\n    let i = Participant::new(i).expect(\"non-zero u16 wasn't a valid Participant index\");\n    let secret_share = polynomial(&coefficients, i);\n    secret_shares.insert(i, secret_share.clone());\n    verification_shares.insert(i, C::generator() * *secret_share);\n  }\n\n  let mut res = HashMap::with_capacity(participants as 
usize);\n  for (i, secret_share) in secret_shares {\n    let keys = ThresholdKeys::new(\n      ThresholdParams::new(threshold, participants, i)?,\n      Interpolation::Lagrange,\n      secret_share,\n      verification_shares.clone(),\n    )?;\n    debug_assert_eq!(keys.group_key(), group_key);\n    res.insert(i, keys);\n  }\n  Ok(res)\n}\n"
  },
  {
    "path": "crypto/dkg/musig/Cargo.toml",
    "content": "[package]\nname = \"dkg-musig\"\nversion = \"0.6.0\"\ndescription = \"The MuSig key aggregation protocol\"\nlicense = \"MIT\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/crypto/dkg/musig\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nkeywords = [\"dkg\", \"multisig\", \"threshold\", \"ff\", \"group\"]\nedition = \"2021\"\nrust-version = \"1.79\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nthiserror = { version = \"2\", default-features = false }\n\nrand_core = { version = \"0.6\", default-features = false }\n\nzeroize = { version = \"^1.5\", default-features = false, features = [\"zeroize_derive\"] }\n\nstd-shims = { version = \"0.1\", path = \"../../../common/std-shims\", default-features = false }\n\nmultiexp = { path = \"../../multiexp\", version = \"0.4\", default-features = false }\nciphersuite = { path = \"../../ciphersuite\", version = \"^0.4.1\", default-features = false }\ndkg = { path = \"../\", version = \"0.6\", default-features = false }\n\n[dev-dependencies]\nrand_core = { version = \"0.6\", default-features = false, features = [\"getrandom\"] }\ndalek-ff-group = { path = \"../../dalek-ff-group\" }\ndkg-recovery = { path = \"../recovery\", default-features = false, features = [\"std\"] }\n\n[features]\nstd = [\n  \"thiserror/std\",\n\n  \"rand_core/std\",\n\n  \"std-shims/std\",\n\n  \"multiexp/std\",\n  \"ciphersuite/std\",\n  \"dkg/std\",\n]\ndefault = [\"std\"]\n"
  },
  {
    "path": "crypto/dkg/musig/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2021-2025 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "crypto/dkg/musig/README.md",
    "content": "# Distributed Key Generation - MuSig\n\nThis implements the MuSig key aggregation protocol for the\n[`dkg`](https://docs.rs/dkg) crate's types.\n\nThis crate was originally part of (in some form) the `dkg` crate, which was\n[audited by Cypher Stack in March 2023](\n  https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf\n), culminating in commit\n[669d2dbffc1dafb82a09d9419ea182667115df06](\n  https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06\n). Any subsequent changes have not undergone auditing.\n"
  },
  {
    "path": "crypto/dkg/musig/src/lib.rs",
    "content": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![doc = include_str!(\"../README.md\")]\n#![cfg_attr(not(feature = \"std\"), no_std)]\n\nuse core::ops::Deref;\nuse std_shims::{\n  vec,\n  vec::Vec,\n  collections::{HashSet, HashMap},\n};\n\nuse zeroize::Zeroizing;\n\nuse ciphersuite::{group::GroupEncoding, Ciphersuite};\n\npub use dkg::*;\n\n#[cfg(test)]\nmod tests;\n\n/// Errors encountered when working with threshold keys.\n#[derive(Clone, PartialEq, Eq, Debug, thiserror::Error)]\npub enum MusigError<C: Ciphersuite> {\n  /// No keys were provided.\n  #[error(\"no keys provided\")]\n  NoKeysProvided,\n  /// Too many keys were provided.\n  #[error(\"too many keys (allowed {max}, provided {provided})\")]\n  TooManyKeysProvided {\n    /// The maximum amount of keys allowed.\n    max: u16,\n    /// The amount of keys provided.\n    provided: usize,\n  },\n  /// A participant was duplicated.\n  #[error(\"a participant was duplicated\")]\n  DuplicatedParticipant(C::G),\n  /// Participating, yet our public key wasn't found in the list of keys.\n  #[error(\"private key's public key wasn't present in the list of public keys\")]\n  NotPresent,\n  /// An error propagated from the underlying `dkg` crate.\n  #[error(\"error from dkg ({0})\")]\n  DkgError(DkgError),\n}\n\nfn check_keys<C: Ciphersuite>(keys: &[C::G]) -> Result<u16, MusigError<C>> {\n  if keys.is_empty() {\n    Err(MusigError::NoKeysProvided)?;\n  }\n\n  let keys_len = u16::try_from(keys.len())\n    .map_err(|_| MusigError::TooManyKeysProvided { max: u16::MAX, provided: keys.len() })?;\n\n  let mut set = HashSet::with_capacity(keys.len());\n  for key in keys {\n    let bytes = key.to_bytes().as_ref().to_vec();\n    if !set.insert(bytes) {\n      Err(MusigError::DuplicatedParticipant(*key))?;\n    }\n  }\n\n  Ok(keys_len)\n}\n\nfn binding_factor_transcript<C: Ciphersuite>(\n  context: [u8; 32],\n  keys_len: u16,\n  keys: &[C::G],\n) -> Vec<u8> {\n  debug_assert_eq!(usize::from(keys_len), keys.len());\n\n  
let mut transcript = vec![];\n  transcript.extend(&context);\n  transcript.extend(keys_len.to_le_bytes());\n  for key in keys {\n    transcript.extend(key.to_bytes().as_ref());\n  }\n  transcript\n}\n\nfn binding_factor<C: Ciphersuite>(mut transcript: Vec<u8>, i: u16) -> C::F {\n  transcript.extend(i.to_le_bytes());\n  C::hash_to_F(b\"dkg-musig\", &transcript)\n}\n\n#[allow(clippy::type_complexity)]\nfn musig_key_multiexp<C: Ciphersuite>(\n  context: [u8; 32],\n  keys: &[C::G],\n) -> Result<Vec<(C::F, C::G)>, MusigError<C>> {\n  let keys_len = check_keys::<C>(keys)?;\n  let transcript = binding_factor_transcript::<C>(context, keys_len, keys);\n  let mut multiexp = Vec::with_capacity(keys.len());\n  for i in 1 ..= keys_len {\n    multiexp.push((binding_factor::<C>(transcript.clone(), i), keys[usize::from(i - 1)]));\n  }\n  Ok(multiexp)\n}\n\n/// The group key resulting from using this library's MuSig key aggregation.\n///\n/// This function executes in variable time and MUST NOT be used with secret data.\npub fn musig_key_vartime<C: Ciphersuite>(\n  context: [u8; 32],\n  keys: &[C::G],\n) -> Result<C::G, MusigError<C>> {\n  Ok(multiexp::multiexp_vartime(&musig_key_multiexp(context, keys)?))\n}\n\n/// The group key resulting from using this library's MuSig key aggregation.\npub fn musig_key<C: Ciphersuite>(context: [u8; 32], keys: &[C::G]) -> Result<C::G, MusigError<C>> {\n  Ok(multiexp::multiexp(&musig_key_multiexp(context, keys)?))\n}\n\n/// A n-of-n non-interactive DKG which does not guarantee the usability of the resulting key.\npub fn musig<C: Ciphersuite>(\n  context: [u8; 32],\n  private_key: Zeroizing<C::F>,\n  keys: &[C::G],\n) -> Result<ThresholdKeys<C>, MusigError<C>> {\n  let our_pub_key = C::generator() * private_key.deref();\n  let Some(our_i) = keys.iter().position(|key| *key == our_pub_key) else {\n    Err(MusigError::DkgError(DkgError::NotParticipating))?\n  };\n\n  let keys_len: u16 = check_keys::<C>(keys)?;\n\n  let params = ThresholdParams::new(\n 
   keys_len,\n    keys_len,\n    // The `+ 1` won't fail as `keys.len() <= u16::MAX`, so any index is `< u16::MAX`\n    Participant::new(\n      u16::try_from(our_i).expect(\"keys.len() <= u16::MAX yet index of keys > u16::MAX?\") + 1,\n    )\n    .expect(\"i + 1 != 0\"),\n  )\n  .map_err(MusigError::DkgError)?;\n\n  let transcript = binding_factor_transcript::<C>(context, keys_len, keys);\n  let mut binding_factors = Vec::with_capacity(keys.len());\n  let mut multiexp = Vec::with_capacity(keys.len());\n  let mut verification_shares = HashMap::with_capacity(keys.len());\n  for (i, key) in (1 ..= keys_len).zip(keys.iter().copied()) {\n    let binding_factor = binding_factor::<C>(transcript.clone(), i);\n    binding_factors.push(binding_factor);\n    multiexp.push((binding_factor, key));\n\n    let i = Participant::new(i).expect(\"non-zero u16 wasn't a valid Participant index?\");\n    verification_shares.insert(i, key);\n  }\n  let group_key = multiexp::multiexp(&multiexp);\n  debug_assert_eq!(our_pub_key, verification_shares[&params.i()]);\n  debug_assert_eq!(musig_key_vartime::<C>(context, keys), Ok(group_key));\n\n  ThresholdKeys::new(\n    params,\n    Interpolation::Constant(binding_factors),\n    private_key,\n    verification_shares,\n  )\n  .map_err(MusigError::DkgError)\n}\n"
  },
  {
    "path": "crypto/dkg/musig/src/tests.rs",
    "content": "use std::collections::HashMap;\n\nuse zeroize::Zeroizing;\nuse rand_core::OsRng;\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::{group::ff::Field, Ciphersuite};\n\nuse dkg_recovery::recover_key;\nuse crate::*;\n\n/// Tests MuSig key generation.\n#[test]\npub fn test_musig() {\n  const PARTICIPANTS: u16 = 5;\n\n  let mut keys = vec![];\n  let mut pub_keys = vec![];\n  for _ in 0 .. PARTICIPANTS {\n    let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));\n    pub_keys.push(<Ristretto as Ciphersuite>::generator() * *key);\n    keys.push(key);\n  }\n\n  const CONTEXT: [u8; 32] = *b\"MuSig Test                      \";\n\n  // Empty signing set\n  musig::<Ristretto>(CONTEXT, Zeroizing::new(<Ristretto as Ciphersuite>::F::ZERO), &[])\n    .unwrap_err();\n  // Signing set we're not part of\n  musig::<Ristretto>(\n    CONTEXT,\n    Zeroizing::new(<Ristretto as Ciphersuite>::F::ZERO),\n    &[<Ristretto as Ciphersuite>::generator()],\n  )\n  .unwrap_err();\n\n  // Test with n keys\n  {\n    let mut created_keys = HashMap::new();\n    let mut verification_shares = HashMap::new();\n    let group_key = musig_key::<Ristretto>(CONTEXT, &pub_keys).unwrap();\n    for (i, key) in keys.iter().enumerate() {\n      let these_keys = musig::<Ristretto>(CONTEXT, key.clone(), &pub_keys).unwrap();\n      assert_eq!(these_keys.params().t(), PARTICIPANTS);\n      assert_eq!(these_keys.params().n(), PARTICIPANTS);\n      assert_eq!(usize::from(u16::from(these_keys.params().i())), i + 1);\n\n      verification_shares.insert(\n        these_keys.params().i(),\n        <Ristretto as Ciphersuite>::generator() * **these_keys.original_secret_share(),\n      );\n\n      assert_eq!(these_keys.group_key(), group_key);\n\n      created_keys.insert(these_keys.params().i(), these_keys);\n    }\n\n    for keys in created_keys.values() {\n      for (l, verification_share) in &verification_shares {\n        assert_eq!(keys.original_verification_share(*l), 
*verification_share);\n      }\n    }\n\n    assert_eq!(\n      <Ristretto as Ciphersuite>::generator() *\n        *recover_key(&created_keys.values().cloned().collect::<Vec<_>>()).unwrap(),\n      group_key\n    );\n  }\n}\n"
  },
  {
    "path": "crypto/dkg/pedpop/Cargo.toml",
    "content": "[package]\nname = \"dkg-pedpop\"\nversion = \"0.6.0\"\ndescription = \"The PedPoP distributed key generation protocol\"\nlicense = \"MIT\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/crypto/dkg/pedpop\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nkeywords = [\"dkg\", \"multisig\", \"threshold\", \"ff\", \"group\"]\nedition = \"2021\"\nrust-version = \"1.80\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nthiserror = { version = \"2\", default-features = false, features = [\"std\"] }\n\nzeroize = { version = \"^1.5\", default-features = false, features = [\"std\", \"zeroize_derive\"] }\nrand_core = { version = \"0.6\", default-features = false, features = [\"std\"] }\n\ntranscript = { package = \"flexible-transcript\", path = \"../../transcript\", version = \"^0.3.3\", default-features = false, features = [\"std\", \"recommended\"] }\nchacha20 = { version = \"0.9\", default-features = false, features = [\"std\", \"zeroize\"] }\n\nmultiexp = { path = \"../../multiexp\", version = \"0.4\", default-features = false, features = [\"std\"] }\nciphersuite = { path = \"../../ciphersuite\", version = \"^0.4.1\", default-features = false, features = [\"std\"] }\nschnorr = { package = \"schnorr-signatures\", path = \"../../schnorr\", version = \"^0.5.1\", default-features = false, features = [\"std\"] }\ndleq = { path = \"../../dleq\", version = \"^0.4.1\", default-features = false, features = [\"std\", \"serialize\"] }\n\ndkg = { path = \"../\", version = \"0.6\", default-features = false, features = [\"std\"] }\n\n[dev-dependencies]\nrand_core = { version = \"0.6\", default-features = false, features = [\"getrandom\"] }\ndalek-ff-group = { path = \"../../dalek-ff-group\", default-features = false }\n"
  },
  {
    "path": "crypto/dkg/pedpop/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2021-2025 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "crypto/dkg/pedpop/README.md",
    "content": "# Distributed Key Generation - PedPoP\n\nThis implements the PedPoP distributed key generation protocol for the\n[`dkg`](https://docs.rs/dkg) crate's types.\n\nThis crate was originally part of the `dkg` crate, which was\n[audited by Cypher Stack in March 2023](\n  https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf\n), culminating in commit\n[669d2dbffc1dafb82a09d9419ea182667115df06](\n  https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06\n). Any subsequent changes have not undergone auditing.\n"
  },
  {
    "path": "crypto/dkg/pedpop/src/encryption.rs",
    "content": "use core::{ops::Deref, fmt};\nuse std::{io, collections::HashMap};\n\nuse thiserror::Error;\n\nuse zeroize::{Zeroize, Zeroizing};\nuse rand_core::{RngCore, CryptoRng};\n\nuse chacha20::{\n  cipher::{crypto_common::KeyIvInit, StreamCipher},\n  Key as Cc20Key, Nonce as Cc20Iv, ChaCha20,\n};\n\nuse transcript::{Transcript, RecommendedTranscript};\n\n#[cfg(test)]\nuse ciphersuite::group::ff::Field;\nuse ciphersuite::{group::GroupEncoding, Ciphersuite};\nuse multiexp::BatchVerifier;\n\nuse schnorr::SchnorrSignature;\nuse dleq::DLEqProof;\n\nuse dkg::{Participant, ThresholdParams};\n\nmod sealed {\n  use super::*;\n\n  pub trait ReadWrite: Sized {\n    fn read<R: io::Read>(reader: &mut R, params: ThresholdParams) -> io::Result<Self>;\n    fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()>;\n\n    fn serialize(&self) -> Vec<u8> {\n      let mut buf = vec![];\n      self.write(&mut buf).unwrap();\n      buf\n    }\n  }\n\n  pub trait Message: Clone + PartialEq + Eq + fmt::Debug + Zeroize + ReadWrite {}\n  impl<M: Clone + PartialEq + Eq + fmt::Debug + Zeroize + ReadWrite> Message for M {}\n\n  pub trait Encryptable: Clone + AsRef<[u8]> + AsMut<[u8]> + Zeroize + ReadWrite {}\n  impl<E: Clone + AsRef<[u8]> + AsMut<[u8]> + Zeroize + ReadWrite> Encryptable for E {}\n}\npub(crate) use sealed::*;\n\n/// Wraps a message with a key to use for encryption in the future.\n#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]\npub struct EncryptionKeyMessage<C: Ciphersuite, M: Message> {\n  msg: M,\n  enc_key: C::G,\n}\n\n// Doesn't impl ReadWrite so that doesn't need to be imported\nimpl<C: Ciphersuite, M: Message> EncryptionKeyMessage<C, M> {\n  pub fn read<R: io::Read>(reader: &mut R, params: ThresholdParams) -> io::Result<Self> {\n    Ok(Self { msg: M::read(reader, params)?, enc_key: C::read_G(reader)? 
})\n  }\n\n  pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {\n    self.msg.write(writer)?;\n    writer.write_all(self.enc_key.to_bytes().as_ref())\n  }\n\n  pub fn serialize(&self) -> Vec<u8> {\n    let mut buf = vec![];\n    self.write(&mut buf).unwrap();\n    buf\n  }\n\n  #[cfg(test)]\n  pub(crate) fn enc_key(&self) -> C::G {\n    self.enc_key\n  }\n}\n\n/// An encrypted message, with a per-message encryption key enabling revealing specific messages\n/// without side effects.\n#[derive(Clone, Zeroize)]\npub struct EncryptedMessage<C: Ciphersuite, E: Encryptable> {\n  key: C::G,\n  // Also include a proof-of-possession for the key.\n  // If this proof-of-possession wasn't here, Eve could observe Alice encrypt to Bob with key X,\n  // then send Bob a message also claiming to use X.\n  // While Eve's message would fail to meaningfully decrypt, Bob would then use this to create a\n  // blame argument against Eve. When they do, they'd reveal bX, revealing Alice's message to Bob.\n  // This is a massive side effect which could break some protocols, in the worst case.\n  // While Eve can still reuse their own keys, causing Bob to leak all messages by revealing for\n  // any single one, that's effectively Eve revealing themselves, and not considered relevant.\n  pop: SchnorrSignature<C>,\n  msg: Zeroizing<E>,\n}\n\nfn ecdh<C: Ciphersuite>(private: &Zeroizing<C::F>, public: C::G) -> Zeroizing<C::G> {\n  Zeroizing::new(public * private.deref())\n}\n\n// Each ecdh must be distinct. 
Reuse of an ecdh for multiple ciphers will cause the messages to be\n// leaked.\nfn cipher<C: Ciphersuite>(context: [u8; 32], ecdh: &Zeroizing<C::G>) -> ChaCha20 {\n  // Ideally, we'd box this transcript with ZAlloc, yet that's only possible on nightly\n  // TODO: https://github.com/serai-dex/serai/issues/151\n  let mut transcript = RecommendedTranscript::new(b\"DKG Encryption v0.2\");\n  transcript.append_message(b\"context\", context);\n\n  transcript.domain_separate(b\"encryption_key\");\n\n  let mut ecdh = ecdh.to_bytes();\n  transcript.append_message(b\"shared_key\", ecdh.as_ref());\n  ecdh.as_mut().zeroize();\n\n  let zeroize = |buf: &mut [u8]| buf.zeroize();\n\n  let mut key = Cc20Key::default();\n  let mut challenge = transcript.challenge(b\"key\");\n  key.copy_from_slice(&challenge[.. 32]);\n  zeroize(challenge.as_mut());\n\n  // Since the key is single-use, it doesn't matter what we use for the IV\n  // The issue is key + IV reuse. If we never reuse the key, we can't have the opportunity to\n  // reuse a nonce\n  // Use a static IV in acknowledgement of this\n  let mut iv = Cc20Iv::default();\n  // The \\0 is to satisfy the length requirement (12), not to be null terminated\n  iv.copy_from_slice(b\"DKG IV v0.2\\0\");\n\n  // ChaCha20 has the same commentary as the transcript regarding ZAlloc\n  // TODO: https://github.com/serai-dex/serai/issues/151\n  let res = ChaCha20::new(&key, &iv);\n  zeroize(key.as_mut());\n  res\n}\n\nfn encrypt<R: RngCore + CryptoRng, C: Ciphersuite, E: Encryptable>(\n  rng: &mut R,\n  context: [u8; 32],\n  from: Participant,\n  to: C::G,\n  mut msg: Zeroizing<E>,\n) -> EncryptedMessage<C, E> {\n  /*\n  The following code could be used to replace the requirement on an RNG here.\n  It's just currently not an issue to require taking in an RNG here.\n  let last = self.last_enc_key.to_bytes();\n  self.last_enc_key = C::hash_to_F(b\"encryption_base\", last.as_ref());\n  let key = C::hash_to_F(b\"encryption_key\", last.as_ref());\n  
last.as_mut().zeroize();\n  */\n\n  // Generate a new key for this message, satisfying cipher's requirement of distinct keys per\n  // message, and enabling revealing this message without revealing any others\n  let key = Zeroizing::new(C::random_nonzero_F(rng));\n  cipher::<C>(context, &ecdh::<C>(&key, to)).apply_keystream(msg.as_mut().as_mut());\n\n  let pub_key = C::generator() * key.deref();\n  let nonce = Zeroizing::new(C::random_nonzero_F(rng));\n  let pub_nonce = C::generator() * nonce.deref();\n  EncryptedMessage {\n    key: pub_key,\n    pop: SchnorrSignature::sign(\n      &key,\n      nonce,\n      pop_challenge::<C>(context, pub_nonce, pub_key, from, msg.deref().as_ref()),\n    ),\n    msg,\n  }\n}\n\nimpl<C: Ciphersuite, E: Encryptable> EncryptedMessage<C, E> {\n  pub fn read<R: io::Read>(reader: &mut R, params: ThresholdParams) -> io::Result<Self> {\n    Ok(Self {\n      key: C::read_G(reader)?,\n      pop: SchnorrSignature::<C>::read(reader)?,\n      msg: Zeroizing::new(E::read(reader, params)?),\n    })\n  }\n\n  pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {\n    writer.write_all(self.key.to_bytes().as_ref())?;\n    self.pop.write(writer)?;\n    self.msg.write(writer)\n  }\n\n  pub fn serialize(&self) -> Vec<u8> {\n    let mut buf = vec![];\n    self.write(&mut buf).unwrap();\n    buf\n  }\n\n  #[cfg(test)]\n  pub(crate) fn invalidate_pop(&mut self) {\n    self.pop.s += C::F::ONE;\n  }\n\n  #[cfg(test)]\n  pub(crate) fn invalidate_msg<R: RngCore + CryptoRng>(\n    &mut self,\n    rng: &mut R,\n    context: [u8; 32],\n    from: Participant,\n  ) {\n    // Invalidate the message by specifying a new key/Schnorr PoP\n    // This will cause all initial checks to pass, yet a decrypt to gibberish\n    let key = Zeroizing::new(C::random_nonzero_F(rng));\n    let pub_key = C::generator() * key.deref();\n    let nonce = Zeroizing::new(C::random_nonzero_F(rng));\n    let pub_nonce = C::generator() * nonce.deref();\n    self.key = 
pub_key;\n    self.pop = SchnorrSignature::sign(\n      &key,\n      nonce,\n      pop_challenge::<C>(context, pub_nonce, pub_key, from, self.msg.deref().as_ref()),\n    );\n  }\n\n  // Assumes the encrypted message is a secret share.\n  #[cfg(test)]\n  pub(crate) fn invalidate_share_serialization<R: RngCore + CryptoRng>(\n    &mut self,\n    rng: &mut R,\n    context: [u8; 32],\n    from: Participant,\n    to: C::G,\n  ) {\n    use ciphersuite::group::ff::PrimeField;\n\n    let mut repr = <C::F as PrimeField>::Repr::default();\n    for b in repr.as_mut() {\n      *b = 255;\n    }\n    // Tries to guarantee the above assumption.\n    assert_eq!(repr.as_ref().len(), self.msg.as_ref().len());\n    // Checks that this isn't over a field where this is somehow valid\n    assert!(!bool::from(C::F::from_repr(repr).is_some()));\n\n    self.msg.as_mut().as_mut().copy_from_slice(repr.as_ref());\n    *self = encrypt(rng, context, from, to, self.msg.clone());\n  }\n\n  // Assumes the encrypted message is a secret share.\n  #[cfg(test)]\n  pub(crate) fn invalidate_share_value<R: RngCore + CryptoRng>(\n    &mut self,\n    rng: &mut R,\n    context: [u8; 32],\n    from: Participant,\n    to: C::G,\n  ) {\n    use ciphersuite::group::ff::PrimeField;\n\n    // Assumes the share isn't randomly 1\n    let repr = C::F::ONE.to_repr();\n    self.msg.as_mut().as_mut().copy_from_slice(repr.as_ref());\n    *self = encrypt(rng, context, from, to, self.msg.clone());\n  }\n}\n\n/// A proof that the provided encryption key is a legitimately derived shared key for some message.\n#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]\npub struct EncryptionKeyProof<C: Ciphersuite> {\n  key: Zeroizing<C::G>,\n  dleq: DLEqProof<C::G>,\n}\n\nimpl<C: Ciphersuite> EncryptionKeyProof<C> {\n  pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {\n    Ok(Self { key: Zeroizing::new(C::read_G(reader)?), dleq: DLEqProof::read(reader)? 
})\n  }\n\n  pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {\n    writer.write_all(self.key.to_bytes().as_ref())?;\n    self.dleq.write(writer)\n  }\n\n  pub fn serialize(&self) -> Vec<u8> {\n    let mut buf = vec![];\n    self.write(&mut buf).unwrap();\n    buf\n  }\n\n  #[cfg(test)]\n  pub(crate) fn invalidate_key(&mut self) {\n    *self.key += C::generator();\n  }\n\n  #[cfg(test)]\n  pub(crate) fn invalidate_dleq(&mut self) {\n    let mut buf = vec![];\n    self.dleq.write(&mut buf).unwrap();\n    // Adds one to c since this is serialized c, s\n    // Adding one to c will leave a validly serialized c\n    // Adding one to s may leave an invalidly serialized s\n    buf[0] = buf[0].wrapping_add(1);\n    self.dleq = DLEqProof::read::<&[u8]>(&mut buf.as_ref()).unwrap();\n  }\n}\n\n// This doesn't need to take the msg. It just doesn't hurt as an extra layer.\n// This still doesn't mean the DKG offers an authenticated channel. The per-message keys have no\n// root of trust other than their existence in the assumed-to-exist external authenticated channel.\nfn pop_challenge<C: Ciphersuite>(\n  context: [u8; 32],\n  nonce: C::G,\n  key: C::G,\n  sender: Participant,\n  msg: &[u8],\n) -> C::F {\n  let mut transcript = RecommendedTranscript::new(b\"DKG Encryption Key Proof of Possession v0.2\");\n  transcript.append_message(b\"context\", context);\n\n  transcript.domain_separate(b\"proof_of_possession\");\n\n  transcript.append_message(b\"nonce\", nonce.to_bytes());\n  transcript.append_message(b\"key\", key.to_bytes());\n  // This is sufficient to prevent the attack this is meant to stop\n  transcript.append_message(b\"sender\", sender.to_bytes());\n  // This, as written above, doesn't hurt\n  transcript.append_message(b\"message\", msg);\n  // While this is a PoK and a PoP, it's called a PoP here since the important part is its owner\n  // Elsewhere, where we use the term PoK, the important part is that it isn't some inverse, with\n  // an unknown 
to anyone discrete log, breaking the system\n  C::hash_to_F(b\"DKG-encryption-proof_of_possession\", &transcript.challenge(b\"schnorr\"))\n}\n\nfn encryption_key_transcript(context: [u8; 32]) -> RecommendedTranscript {\n  let mut transcript = RecommendedTranscript::new(b\"DKG Encryption Key Correctness Proof v0.2\");\n  transcript.append_message(b\"context\", context);\n  transcript\n}\n\n#[derive(Clone, Copy, PartialEq, Eq, Debug, Error)]\npub(crate) enum DecryptionError {\n  #[error(\"accused provided an invalid signature\")]\n  InvalidSignature,\n  #[error(\"accuser provided an invalid decryption key\")]\n  InvalidProof,\n}\n\n// A simple box for managing decryption.\n#[derive(Clone, Debug)]\npub(crate) struct Decryption<C: Ciphersuite> {\n  context: [u8; 32],\n  enc_keys: HashMap<Participant, C::G>,\n}\n\nimpl<C: Ciphersuite> Decryption<C> {\n  pub(crate) fn new(context: [u8; 32]) -> Self {\n    Self { context, enc_keys: HashMap::new() }\n  }\n  pub(crate) fn register<M: Message>(\n    &mut self,\n    participant: Participant,\n    msg: EncryptionKeyMessage<C, M>,\n  ) -> M {\n    assert!(\n      !self.enc_keys.contains_key(&participant),\n      \"Re-registering encryption key for a participant\"\n    );\n    self.enc_keys.insert(participant, msg.enc_key);\n    msg.msg\n  }\n\n  // Given a message, and the intended decryptor, and a proof for its key, decrypt the message.\n  // Returns None if the key was wrong.\n  pub(crate) fn decrypt_with_proof<E: Encryptable>(\n    &self,\n    from: Participant,\n    decryptor: Participant,\n    mut msg: EncryptedMessage<C, E>,\n    // There's no encryption key proof if the accusation is of an invalid signature\n    proof: Option<EncryptionKeyProof<C>>,\n  ) -> Result<Zeroizing<E>, DecryptionError> {\n    if !msg.pop.verify(\n      msg.key,\n      pop_challenge::<C>(self.context, msg.pop.R, msg.key, from, msg.msg.deref().as_ref()),\n    ) {\n      Err(DecryptionError::InvalidSignature)?;\n    }\n\n    if let Some(proof) = 
proof {\n      // Verify this is the decryption key for this message\n      proof\n        .dleq\n        .verify(\n          &mut encryption_key_transcript(self.context),\n          &[C::generator(), msg.key],\n          &[self.enc_keys[&decryptor], *proof.key],\n        )\n        .map_err(|_| DecryptionError::InvalidProof)?;\n\n      cipher::<C>(self.context, &proof.key).apply_keystream(msg.msg.as_mut().as_mut());\n      Ok(msg.msg)\n    } else {\n      Err(DecryptionError::InvalidProof)\n    }\n  }\n}\n\n// A simple box for managing encryption.\n#[derive(Clone)]\npub(crate) struct Encryption<C: Ciphersuite> {\n  context: [u8; 32],\n  i: Participant,\n  enc_key: Zeroizing<C::F>,\n  enc_pub_key: C::G,\n  decryption: Decryption<C>,\n}\n\nimpl<C: Ciphersuite> fmt::Debug for Encryption<C> {\n  fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {\n    fmt\n      .debug_struct(\"Encryption\")\n      .field(\"context\", &self.context)\n      .field(\"i\", &self.i)\n      .field(\"enc_pub_key\", &self.enc_pub_key)\n      .field(\"decryption\", &self.decryption)\n      .finish_non_exhaustive()\n  }\n}\n\nimpl<C: Ciphersuite> Zeroize for Encryption<C> {\n  fn zeroize(&mut self) {\n    self.enc_key.zeroize();\n    self.enc_pub_key.zeroize();\n    for (_, mut value) in self.decryption.enc_keys.drain() {\n      value.zeroize();\n    }\n  }\n}\n\nimpl<C: Ciphersuite> Encryption<C> {\n  pub(crate) fn new<R: RngCore + CryptoRng>(\n    context: [u8; 32],\n    i: Participant,\n    rng: &mut R,\n  ) -> Self {\n    let enc_key = Zeroizing::new(C::random_nonzero_F(rng));\n    Self {\n      context,\n      i,\n      enc_pub_key: C::generator() * enc_key.deref(),\n      enc_key,\n      decryption: Decryption::new(context),\n    }\n  }\n\n  pub(crate) fn registration<M: Message>(&self, msg: M) -> EncryptionKeyMessage<C, M> {\n    EncryptionKeyMessage { msg, enc_key: self.enc_pub_key }\n  }\n\n  pub(crate) fn register<M: Message>(\n    &mut self,\n    participant: Participant,\n 
   msg: EncryptionKeyMessage<C, M>,\n  ) -> M {\n    self.decryption.register(participant, msg)\n  }\n\n  pub(crate) fn encrypt<R: RngCore + CryptoRng, E: Encryptable>(\n    &self,\n    rng: &mut R,\n    participant: Participant,\n    msg: Zeroizing<E>,\n  ) -> EncryptedMessage<C, E> {\n    encrypt(rng, self.context, self.i, self.decryption.enc_keys[&participant], msg)\n  }\n\n  pub(crate) fn decrypt<R: RngCore + CryptoRng, I: Copy + Zeroize, E: Encryptable>(\n    &self,\n    rng: &mut R,\n    batch: &mut BatchVerifier<I, C::G>,\n    // Uses a distinct batch ID so if this batch verifier is reused, we know its the PoP aspect\n    // which failed, and therefore to use None for the blame\n    batch_id: I,\n    from: Participant,\n    mut msg: EncryptedMessage<C, E>,\n  ) -> (Zeroizing<E>, EncryptionKeyProof<C>) {\n    msg.pop.batch_verify(\n      rng,\n      batch,\n      batch_id,\n      msg.key,\n      pop_challenge::<C>(self.context, msg.pop.R, msg.key, from, msg.msg.deref().as_ref()),\n    );\n\n    let key = ecdh::<C>(&self.enc_key, msg.key);\n    cipher::<C>(self.context, &key).apply_keystream(msg.msg.as_mut().as_mut());\n    (\n      msg.msg,\n      EncryptionKeyProof {\n        key,\n        dleq: DLEqProof::prove(\n          rng,\n          &mut encryption_key_transcript(self.context),\n          &[C::generator(), msg.key],\n          &self.enc_key,\n        ),\n      },\n    )\n  }\n\n  pub(crate) fn into_decryption(self) -> Decryption<C> {\n    self.decryption\n  }\n}\n"
  },
  {
    "path": "crypto/dkg/pedpop/src/lib.rs",
    "content": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![doc = include_str!(\"../README.md\")]\n// This crate requires `dleq` which doesn't support no-std via std-shims\n// #![cfg_attr(not(feature = \"std\"), no_std)]\n\nuse core::{marker::PhantomData, ops::Deref, fmt};\nuse std::{\n  io::{self, Read, Write},\n  collections::HashMap,\n};\n\nuse zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing};\nuse rand_core::{RngCore, CryptoRng};\n\nuse transcript::{Transcript, RecommendedTranscript};\n\nuse multiexp::{multiexp_vartime, BatchVerifier};\nuse ciphersuite::{\n  group::{\n    ff::{Field, PrimeField},\n    Group, GroupEncoding,\n  },\n  Ciphersuite,\n};\n\nuse schnorr::SchnorrSignature;\n\npub use dkg::*;\n\nmod encryption;\npub use encryption::*;\n\n#[cfg(test)]\nmod tests;\n\n/// Errors possible during key generation.\n#[derive(Clone, PartialEq, Eq, Debug, thiserror::Error)]\npub enum PedPoPError<C: Ciphersuite> {\n  /// An incorrect amount of participants was provided.\n  #[error(\"incorrect amount of participants (expected {expected}, found {found})\")]\n  IncorrectAmountOfParticipants { expected: usize, found: usize },\n  /// An invalid proof of knowledge was provided.\n  #[error(\"invalid proof of knowledge (participant {0})\")]\n  InvalidCommitments(Participant),\n  /// An invalid DKG share was provided.\n  #[error(\"invalid share (participant {participant}, blame {blame})\")]\n  InvalidShare { participant: Participant, blame: Option<EncryptionKeyProof<C>> },\n  /// A participant was missing.\n  #[error(\"missing participant {0}\")]\n  MissingParticipant(Participant),\n  /// An error propagated from the underlying `dkg` crate.\n  #[error(\"error from dkg ({0})\")]\n  DkgError(DkgError),\n}\n\n// Validate a map of values to have the expected included participants\nfn validate_map<T, C: Ciphersuite>(\n  map: &HashMap<Participant, T>,\n  included: &[Participant],\n  ours: Participant,\n) -> Result<(), PedPoPError<C>> {\n  if (map.len() + 1) != included.len() {\n    
Err(PedPoPError::IncorrectAmountOfParticipants {\n      expected: included.len(),\n      found: map.len() + 1,\n    })?;\n  }\n\n  for included in included {\n    if *included == ours {\n      if map.contains_key(included) {\n        Err(PedPoPError::DkgError(DkgError::DuplicatedParticipant(*included)))?;\n      }\n      continue;\n    }\n\n    if !map.contains_key(included) {\n      Err(PedPoPError::MissingParticipant(*included))?;\n    }\n  }\n\n  Ok(())\n}\n\n#[allow(non_snake_case)]\nfn challenge<C: Ciphersuite>(context: [u8; 32], l: Participant, R: &[u8], Am: &[u8]) -> C::F {\n  let mut transcript = RecommendedTranscript::new(b\"DKG PedPoP v0.2\");\n  transcript.domain_separate(b\"schnorr_proof_of_knowledge\");\n  transcript.append_message(b\"context\", context);\n  transcript.append_message(b\"participant\", l.to_bytes());\n  transcript.append_message(b\"nonce\", R);\n  transcript.append_message(b\"commitments\", Am);\n  C::hash_to_F(b\"DKG-PedPoP-proof_of_knowledge-0\", &transcript.challenge(b\"schnorr\"))\n}\n\n/// The commitments message, intended to be broadcast to all other parties.\n///\n/// Every participant should only provide one set of commitments to all parties. If any\n/// participant sends multiple sets of commitments, they are faulty and should be presumed\n/// malicious. As this library does not handle networking, it is unable to detect if any\n/// participant is so faulty. 
That responsibility lies with the caller.\n#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]\npub struct Commitments<C: Ciphersuite> {\n  commitments: Vec<C::G>,\n  cached_msg: Vec<u8>,\n  sig: SchnorrSignature<C>,\n}\n\nimpl<C: Ciphersuite> ReadWrite for Commitments<C> {\n  fn read<R: Read>(reader: &mut R, params: ThresholdParams) -> io::Result<Self> {\n    let mut commitments = Vec::with_capacity(params.t().into());\n    let mut cached_msg = vec![];\n\n    #[allow(non_snake_case)]\n    let mut read_G = || -> io::Result<C::G> {\n      let mut buf = <C::G as GroupEncoding>::Repr::default();\n      reader.read_exact(buf.as_mut())?;\n      let point = C::read_G(&mut buf.as_ref())?;\n      cached_msg.extend(buf.as_ref());\n      Ok(point)\n    };\n\n    for _ in 0 .. params.t() {\n      commitments.push(read_G()?);\n    }\n\n    Ok(Commitments { commitments, cached_msg, sig: SchnorrSignature::read(reader)? })\n  }\n\n  fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {\n    writer.write_all(&self.cached_msg)?;\n    self.sig.write(writer)\n  }\n}\n\n/// State machine to begin the key generation protocol.\n#[derive(Debug, Zeroize)]\npub struct KeyGenMachine<C: Ciphersuite> {\n  params: ThresholdParams,\n  context: [u8; 32],\n  _curve: PhantomData<C>,\n}\n\nimpl<C: Ciphersuite> KeyGenMachine<C> {\n  /// Create a new machine to generate a key.\n  ///\n  /// The context should be unique among multisigs.\n  pub fn new(params: ThresholdParams, context: [u8; 32]) -> KeyGenMachine<C> {\n    KeyGenMachine { params, context, _curve: PhantomData }\n  }\n\n  /// Start generating a key according to the PedPoP DKG specification present in the FROST paper.\n  ///\n  /// Returns a commitments message to be sent to all parties over an authenticated channel. 
If any\n  /// party submits multiple sets of commitments, they MUST be treated as malicious.\n  pub fn generate_coefficients<R: RngCore + CryptoRng>(\n    self,\n    rng: &mut R,\n  ) -> (SecretShareMachine<C>, EncryptionKeyMessage<C, Commitments<C>>) {\n    let t = usize::from(self.params.t());\n    let mut coefficients = Vec::with_capacity(t);\n    let mut commitments = Vec::with_capacity(t);\n    let mut cached_msg = vec![];\n\n    for i in 0 .. t {\n      // Step 1: Generate t random values to form a polynomial with\n      coefficients.push(Zeroizing::new(C::random_nonzero_F(&mut *rng)));\n      // Step 3: Generate public commitments\n      commitments.push(C::generator() * coefficients[i].deref());\n      cached_msg.extend(commitments[i].to_bytes().as_ref());\n    }\n\n    // Step 2: Provide a proof of knowledge\n    let r = Zeroizing::new(C::random_nonzero_F(rng));\n    let nonce = C::generator() * r.deref();\n    let sig = SchnorrSignature::<C>::sign(\n      &coefficients[0],\n      // This could be deterministic as the PoK is a singleton never opened up to cooperative\n      // discussion\n      // There's no reason to spend the time and effort to make this deterministic besides a\n      // general obsession with canonicity and determinism though\n      r,\n      challenge::<C>(self.context, self.params.i(), nonce.to_bytes().as_ref(), &cached_msg),\n    );\n\n    // Additionally create an encryption mechanism to protect the secret shares\n    let encryption = Encryption::new(self.context, self.params.i(), rng);\n\n    // Step 4: Broadcast\n    let msg =\n      encryption.registration(Commitments { commitments: commitments.clone(), cached_msg, sig });\n    (\n      SecretShareMachine {\n        params: self.params,\n        context: self.context,\n        coefficients,\n        our_commitments: commitments,\n        encryption,\n      },\n      msg,\n    )\n  }\n}\n\nfn polynomial<F: PrimeField + Zeroize>(\n  coefficients: &[Zeroizing<F>],\n  l: 
Participant,\n) -> Zeroizing<F> {\n  let l = F::from(u64::from(u16::from(l)));\n  // This should never be reached since Participant is explicitly non-zero\n  assert!(l != F::ZERO, \"zero participant passed to polynomial\");\n  let mut share = Zeroizing::new(F::ZERO);\n  for (idx, coefficient) in coefficients.iter().rev().enumerate() {\n    *share += coefficient.deref();\n    if idx != (coefficients.len() - 1) {\n      *share *= l;\n    }\n  }\n  share\n}\n\n/// The secret share message, to be sent to the party it's intended for over an authenticated\n/// channel.\n///\n/// If any participant sends multiple secret shares to another participant, they are faulty.\n// This should presumably be written as SecretShare(Zeroizing<F::Repr>).\n// It's unfortunately not possible as F::Repr doesn't have Zeroize as a bound.\n// The encryption system also explicitly uses Zeroizing<M> so it can ensure anything being\n// encrypted is within Zeroizing. Accordingly, internally having Zeroizing would be redundant.\n#[derive(Clone, PartialEq, Eq)]\npub struct SecretShare<F: PrimeField>(F::Repr);\nimpl<F: PrimeField> AsRef<[u8]> for SecretShare<F> {\n  fn as_ref(&self) -> &[u8] {\n    self.0.as_ref()\n  }\n}\nimpl<F: PrimeField> AsMut<[u8]> for SecretShare<F> {\n  fn as_mut(&mut self) -> &mut [u8] {\n    self.0.as_mut()\n  }\n}\nimpl<F: PrimeField> fmt::Debug for SecretShare<F> {\n  fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {\n    fmt.debug_struct(\"SecretShare\").finish_non_exhaustive()\n  }\n}\nimpl<F: PrimeField> Zeroize for SecretShare<F> {\n  fn zeroize(&mut self) {\n    self.0.as_mut().zeroize()\n  }\n}\n// Still manually implement ZeroizeOnDrop to ensure these don't stick around.\n// We could replace Zeroizing<M> with a bound M: ZeroizeOnDrop.\n// Doing so would potentially fail to highlight the expected behavior with these and remove a layer\n// of depth.\nimpl<F: PrimeField> Drop for SecretShare<F> {\n  fn drop(&mut self) {\n    self.zeroize();\n  
}\n}\nimpl<F: PrimeField> ZeroizeOnDrop for SecretShare<F> {}\n\nimpl<F: PrimeField> ReadWrite for SecretShare<F> {\n  fn read<R: Read>(reader: &mut R, _: ThresholdParams) -> io::Result<Self> {\n    let mut repr = F::Repr::default();\n    reader.read_exact(repr.as_mut())?;\n    Ok(SecretShare(repr))\n  }\n\n  fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {\n    writer.write_all(self.0.as_ref())\n  }\n}\n\n/// Advancement of the key generation state machine.\n#[derive(Zeroize)]\npub struct SecretShareMachine<C: Ciphersuite> {\n  params: ThresholdParams,\n  context: [u8; 32],\n  coefficients: Vec<Zeroizing<C::F>>,\n  our_commitments: Vec<C::G>,\n  encryption: Encryption<C>,\n}\n\nimpl<C: Ciphersuite> fmt::Debug for SecretShareMachine<C> {\n  fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {\n    fmt\n      .debug_struct(\"SecretShareMachine\")\n      .field(\"params\", &self.params)\n      .field(\"context\", &self.context)\n      .field(\"our_commitments\", &self.our_commitments)\n      .field(\"encryption\", &self.encryption)\n      .finish_non_exhaustive()\n  }\n}\n\nimpl<C: Ciphersuite> SecretShareMachine<C> {\n  /// Verify the data from the previous round (canonicity, PoKs, message authenticity)\n  #[allow(clippy::type_complexity)]\n  fn verify_r1<R: RngCore + CryptoRng>(\n    &mut self,\n    rng: &mut R,\n    mut commitment_msgs: HashMap<Participant, EncryptionKeyMessage<C, Commitments<C>>>,\n  ) -> Result<HashMap<Participant, Vec<C::G>>, PedPoPError<C>> {\n    validate_map(\n      &commitment_msgs,\n      &self.params.all_participant_indexes().collect::<Vec<_>>(),\n      self.params.i(),\n    )?;\n\n    let mut batch = BatchVerifier::<Participant, C::G>::new(commitment_msgs.len());\n    let mut commitments = HashMap::new();\n    for l in self.params.all_participant_indexes() {\n      let Some(msg) = commitment_msgs.remove(&l) else { continue };\n      let mut msg = self.encryption.register(l, msg);\n\n      if msg.commitments.len() != 
self.params.t().into() {\n        Err(PedPoPError::InvalidCommitments(l))?;\n      }\n\n      // Step 5: Validate each proof of knowledge\n      // This is solely the prep step for the latter batch verification\n      msg.sig.batch_verify(\n        rng,\n        &mut batch,\n        l,\n        msg.commitments[0],\n        challenge::<C>(self.context, l, msg.sig.R.to_bytes().as_ref(), &msg.cached_msg),\n      );\n\n      commitments.insert(l, msg.commitments.drain(..).collect::<Vec<_>>());\n    }\n\n    batch.verify_vartime_with_vartime_blame().map_err(PedPoPError::InvalidCommitments)?;\n\n    commitments.insert(self.params.i(), self.our_commitments.drain(..).collect());\n    Ok(commitments)\n  }\n\n  /// Continue generating a key.\n  ///\n  /// Takes in everyone else's commitments. Returns a HashMap of encrypted secret shares to be sent\n  /// over authenticated channels to their relevant counterparties.\n  ///\n  /// If any participant sends multiple secret shares to another participant, they are faulty.\n  #[allow(clippy::type_complexity)]\n  pub fn generate_secret_shares<R: RngCore + CryptoRng>(\n    mut self,\n    rng: &mut R,\n    commitments: HashMap<Participant, EncryptionKeyMessage<C, Commitments<C>>>,\n  ) -> Result<\n    (KeyMachine<C>, HashMap<Participant, EncryptedMessage<C, SecretShare<C::F>>>),\n    PedPoPError<C>,\n  > {\n    let commitments = self.verify_r1(&mut *rng, commitments)?;\n\n    // Step 1: Generate secret shares for all other parties\n    let mut res = HashMap::new();\n    for l in self.params.all_participant_indexes() {\n      // Don't insert our own shares to the byte buffer which is meant to be sent around\n      // An app developer could accidentally send it. 
Best to keep this black boxed\n      if l == self.params.i() {\n        continue;\n      }\n\n      let mut share = polynomial(&self.coefficients, l);\n      let share_bytes = Zeroizing::new(SecretShare::<C::F>(share.to_repr()));\n      share.zeroize();\n      res.insert(l, self.encryption.encrypt(rng, l, share_bytes));\n    }\n\n    // Calculate our own share\n    let share = polynomial(&self.coefficients, self.params.i());\n    self.coefficients.zeroize();\n\n    Ok((\n      KeyMachine { params: self.params, secret: share, commitments, encryption: self.encryption },\n      res,\n    ))\n  }\n}\n\n/// Advancement of the the secret share state machine.\n///\n/// This machine will 'complete' the protocol, by a local perspective. In order to be secure,\n/// the parties must confirm having successfully completed the protocol (an effort out of scope to\n/// this library), yet this is modeled by one more state transition (BlameMachine).\npub struct KeyMachine<C: Ciphersuite> {\n  params: ThresholdParams,\n  secret: Zeroizing<C::F>,\n  commitments: HashMap<Participant, Vec<C::G>>,\n  encryption: Encryption<C>,\n}\n\nimpl<C: Ciphersuite> fmt::Debug for KeyMachine<C> {\n  fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {\n    fmt\n      .debug_struct(\"KeyMachine\")\n      .field(\"params\", &self.params)\n      .field(\"commitments\", &self.commitments)\n      .field(\"encryption\", &self.encryption)\n      .finish_non_exhaustive()\n  }\n}\n\nimpl<C: Ciphersuite> Zeroize for KeyMachine<C> {\n  fn zeroize(&mut self) {\n    self.params.zeroize();\n    self.secret.zeroize();\n    for commitments in self.commitments.values_mut() {\n      commitments.zeroize();\n    }\n    self.encryption.zeroize();\n  }\n}\n\n// Calculate the exponent for a given participant and apply it to a series of commitments\n// Initially used with the actual commitments to verify the secret share, later used with\n// stripes to generate the verification shares\nfn exponential<C: 
Ciphersuite>(i: Participant, values: &[C::G]) -> Vec<(C::F, C::G)> {\n  let i = C::F::from(u16::from(i).into());\n  let mut res = Vec::with_capacity(values.len());\n  (0 .. values.len()).fold(C::F::ONE, |exp, l| {\n    res.push((exp, values[l]));\n    exp * i\n  });\n  res\n}\n\nfn share_verification_statements<C: Ciphersuite>(\n  target: Participant,\n  commitments: &[C::G],\n  mut share: Zeroizing<C::F>,\n) -> Vec<(C::F, C::G)> {\n  // This can be insecurely linearized from n * t to just n using the below sums for a given\n  // stripe. Doing so uses naive addition which is subject to malleability. The only way to\n  // ensure that malleability isn't present is to use this n * t algorithm, which runs\n  // per sender and not as an aggregate of all senders, which also enables blame\n  let mut values = exponential::<C>(target, commitments);\n\n  // Perform the share multiplication outside of the multiexp to minimize stack copying\n  // While the multiexp BatchVerifier does zeroize its flattened multiexp, and itself, it still\n  // converts whatever we give to an iterator and then builds a Vec internally, welcoming copies\n  let neg_share_pub = C::generator() * -*share;\n  share.zeroize();\n  values.push((C::F::ONE, neg_share_pub));\n\n  values\n}\n\n#[derive(Clone, Copy, Hash, Debug, Zeroize)]\nenum BatchId {\n  Decryption(Participant),\n  Share(Participant),\n}\n\nimpl<C: Ciphersuite> KeyMachine<C> {\n  /// Calculate our share given the shares sent to us.\n  ///\n  /// Returns a BlameMachine usable to determine if faults in the protocol occurred.\n  ///\n  /// This will error on, and return a blame proof for, the first-observed case of faulty behavior.\n  pub fn calculate_share<R: RngCore + CryptoRng>(\n    mut self,\n    rng: &mut R,\n    mut shares: HashMap<Participant, EncryptedMessage<C, SecretShare<C::F>>>,\n  ) -> Result<BlameMachine<C>, PedPoPError<C>> {\n    validate_map(\n      &shares,\n      &self.params.all_participant_indexes().collect::<Vec<_>>(),\n   
   self.params.i(),\n    )?;\n\n    let mut batch = BatchVerifier::new(shares.len());\n    let mut blames = HashMap::new();\n    for (l, share_bytes) in shares.drain() {\n      let (mut share_bytes, blame) =\n        self.encryption.decrypt(rng, &mut batch, BatchId::Decryption(l), l, share_bytes);\n      let share =\n        Zeroizing::new(Option::<C::F>::from(C::F::from_repr(share_bytes.0)).ok_or_else(|| {\n          PedPoPError::InvalidShare { participant: l, blame: Some(blame.clone()) }\n        })?);\n      share_bytes.zeroize();\n      *self.secret += share.deref();\n\n      blames.insert(l, blame);\n      batch.queue(\n        rng,\n        BatchId::Share(l),\n        share_verification_statements::<C>(self.params.i(), &self.commitments[&l], share),\n      );\n    }\n    batch.verify_with_vartime_blame().map_err(|id| {\n      let (l, blame) = match id {\n        BatchId::Decryption(l) => (l, None),\n        BatchId::Share(l) => (l, Some(blames.remove(&l).unwrap())),\n      };\n      PedPoPError::InvalidShare { participant: l, blame }\n    })?;\n\n    // Stripe commitments per t and sum them in advance. Calculating verification shares relies on\n    // these sums so preprocessing them is a massive speedup\n    // If these weren't just sums, yet the tables used in multiexp, this would be further optimized\n    // As of right now, each multiexp will regenerate them\n    let mut stripes = Vec::with_capacity(usize::from(self.params.t()));\n    for t in 0 .. 
usize::from(self.params.t()) {\n      stripes.push(self.commitments.values().map(|commitments| commitments[t]).sum());\n    }\n\n    // Calculate each user's verification share\n    let mut verification_shares = HashMap::new();\n    for i in self.params.all_participant_indexes() {\n      verification_shares.insert(\n        i,\n        if i == self.params.i() {\n          C::generator() * self.secret.deref()\n        } else {\n          multiexp_vartime(&exponential::<C>(i, &stripes))\n        },\n      );\n    }\n\n    let KeyMachine { commitments, encryption, params, secret } = self;\n    Ok(BlameMachine {\n      commitments,\n      encryption: encryption.into_decryption(),\n      result: Some(\n        ThresholdKeys::new(params, Interpolation::Lagrange, secret, verification_shares)\n          .map_err(PedPoPError::DkgError)?,\n      ),\n    })\n  }\n}\n\n/// A machine capable of handling blame proofs.\npub struct BlameMachine<C: Ciphersuite> {\n  commitments: HashMap<Participant, Vec<C::G>>,\n  encryption: Decryption<C>,\n  result: Option<ThresholdKeys<C>>,\n}\n\nimpl<C: Ciphersuite> fmt::Debug for BlameMachine<C> {\n  fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {\n    fmt\n      .debug_struct(\"BlameMachine\")\n      .field(\"commitments\", &self.commitments)\n      .field(\"encryption\", &self.encryption)\n      .finish_non_exhaustive()\n  }\n}\n\nimpl<C: Ciphersuite> Zeroize for BlameMachine<C> {\n  fn zeroize(&mut self) {\n    for commitments in self.commitments.values_mut() {\n      commitments.zeroize();\n    }\n    self.result.zeroize();\n  }\n}\n\nimpl<C: Ciphersuite> BlameMachine<C> {\n  /// Mark the protocol as having been successfully completed, returning the generated keys.\n  /// This should only be called after having confirmed, with all participants, successful\n  /// completion.\n  ///\n  /// Confirming successful completion is not necessarily as simple as everyone reporting their\n  /// completion. 
Everyone must also receive everyone's report of completion, entering into the\n  /// territory of consensus protocols. This library does not handle that nor does it provide any\n  /// tooling to do so. This function is solely intended to force users to acknowledge they're\n  /// completing the protocol, not processing any blame.\n  pub fn complete(self) -> ThresholdKeys<C> {\n    self.result.unwrap()\n  }\n\n  fn blame_internal(\n    &self,\n    sender: Participant,\n    recipient: Participant,\n    msg: EncryptedMessage<C, SecretShare<C::F>>,\n    proof: Option<EncryptionKeyProof<C>>,\n  ) -> Participant {\n    let share_bytes = match self.encryption.decrypt_with_proof(sender, recipient, msg, proof) {\n      Ok(share_bytes) => share_bytes,\n      // If there's an invalid signature, the sender did not send a properly formed message\n      Err(DecryptionError::InvalidSignature) => return sender,\n      // Decryption will fail if the provided ECDH key wasn't correct for the given message\n      Err(DecryptionError::InvalidProof) => return recipient,\n    };\n\n    let Some(share) = Option::<C::F>::from(C::F::from_repr(share_bytes.0)) else {\n      // If this isn't a valid scalar, the sender is faulty\n      return sender;\n    };\n\n    // If this isn't a valid share, the sender is faulty\n    if !bool::from(\n      multiexp_vartime(&share_verification_statements::<C>(\n        recipient,\n        &self.commitments[&sender],\n        Zeroizing::new(share),\n      ))\n      .is_identity(),\n    ) {\n      return sender;\n    }\n\n    // The share was canonical and valid\n    recipient\n  }\n\n  /// Given an accusation of fault, determine the faulty party (either the sender, who sent an\n  /// invalid secret share, or the receiver, who claimed a valid secret share was invalid). 
No\n  /// matter which, prevent completion of the machine, forcing an abort of the protocol.\n  ///\n  /// The message should be a copy of the encrypted secret share from the accused sender to the\n  /// accusing recipient. This message must have been authenticated as actually having come from\n  /// the sender in question.\n  ///\n  /// In order to enable detecting multiple faults, an `AdditionalBlameMachine` is returned, which\n  /// can be used to determine further blame. These machines will process the same blame statements\n  /// multiple times, always identifying blame. It is the caller's job to ensure they're unique in\n  /// order to prevent multiple instances of blame over a single incident.\n  pub fn blame(\n    self,\n    sender: Participant,\n    recipient: Participant,\n    msg: EncryptedMessage<C, SecretShare<C::F>>,\n    proof: Option<EncryptionKeyProof<C>>,\n  ) -> (AdditionalBlameMachine<C>, Participant) {\n    let faulty = self.blame_internal(sender, recipient, msg, proof);\n    (AdditionalBlameMachine(self), faulty)\n  }\n}\n\n/// A machine capable of handling an arbitrary amount of additional blame proofs.\n#[derive(Debug, Zeroize)]\npub struct AdditionalBlameMachine<C: Ciphersuite>(BlameMachine<C>);\nimpl<C: Ciphersuite> AdditionalBlameMachine<C> {\n  /// Create an AdditionalBlameMachine capable of evaluating Blame regardless of if the caller was\n  /// a member in the DKG protocol.\n  ///\n  /// Takes in the parameters for the DKG protocol and all of the participant's commitment\n  /// messages.\n  ///\n  /// This constructor assumes the full validity of the commitment messages. They must be fully\n  /// authenticated as having come from the supposed party and verified as valid. 
Usage of invalid\n  /// commitments is considered undefined behavior, and may cause everything from inaccurate blame\n  /// to panics.\n  pub fn new(\n    context: [u8; 32],\n    n: u16,\n    mut commitment_msgs: HashMap<Participant, EncryptionKeyMessage<C, Commitments<C>>>,\n  ) -> Result<Self, PedPoPError<C>> {\n    let mut commitments = HashMap::new();\n    let mut encryption = Decryption::new(context);\n    for i in 1 ..= n {\n      let i = Participant::new(i).unwrap();\n      let Some(msg) = commitment_msgs.remove(&i) else { Err(PedPoPError::MissingParticipant(i))? };\n      commitments.insert(i, encryption.register(i, msg).commitments);\n    }\n    Ok(AdditionalBlameMachine(BlameMachine { commitments, encryption, result: None }))\n  }\n\n  /// Given an accusation of fault, determine the faulty party (either the sender, who sent an\n  /// invalid secret share, or the receiver, who claimed a valid secret share was invalid).\n  ///\n  /// The message should be a copy of the encrypted secret share from the accused sender to the\n  /// accusing recipient. This message must have been authenticated as actually having come from\n  /// the sender in question.\n  ///\n  /// This will process the same blame statement multiple times, always identifying blame. It is\n  /// the caller's job to ensure they're unique in order to prevent multiple instances of blame\n  /// over a single incident.\n  pub fn blame(\n    &self,\n    sender: Participant,\n    recipient: Participant,\n    msg: EncryptedMessage<C, SecretShare<C::F>>,\n    proof: Option<EncryptionKeyProof<C>>,\n  ) -> Participant {\n    self.0.blame_internal(sender, recipient, msg, proof)\n  }\n}\n"
  },
  {
    "path": "crypto/dkg/pedpop/src/tests.rs",
    "content": "use std::collections::HashMap;\n\nuse rand_core::{RngCore, CryptoRng, OsRng};\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::Ciphersuite;\n\nuse crate::*;\n\nconst THRESHOLD: u16 = 3;\nconst PARTICIPANTS: u16 = 5;\n\n/// Clone a map without a specific value.\nfn clone_without<K: Clone + core::cmp::Eq + core::hash::Hash, V: Clone>(\n  map: &HashMap<K, V>,\n  without: &K,\n) -> HashMap<K, V> {\n  let mut res = map.clone();\n  res.remove(without).unwrap();\n  res\n}\n\ntype PedPoPEncryptedMessage<C> = EncryptedMessage<C, SecretShare<<C as Ciphersuite>::F>>;\ntype PedPoPSecretShares<C> = HashMap<Participant, PedPoPEncryptedMessage<C>>;\n\nconst CONTEXT: [u8; 32] = *b\"DKG Test Key Generation         \";\n\n// Commit, then return commitment messages, enc keys, and shares\n#[allow(clippy::type_complexity)]\nfn commit_enc_keys_and_shares<R: RngCore + CryptoRng, C: Ciphersuite>(\n  rng: &mut R,\n) -> (\n  HashMap<Participant, KeyMachine<C>>,\n  HashMap<Participant, EncryptionKeyMessage<C, Commitments<C>>>,\n  HashMap<Participant, C::G>,\n  HashMap<Participant, PedPoPSecretShares<C>>,\n) {\n  let mut machines = HashMap::new();\n  let mut commitments = HashMap::new();\n  let mut enc_keys = HashMap::new();\n  for i in (1 ..= PARTICIPANTS).map(|i| Participant::new(i).unwrap()) {\n    let params = ThresholdParams::new(THRESHOLD, PARTICIPANTS, i).unwrap();\n    let machine = KeyGenMachine::<C>::new(params, CONTEXT);\n    let (machine, these_commitments) = machine.generate_coefficients(rng);\n    machines.insert(i, machine);\n\n    commitments.insert(\n      i,\n      EncryptionKeyMessage::read::<&[u8]>(&mut these_commitments.serialize().as_ref(), params)\n        .unwrap(),\n    );\n    enc_keys.insert(i, commitments[&i].enc_key());\n  }\n\n  let mut secret_shares = HashMap::new();\n  let machines = machines\n    .drain()\n    .map(|(l, machine)| {\n      let (machine, mut shares) =\n        machine.generate_secret_shares(rng, clone_without(&commitments, 
&l)).unwrap();\n      let shares = shares\n        .drain()\n        .map(|(l, share)| {\n          (\n            l,\n            EncryptedMessage::read::<&[u8]>(\n              &mut share.serialize().as_ref(),\n              // Only t/n actually matters, so hardcode i to 1 here\n              ThresholdParams::new(THRESHOLD, PARTICIPANTS, Participant::new(1).unwrap()).unwrap(),\n            )\n            .unwrap(),\n          )\n        })\n        .collect::<HashMap<_, _>>();\n      secret_shares.insert(l, shares);\n      (l, machine)\n    })\n    .collect::<HashMap<_, _>>();\n\n  (machines, commitments, enc_keys, secret_shares)\n}\n\nfn generate_secret_shares<C: Ciphersuite>(\n  shares: &HashMap<Participant, PedPoPSecretShares<C>>,\n  recipient: Participant,\n) -> PedPoPSecretShares<C> {\n  let mut our_secret_shares = HashMap::new();\n  for (i, shares) in shares {\n    if recipient == *i {\n      continue;\n    }\n    our_secret_shares.insert(*i, shares[&recipient].clone());\n  }\n  our_secret_shares\n}\n\n/// Fully perform the PedPoP key generation algorithm.\nfn pedpop_gen<R: RngCore + CryptoRng, C: Ciphersuite>(\n  rng: &mut R,\n) -> HashMap<Participant, ThresholdKeys<C>> {\n  let (mut machines, _, _, secret_shares) = commit_enc_keys_and_shares::<_, C>(rng);\n\n  let mut verification_shares = None;\n  let mut group_key = None;\n  machines\n    .drain()\n    .map(|(i, machine)| {\n      let our_secret_shares = generate_secret_shares(&secret_shares, i);\n      let these_keys = machine.calculate_share(rng, our_secret_shares).unwrap().complete();\n\n      // Verify the verification_shares are agreed upon\n      if verification_shares.is_none() {\n        verification_shares = Some(\n          these_keys\n            .params()\n            .all_participant_indexes()\n            .map(|i| (i, these_keys.original_verification_share(i)))\n            .collect::<HashMap<_, _>>(),\n        );\n      }\n      assert_eq!(\n        
verification_shares.as_ref().unwrap(),\n        &these_keys\n          .params()\n          .all_participant_indexes()\n          .map(|i| (i, these_keys.original_verification_share(i)))\n          .collect::<HashMap<_, _>>()\n      );\n\n      // Verify the group keys are agreed upon\n      if group_key.is_none() {\n        group_key = Some(these_keys.group_key());\n      }\n      assert_eq!(group_key.unwrap(), these_keys.group_key());\n\n      (i, these_keys)\n    })\n    .collect::<HashMap<_, _>>()\n}\n\nconst ONE: Participant = Participant::new(1).unwrap();\nconst TWO: Participant = Participant::new(2).unwrap();\n\n#[test]\nfn test_pedpop() {\n  let _ = core::hint::black_box(pedpop_gen::<_, Ristretto>(&mut OsRng));\n}\n\nfn test_blame(\n  commitment_msgs: &HashMap<Participant, EncryptionKeyMessage<Ristretto, Commitments<Ristretto>>>,\n  machines: Vec<BlameMachine<Ristretto>>,\n  msg: &PedPoPEncryptedMessage<Ristretto>,\n  blame: &Option<EncryptionKeyProof<Ristretto>>,\n) {\n  for machine in machines {\n    let (additional, blamed) = machine.blame(ONE, TWO, msg.clone(), blame.clone());\n    assert_eq!(blamed, ONE);\n    // Verify additional blame also works\n    assert_eq!(additional.blame(ONE, TWO, msg.clone(), blame.clone()), ONE);\n\n    // Verify machines constructed with AdditionalBlameMachine::new work\n    assert_eq!(\n      AdditionalBlameMachine::new(CONTEXT, PARTICIPANTS, commitment_msgs.clone()).unwrap().blame(\n        ONE,\n        TWO,\n        msg.clone(),\n        blame.clone()\n      ),\n      ONE,\n    );\n  }\n}\n\n// TODO: Write a macro which expands to the following\n#[test]\nfn invalid_encryption_pop_blame() {\n  let (mut machines, commitment_msgs, _, mut secret_shares) =\n    commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng);\n\n  // Mutate the PoP of the encrypted message from 1 to 2\n  secret_shares.get_mut(&ONE).unwrap().get_mut(&TWO).unwrap().invalidate_pop();\n\n  let mut blame = None;\n  let machines = machines\n    .drain()\n   
 .filter_map(|(i, machine)| {\n      let our_secret_shares = generate_secret_shares(&secret_shares, i);\n      let machine = machine.calculate_share(&mut OsRng, our_secret_shares);\n      if i == TWO {\n        assert_eq!(\n          machine.err(),\n          Some(PedPoPError::InvalidShare { participant: ONE, blame: None })\n        );\n        // Explicitly declare we have a blame object, which happens to be None since invalid PoP\n        // is self-explainable\n        blame = Some(None);\n        None\n      } else {\n        Some(machine.unwrap())\n      }\n    })\n    .collect::<Vec<_>>();\n\n  test_blame(&commitment_msgs, machines, &secret_shares[&ONE][&TWO].clone(), &blame.unwrap());\n}\n\n#[test]\nfn invalid_ecdh_blame() {\n  let (mut machines, commitment_msgs, _, mut secret_shares) =\n    commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng);\n\n  // Mutate the share to trigger a blame event\n  // Mutates from 2 to 1, as 1 is expected to end up malicious for test_blame to pass\n  // While here, 2 is malicious, this is so 1 creates the blame proof\n  // We then malleate 1's blame proof, so 1 ends up malicious\n  // Doesn't simply invalidate the PoP as that won't have a blame statement\n  // By mutating the encrypted data, we do ensure a blame statement is created\n  secret_shares\n    .get_mut(&TWO)\n    .unwrap()\n    .get_mut(&ONE)\n    .unwrap()\n    .invalidate_msg(&mut OsRng, CONTEXT, TWO);\n\n  let mut blame = None;\n  let machines = machines\n    .drain()\n    .filter_map(|(i, machine)| {\n      let our_secret_shares = generate_secret_shares(&secret_shares, i);\n      let machine = machine.calculate_share(&mut OsRng, our_secret_shares);\n      if i == ONE {\n        blame = Some(match machine.err() {\n          Some(PedPoPError::InvalidShare { participant: TWO, blame: Some(blame) }) => Some(blame),\n          _ => panic!(),\n        });\n        None\n      } else {\n        Some(machine.unwrap())\n      }\n    })\n    .collect::<Vec<_>>();\n\n  
blame.as_mut().unwrap().as_mut().unwrap().invalidate_key();\n  test_blame(&commitment_msgs, machines, &secret_shares[&TWO][&ONE].clone(), &blame.unwrap());\n}\n\n// This should be largely equivalent to the prior test\n#[test]\nfn invalid_dleq_blame() {\n  let (mut machines, commitment_msgs, _, mut secret_shares) =\n    commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng);\n\n  secret_shares\n    .get_mut(&TWO)\n    .unwrap()\n    .get_mut(&ONE)\n    .unwrap()\n    .invalidate_msg(&mut OsRng, CONTEXT, TWO);\n\n  let mut blame = None;\n  let machines = machines\n    .drain()\n    .filter_map(|(i, machine)| {\n      let our_secret_shares = generate_secret_shares(&secret_shares, i);\n      let machine = machine.calculate_share(&mut OsRng, our_secret_shares);\n      if i == ONE {\n        blame = Some(match machine.err() {\n          Some(PedPoPError::InvalidShare { participant: TWO, blame: Some(blame) }) => Some(blame),\n          _ => panic!(),\n        });\n        None\n      } else {\n        Some(machine.unwrap())\n      }\n    })\n    .collect::<Vec<_>>();\n\n  blame.as_mut().unwrap().as_mut().unwrap().invalidate_dleq();\n  test_blame(&commitment_msgs, machines, &secret_shares[&TWO][&ONE].clone(), &blame.unwrap());\n}\n\n#[test]\nfn invalid_share_serialization_blame() {\n  let (mut machines, commitment_msgs, enc_keys, mut secret_shares) =\n    commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng);\n\n  secret_shares.get_mut(&ONE).unwrap().get_mut(&TWO).unwrap().invalidate_share_serialization(\n    &mut OsRng,\n    CONTEXT,\n    ONE,\n    enc_keys[&TWO],\n  );\n\n  let mut blame = None;\n  let machines = machines\n    .drain()\n    .filter_map(|(i, machine)| {\n      let our_secret_shares = generate_secret_shares(&secret_shares, i);\n      let machine = machine.calculate_share(&mut OsRng, our_secret_shares);\n      if i == TWO {\n        blame = Some(match machine.err() {\n          Some(PedPoPError::InvalidShare { participant: ONE, blame: Some(blame) }) => 
Some(blame),\n          _ => panic!(),\n        });\n        None\n      } else {\n        Some(machine.unwrap())\n      }\n    })\n    .collect::<Vec<_>>();\n\n  test_blame(&commitment_msgs, machines, &secret_shares[&ONE][&TWO].clone(), &blame.unwrap());\n}\n\n#[test]\nfn invalid_share_value_blame() {\n  let (mut machines, commitment_msgs, enc_keys, mut secret_shares) =\n    commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng);\n\n  secret_shares.get_mut(&ONE).unwrap().get_mut(&TWO).unwrap().invalidate_share_value(\n    &mut OsRng,\n    CONTEXT,\n    ONE,\n    enc_keys[&TWO],\n  );\n\n  let mut blame = None;\n  let machines = machines\n    .drain()\n    .filter_map(|(i, machine)| {\n      let our_secret_shares = generate_secret_shares(&secret_shares, i);\n      let machine = machine.calculate_share(&mut OsRng, our_secret_shares);\n      if i == TWO {\n        blame = Some(match machine.err() {\n          Some(PedPoPError::InvalidShare { participant: ONE, blame: Some(blame) }) => Some(blame),\n          _ => panic!(),\n        });\n        None\n      } else {\n        Some(machine.unwrap())\n      }\n    })\n    .collect::<Vec<_>>();\n\n  test_blame(&commitment_msgs, machines, &secret_shares[&ONE][&TWO].clone(), &blame.unwrap());\n}\n"
  },
  {
    "path": "crypto/dkg/promote/Cargo.toml",
    "content": "[package]\nname = \"dkg-promote\"\nversion = \"0.6.1\"\ndescription = \"Promotions for keys from the dkg crate\"\nlicense = \"MIT\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/crypto/dkg/promote\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nkeywords = [\"dkg\", \"multisig\", \"threshold\", \"ff\", \"group\"]\nedition = \"2021\"\nrust-version = \"1.80\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nthiserror = { version = \"2\", default-features = false, features = [\"std\"] }\n\nrand_core = { version = \"0.6\", default-features = false, features = [\"std\"] }\n\ntranscript = { package = \"flexible-transcript\", path = \"../../transcript\", version = \"^0.3.2\", default-features = false, features = [\"std\", \"recommended\"] }\nciphersuite = { path = \"../../ciphersuite\", version = \"^0.4.1\", default-features = false, features = [\"std\"] }\ndleq = { path = \"../../dleq\", version = \"^0.4.1\", default-features = false, features = [\"std\", \"serialize\"] }\n\ndkg = { path = \"../\", version = \"0.6.1\", default-features = false, features = [\"std\"] }\n\n[dev-dependencies]\nzeroize = { version = \"^1.5\", default-features = false, features = [\"std\", \"zeroize_derive\"] }\nrand_core = { version = \"0.6\", default-features = false, features = [\"getrandom\"] }\ndalek-ff-group = { path = \"../../dalek-ff-group\" }\ndkg-recovery = { path = \"../recovery\", default-features = false, features = [\"std\"] }\n"
  },
  {
    "path": "crypto/dkg/promote/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2021-2025 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "crypto/dkg/promote/README.md",
    "content": "# Distributed Key Generation - Promote\n\nThis crate implements 'promotions' for keys from the\n[`dkg`](https://docs.rs/dkg) crate. A promotion takes a set of keys and maps it\nto a different `Ciphersuite`.\n\nThis crate was originally part of the `dkg` crate, which was\n[audited by Cypher Stack in March 2023](\n  https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf\n), culminating in commit\n[669d2dbffc1dafb82a09d9419ea182667115df06](\n  https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06\n). Any subsequent changes have not undergone auditing.\n"
  },
  {
    "path": "crypto/dkg/promote/src/lib.rs",
    "content": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![doc = include_str!(\"../README.md\")]\n// This crate requires `dleq` which doesn't support no-std via std-shims\n// #![cfg_attr(not(feature = \"std\"), no_std)]\n\nuse core::{marker::PhantomData, ops::Deref};\nuse std::{\n  io::{self, Read, Write},\n  collections::HashMap,\n};\n\nuse rand_core::{RngCore, CryptoRng};\n\nuse ciphersuite::{group::GroupEncoding, Ciphersuite};\n\nuse transcript::{Transcript, RecommendedTranscript};\nuse dleq::DLEqProof;\n\npub use dkg::*;\n\n#[cfg(test)]\nmod tests;\n\n/// Errors encountered when promoting keys.\n#[derive(Clone, PartialEq, Eq, Debug, thiserror::Error)]\npub enum PromotionError {\n  /// Invalid participant identifier.\n  #[error(\"invalid participant (1 <= participant <= {n}, yet participant is {participant})\")]\n  InvalidParticipant {\n    /// The total amount of participants.\n    n: u16,\n    /// The specified participant.\n    participant: Participant,\n  },\n\n  /// An incorrect amount of participants was specified.\n  #[error(\"incorrect amount of participants. 
{t} <= amount <= {n}, yet amount is {amount}\")]\n  IncorrectAmountOfParticipants {\n    /// The threshold required.\n    t: u16,\n    /// The total amount of participants.\n    n: u16,\n    /// The amount of participants specified.\n    amount: usize,\n  },\n\n  /// Participant provided an invalid proof.\n  #[error(\"invalid proof {0}\")]\n  InvalidProof(Participant),\n}\n\nfn transcript<G: GroupEncoding>(key: &G, i: Participant) -> RecommendedTranscript {\n  let mut transcript = RecommendedTranscript::new(b\"DKG Generator Promotion v0.2\");\n  transcript.append_message(b\"group_key\", key.to_bytes());\n  transcript.append_message(b\"participant\", i.to_bytes());\n  transcript\n}\n\n/// Proof of valid promotion to another generator.\n#[derive(Clone, Copy)]\npub struct GeneratorProof<C: Ciphersuite> {\n  share: C::G,\n  proof: DLEqProof<C::G>,\n}\n\nimpl<C: Ciphersuite> GeneratorProof<C> {\n  pub fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {\n    writer.write_all(self.share.to_bytes().as_ref())?;\n    self.proof.write(writer)\n  }\n\n  pub fn read<R: Read>(reader: &mut R) -> io::Result<GeneratorProof<C>> {\n    Ok(GeneratorProof {\n      share: <C as Ciphersuite>::read_G(reader)?,\n      proof: DLEqProof::read(reader)?,\n    })\n  }\n\n  pub fn serialize(&self) -> Vec<u8> {\n    let mut buf = vec![];\n    self.write(&mut buf).unwrap();\n    buf\n  }\n}\n\n/// Promote a set of keys from one generator to another, where the elliptic curve is the same.\n///\n/// Since the Ciphersuite trait additionally specifies a generator, this provides an O(n) way to\n/// update the generator used with keys. 
This outperforms the key generation protocol which is\n/// exponential.\npub struct GeneratorPromotion<C1: Ciphersuite, C2: Ciphersuite> {\n  base: ThresholdKeys<C1>,\n  proof: GeneratorProof<C1>,\n  _c2: PhantomData<C2>,\n}\n\nimpl<C1: Ciphersuite, C2: Ciphersuite<F = C1::F, G = C1::G>> GeneratorPromotion<C1, C2> {\n  /// Begin promoting keys from one generator to another.\n  ///\n  /// Returns a proof this share was properly promoted.\n  pub fn promote<R: RngCore + CryptoRng>(\n    rng: &mut R,\n    base: ThresholdKeys<C1>,\n  ) -> (GeneratorPromotion<C1, C2>, GeneratorProof<C1>) {\n    // Do a DLEqProof for the new generator\n    let proof = GeneratorProof {\n      share: C2::generator() * base.original_secret_share().deref(),\n      proof: DLEqProof::prove(\n        rng,\n        &mut transcript(&base.original_group_key(), base.params().i()),\n        &[C1::generator(), C2::generator()],\n        base.original_secret_share(),\n      ),\n    };\n\n    (GeneratorPromotion { base, proof, _c2: PhantomData::<C2> }, proof)\n  }\n\n  /// Complete promotion by taking in the proofs from all other participants.\n  pub fn complete(\n    self,\n    proofs: &HashMap<Participant, GeneratorProof<C1>>,\n  ) -> Result<ThresholdKeys<C2>, PromotionError> {\n    let params = self.base.params();\n    if proofs.len() != (usize::from(params.n()) - 1) {\n      Err(PromotionError::IncorrectAmountOfParticipants {\n        t: params.n(),\n        n: params.n(),\n        amount: proofs.len() + 1,\n      })?;\n    }\n    for i in proofs.keys().copied() {\n      if u16::from(i) > params.n() {\n        Err(PromotionError::InvalidParticipant { n: params.n(), participant: i })?;\n      }\n    }\n\n    let mut verification_shares = HashMap::new();\n    verification_shares.insert(params.i(), self.proof.share);\n    for i in 1 ..= params.n() {\n      let i = Participant::new(i).unwrap();\n      if i == params.i() {\n        continue;\n      }\n\n      let proof = proofs.get(&i).unwrap();\n      
proof\n        .proof\n        .verify(\n          &mut transcript(&self.base.original_group_key(), i),\n          &[C1::generator(), C2::generator()],\n          &[self.base.original_verification_share(i), proof.share],\n        )\n        .map_err(|_| PromotionError::InvalidProof(i))?;\n      verification_shares.insert(i, proof.share);\n    }\n\n    Ok(\n      ThresholdKeys::new(\n        params,\n        self.base.interpolation().clone(),\n        self.base.original_secret_share().clone(),\n        verification_shares,\n      )\n      .unwrap(),\n    )\n  }\n}\n"
  },
  {
    "path": "crypto/dkg/promote/src/tests.rs",
    "content": "use core::marker::PhantomData;\nuse std::collections::HashMap;\n\nuse zeroize::{Zeroize, Zeroizing};\nuse rand_core::OsRng;\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::{\n  group::{ff::Field, Group},\n  Ciphersuite,\n};\n\nuse dkg::*;\nuse dkg_recovery::recover_key;\nuse crate::{GeneratorPromotion, GeneratorProof};\n\n#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]\nstruct AltGenerator<C: Ciphersuite> {\n  _curve: PhantomData<C>,\n}\n\nimpl<C: Ciphersuite> Ciphersuite for AltGenerator<C> {\n  type F = C::F;\n  type G = C::G;\n  type H = C::H;\n\n  const ID: &'static [u8] = b\"Alternate Ciphersuite\";\n\n  fn generator() -> Self::G {\n    C::G::generator() * <C as Ciphersuite>::hash_to_F(b\"DKG Promotion Test\", b\"generator\")\n  }\n\n  fn hash_to_F(dst: &[u8], data: &[u8]) -> Self::F {\n    <C as Ciphersuite>::hash_to_F(dst, data)\n  }\n}\n\n/// Clone a map without a specific value.\npub fn clone_without<K: Clone + core::cmp::Eq + core::hash::Hash, V: Clone>(\n  map: &HashMap<K, V>,\n  without: &K,\n) -> HashMap<K, V> {\n  let mut res = map.clone();\n  res.remove(without).unwrap();\n  res\n}\n\n// Test promotion of threshold keys to another generator\n#[test]\nfn test_generator_promotion() {\n  // Generate a set of `ThresholdKeys`\n  const PARTICIPANTS: u16 = 5;\n  let keys: [ThresholdKeys<_>; PARTICIPANTS as usize] = {\n    let shares: [<Ristretto as Ciphersuite>::F; PARTICIPANTS as usize] =\n      core::array::from_fn(|_| <Ristretto as Ciphersuite>::F::random(&mut OsRng));\n    let verification_shares = (0 .. 
PARTICIPANTS)\n      .map(|i| {\n        (\n          Participant::new(i + 1).unwrap(),\n          <Ristretto as Ciphersuite>::generator() * shares[usize::from(i)],\n        )\n      })\n      .collect::<HashMap<_, _>>();\n    core::array::from_fn(|i| {\n      ThresholdKeys::new(\n        ThresholdParams::new(\n          PARTICIPANTS,\n          PARTICIPANTS,\n          Participant::new(u16::try_from(i + 1).unwrap()).unwrap(),\n        )\n        .unwrap(),\n        Interpolation::Constant(vec![<Ristretto as Ciphersuite>::F::ONE; PARTICIPANTS as usize]),\n        Zeroizing::new(shares[i]),\n        verification_shares.clone(),\n      )\n      .unwrap()\n    })\n  };\n\n  // Perform the promotion\n  let mut promotions = HashMap::new();\n  let mut proofs = HashMap::new();\n  for keys in &keys {\n    let i = keys.params().i();\n    let (promotion, proof) =\n      GeneratorPromotion::<_, AltGenerator<Ristretto>>::promote(&mut OsRng, keys.clone());\n    promotions.insert(i, promotion);\n    proofs.insert(\n      i,\n      GeneratorProof::<Ristretto>::read::<&[u8]>(&mut proof.serialize().as_ref()).unwrap(),\n    );\n  }\n\n  // Complete the promotion, and verify it worked\n  let new_group_key = AltGenerator::<Ristretto>::generator() * *recover_key(&keys).unwrap();\n  for (i, promoting) in promotions.drain() {\n    let promoted = promoting.complete(&clone_without(&proofs, &i)).unwrap();\n    assert_eq!(keys[usize::from(u16::from(i) - 1)].params(), promoted.params());\n    assert_eq!(\n      keys[usize::from(u16::from(i) - 1)].original_secret_share(),\n      promoted.original_secret_share()\n    );\n    assert_eq!(new_group_key, promoted.group_key());\n    for l in 0 .. PARTICIPANTS {\n      let verification_share =\n        promoted.original_verification_share(Participant::new(l + 1).unwrap());\n      assert_eq!(\n        AltGenerator::<Ristretto>::generator() * **keys[usize::from(l)].original_secret_share(),\n        verification_share\n      );\n    }\n  }\n}\n"
  },
  {
    "path": "crypto/dkg/recovery/Cargo.toml",
    "content": "[package]\nname = \"dkg-recovery\"\nversion = \"0.6.0\"\ndescription = \"Recover a secret-shared key from a collection of dkg::ThresholdKeys\"\nlicense = \"MIT\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/crypto/dkg/recovery\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nkeywords = [\"dkg\", \"multisig\", \"threshold\", \"ff\", \"group\"]\nedition = \"2021\"\nrust-version = \"1.66\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nzeroize = { version = \"^1.5\", default-features = false }\n\nthiserror = { version = \"2\", default-features = false }\n\nciphersuite = { path = \"../../ciphersuite\", version = \"^0.4.1\", default-features = false }\ndkg = { path = \"../\", version = \"0.6\", default-features = false }\n\n[features]\nstd = [\n  \"zeroize/std\",\n  \"thiserror/std\",\n  \"ciphersuite/std\",\n  \"dkg/std\",\n]\ndefault = [\"std\"]\n"
  },
  {
    "path": "crypto/dkg/recovery/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2021-2025 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "crypto/dkg/recovery/README.md",
    "content": "# Distributed Key Generation - Recovery\n\nA utility function to recover a key from its secret shares.\n\nKeys likely SHOULD NOT ever be recovered, making this primarily intended for\ntesting purposes. Instead, the shares of the key should be used to produce\nshares for the desired action, allowing using the key while never\nreconstructing it.\n\nBefore being smashed, this crate was [audited by Cypher Stack in March 2023](\n  https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf\n), culminating in commit [669d2dbffc1dafb82a09d9419ea182667115df06](\n  https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06\n). Any subsequent changes have not undergone auditing.\n"
  },
  {
    "path": "crypto/dkg/recovery/src/lib.rs",
    "content": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![doc = include_str!(\"../README.md\")]\n#![no_std]\n\nuse core::ops::{Deref, DerefMut};\nextern crate alloc;\nuse alloc::vec::Vec;\n\nuse zeroize::Zeroizing;\n\nuse ciphersuite::Ciphersuite;\n\npub use dkg::*;\n\n/// Errors encountered when recovering a secret-shared key from a collection of\n/// `dkg::ThresholdKeys`.\n#[derive(Clone, PartialEq, Eq, Debug, thiserror::Error)]\npub enum RecoveryError {\n  /// No keys were provided.\n  #[error(\"no keys provided\")]\n  NoKeysProvided,\n  /// Not enough keys were provided.\n  #[error(\"not enough keys provided (threshold required {required}, provided {provided})\")]\n  NotEnoughKeysProvided { required: u16, provided: usize },\n  /// The keys had inconsistent parameters.\n  #[error(\"keys had inconsistent parameters\")]\n  InconsistentParameters,\n  /// The keys are from distinct secret-sharing sessions or otherwise corrupt.\n  #[error(\"recovery failed\")]\n  Failure,\n  /// An error propagated from the underlying `dkg` crate.\n  #[error(\"error from dkg ({0})\")]\n  DkgError(DkgError),\n}\n\n/// Recover a shared secret from a collection of `dkg::ThresholdKeys`.\npub fn recover_key<C: Ciphersuite>(\n  keys: &[ThresholdKeys<C>],\n) -> Result<Zeroizing<C::F>, RecoveryError> {\n  let included = keys.iter().map(|keys| keys.params().i()).collect::<Vec<_>>();\n\n  let keys_len = keys.len();\n  let mut keys = keys.iter();\n  let first_keys = keys.next().ok_or(RecoveryError::NoKeysProvided)?;\n  {\n    let t = first_keys.params().t();\n    if keys_len < usize::from(t) {\n      Err(RecoveryError::NotEnoughKeysProvided { required: t, provided: keys_len })?;\n    }\n  }\n  {\n    let first_params = (\n      first_keys.params().t(),\n      first_keys.params().n(),\n      first_keys.group_key(),\n      first_keys.current_scalar(),\n      first_keys.current_offset(),\n    );\n    for keys in keys.clone() {\n      let params = (\n        keys.params().t(),\n        
keys.params().n(),\n        keys.group_key(),\n        keys.current_scalar(),\n        keys.current_offset(),\n      );\n      if params != first_params {\n        Err(RecoveryError::InconsistentParameters)?;\n      }\n    }\n  }\n\n  let mut res: Zeroizing<_> =\n    first_keys.view(included.clone()).map_err(RecoveryError::DkgError)?.secret_share().clone();\n  for keys in keys {\n    *res.deref_mut() +=\n      keys.view(included.clone()).map_err(RecoveryError::DkgError)?.secret_share().deref();\n  }\n\n  if (C::generator() * res.deref()) != first_keys.group_key() {\n    Err(RecoveryError::Failure)?;\n  }\n\n  Ok(res)\n}\n"
  },
  {
    "path": "crypto/dkg/src/lib.rs",
    "content": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![doc = include_str!(\"../README.md\")]\n#![cfg_attr(not(feature = \"std\"), no_std)]\n\nuse core::{\n  ops::Deref,\n  fmt::{self, Debug},\n};\n#[allow(unused_imports)]\nuse std_shims::prelude::*;\nuse std_shims::{sync::Arc, vec, vec::Vec, collections::HashMap, io};\n\nuse zeroize::{Zeroize, Zeroizing};\n\nuse ciphersuite::{\n  group::{\n    ff::{Field, PrimeField},\n    GroupEncoding,\n  },\n  Ciphersuite,\n};\n\n/// The ID of a participant, defined as a non-zero u16.\n#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Zeroize)]\n#[cfg_attr(feature = \"borsh\", derive(borsh::BorshSerialize))]\npub struct Participant(u16);\nimpl Participant {\n  /// Create a new Participant identifier from a u16.\n  pub const fn new(i: u16) -> Option<Participant> {\n    if i == 0 {\n      None\n    } else {\n      Some(Participant(i))\n    }\n  }\n\n  /// Convert a Participant identifier to bytes.\n  #[allow(clippy::wrong_self_convention)]\n  pub const fn to_bytes(&self) -> [u8; 2] {\n    self.0.to_le_bytes()\n  }\n}\n\nimpl From<Participant> for u16 {\n  fn from(participant: Participant) -> u16 {\n    participant.0\n  }\n}\n\nimpl fmt::Display for Participant {\n  fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n    write!(f, \"{}\", self.0)\n  }\n}\n\n/// Errors encountered when working with threshold keys.\n#[derive(Clone, PartialEq, Eq, Debug, thiserror::Error)]\npub enum DkgError {\n  /// A parameter was zero.\n  #[error(\"a parameter was 0 (threshold {t}, participants {n})\")]\n  ZeroParameter {\n    /// The specified threshold.\n    t: u16,\n    /// The specified total amount of participants.\n    n: u16,\n  },\n\n  /// The threshold exceeded the amount of participants.\n  #[error(\"invalid threshold (max {n}, got {t})\")]\n  InvalidThreshold {\n    /// The specified threshold.\n    t: u16,\n    /// The specified total amount of participants.\n    n: u16,\n  },\n\n  /// Invalid participant 
identifier.\n  #[error(\"invalid participant (1 <= participant <= {n}, yet participant is {participant})\")]\n  InvalidParticipant {\n    /// The total amount of participants.\n    n: u16,\n    /// The specified participant.\n    participant: Participant,\n  },\n\n  /// An incorrect amount of participants was specified.\n  #[error(\"incorrect amount of verification shares (n = {n} yet {shares} provided)\")]\n  IncorrectAmountOfVerificationShares {\n    /// The amount of participants.\n    n: u16,\n    /// The amount of shares provided.\n    shares: usize,\n  },\n\n  /// An inapplicable method of interpolation was specified.\n  #[error(\"inapplicable method of interpolation ({0})\")]\n  InapplicableInterpolation(&'static str),\n\n  /// An incorrect amount of participants was specified.\n  #[error(\"incorrect amount of participants. {t} <= amount <= {n}, yet amount is {amount}\")]\n  IncorrectAmountOfParticipants {\n    /// The threshold required.\n    t: u16,\n    /// The total amount of participants.\n    n: u16,\n    /// The amount of participants specified.\n    amount: usize,\n  },\n\n  /// A participant was duplicated.\n  #[error(\"a participant ({0}) was duplicated\")]\n  DuplicatedParticipant(Participant),\n\n  /// Not participating in declared signing set.\n  #[error(\"not participating in declared signing set\")]\n  NotParticipating,\n}\n\n// Manually implements BorshDeserialize so we can enforce it's a valid index\n#[cfg(feature = \"borsh\")]\nimpl borsh::BorshDeserialize for Participant {\n  fn deserialize_reader<R: io::Read>(reader: &mut R) -> io::Result<Self> {\n    Participant::new(u16::deserialize_reader(reader)?)\n      .ok_or_else(|| io::Error::other(\"invalid participant\"))\n  }\n}\n\n/// Parameters for a multisig.\n#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]\n#[cfg_attr(feature = \"borsh\", derive(borsh::BorshSerialize))]\npub struct ThresholdParams {\n  /// Participants needed to sign on behalf of the group.\n  t: u16,\n  /// Amount of 
participants.\n  n: u16,\n  /// Index of the participant being acted for.\n  i: Participant,\n}\n\n/// An iterator over all participant indexes.\nstruct AllParticipantIndexes {\n  i: u16,\n  n: u16,\n}\nimpl Iterator for AllParticipantIndexes {\n  type Item = Participant;\n  fn next(&mut self) -> Option<Participant> {\n    if self.i > self.n {\n      None?;\n    }\n    let res = Participant::new(self.i).unwrap();\n\n    // If i == n == u16::MAX, we cause `i > n` by setting `n` to `0` so the iterator becomes empty\n    if self.i == u16::MAX {\n      self.n = 0;\n    } else {\n      self.i += 1;\n    }\n\n    Some(res)\n  }\n}\n\nimpl ThresholdParams {\n  /// Create a new set of parameters.\n  pub const fn new(t: u16, n: u16, i: Participant) -> Result<ThresholdParams, DkgError> {\n    if (t == 0) || (n == 0) {\n      return Err(DkgError::ZeroParameter { t, n });\n    }\n\n    if t > n {\n      return Err(DkgError::InvalidThreshold { t, n });\n    }\n    if i.0 > n {\n      return Err(DkgError::InvalidParticipant { n, participant: i });\n    }\n\n    Ok(ThresholdParams { t, n, i })\n  }\n\n  /// The threshold for a multisig with these parameters.\n  pub const fn t(&self) -> u16 {\n    self.t\n  }\n  /// The amount of participants for a multisig with these parameters.\n  pub const fn n(&self) -> u16 {\n    self.n\n  }\n  /// The participant index of the share with these parameters.\n  pub const fn i(&self) -> Participant {\n    self.i\n  }\n\n  /// An iterator over all participant indexes.\n  pub fn all_participant_indexes(&self) -> impl Iterator<Item = Participant> {\n    AllParticipantIndexes { i: 1, n: self.n }\n  }\n}\n\n#[cfg(feature = \"borsh\")]\nimpl borsh::BorshDeserialize for ThresholdParams {\n  fn deserialize_reader<R: io::Read>(reader: &mut R) -> io::Result<Self> {\n    let t = u16::deserialize_reader(reader)?;\n    let n = u16::deserialize_reader(reader)?;\n    let i = Participant::deserialize_reader(reader)?;\n    ThresholdParams::new(t, n, 
i).map_err(|e| io::Error::other(format!(\"{e:?}\")))\n  }\n}\n\n/// A method of interpolation.\n#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]\npub enum Interpolation<F: Zeroize + PrimeField> {\n  /// A list of constant coefficients, one for each of the secret key shares.\n  /*\n    There's no benefit to using a full linear combination here, as the additive term would have\n    an entirely known evaluation with a fixed, public coefficient of `1`. Accordingly, the entire\n    key can simply be offset with the additive term to achieve the same effect.\n  */\n  Constant(Vec<F>),\n  /// Lagrange interpolation.\n  Lagrange,\n}\n\nimpl<F: Zeroize + PrimeField> Interpolation<F> {\n  /// The interpolation factor for this participant, within this signing set.\n  fn interpolation_factor(&self, i: Participant, included: &[Participant]) -> F {\n    match self {\n      Interpolation::Constant(c) => c[usize::from(u16::from(i) - 1)],\n      Interpolation::Lagrange => {\n        let i_f = F::from(u64::from(u16::from(i)));\n\n        let mut num = F::ONE;\n        let mut denom = F::ONE;\n        for l in included {\n          if i == *l {\n            continue;\n          }\n\n          let share = F::from(u64::from(u16::from(*l)));\n          num *= share;\n          denom *= share - i_f;\n        }\n\n        // Safe as this will only be 0 if we're part of the above loop\n        // (which we have an if case to avoid)\n        num * denom.invert().unwrap()\n      }\n    }\n  }\n}\n\n/// A key share for a thresholdized secret key.\n///\n/// This is the 'core' structure containing all relevant data, expected to be wrapped into an\n/// heap-allocated pointer to minimize copies on the stack (`ThresholdKeys`, the publicly exposed\n/// type).\n#[derive(Clone, PartialEq, Eq)]\nstruct ThresholdCore<C: Ciphersuite> {\n  params: ThresholdParams,\n  group_key: C::G,\n  verification_shares: HashMap<Participant, C::G>,\n  interpolation: Interpolation<C::F>,\n  secret_share: 
Zeroizing<C::F>,\n}\n\nimpl<C: Ciphersuite> fmt::Debug for ThresholdCore<C> {\n  fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {\n    fmt\n      .debug_struct(\"ThresholdCore\")\n      .field(\"params\", &self.params)\n      .field(\"group_key\", &self.group_key)\n      .field(\"verification_shares\", &self.verification_shares)\n      .field(\"interpolation\", &self.interpolation)\n      .finish_non_exhaustive()\n  }\n}\n\nimpl<C: Ciphersuite> Zeroize for ThresholdCore<C> {\n  fn zeroize(&mut self) {\n    self.params.zeroize();\n    self.group_key.zeroize();\n    for share in self.verification_shares.values_mut() {\n      share.zeroize();\n    }\n    self.interpolation.zeroize();\n    self.secret_share.zeroize();\n  }\n}\n\n/// Threshold keys usable for signing.\n#[derive(Clone, Debug, Zeroize)]\npub struct ThresholdKeys<C: Ciphersuite> {\n  // Core keys.\n  #[zeroize(skip)]\n  core: Arc<Zeroizing<ThresholdCore<C>>>,\n\n  // Scalar applied to these keys.\n  scalar: C::F,\n  // Offset applied to these keys.\n  offset: C::F,\n}\n\n/// View of keys, interpolated and with the expected linear combination taken for usage.\n#[derive(Clone)]\npub struct ThresholdView<C: Ciphersuite> {\n  interpolation: Interpolation<C::F>,\n  scalar: C::F,\n  offset: C::F,\n  group_key: C::G,\n  included: Vec<Participant>,\n  secret_share: Zeroizing<C::F>,\n  original_verification_shares: HashMap<Participant, C::G>,\n  verification_shares: HashMap<Participant, C::G>,\n}\n\nimpl<C: Ciphersuite> fmt::Debug for ThresholdView<C> {\n  fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {\n    fmt\n      .debug_struct(\"ThresholdView\")\n      .field(\"interpolation\", &self.interpolation)\n      .field(\"scalar\", &self.scalar)\n      .field(\"offset\", &self.offset)\n      .field(\"group_key\", &self.group_key)\n      .field(\"included\", &self.included)\n      .field(\"original_verification_shares\", &self.original_verification_shares)\n      
.field(\"verification_shares\", &self.verification_shares)\n      .finish_non_exhaustive()\n  }\n}\n\nimpl<C: Ciphersuite> Zeroize for ThresholdView<C> {\n  fn zeroize(&mut self) {\n    self.scalar.zeroize();\n    self.offset.zeroize();\n    self.group_key.zeroize();\n    self.included.zeroize();\n    self.secret_share.zeroize();\n    for share in self.original_verification_shares.values_mut() {\n      share.zeroize();\n    }\n    for share in self.verification_shares.values_mut() {\n      share.zeroize();\n    }\n  }\n}\n\nimpl<C: Ciphersuite> ThresholdKeys<C> {\n  /// Create a new set of ThresholdKeys.\n  pub fn new(\n    params: ThresholdParams,\n    interpolation: Interpolation<C::F>,\n    secret_share: Zeroizing<C::F>,\n    verification_shares: HashMap<Participant, C::G>,\n  ) -> Result<ThresholdKeys<C>, DkgError> {\n    if verification_shares.len() != usize::from(params.n()) {\n      Err(DkgError::IncorrectAmountOfVerificationShares {\n        n: params.n(),\n        shares: verification_shares.len(),\n      })?;\n    }\n    for participant in verification_shares.keys().copied() {\n      if u16::from(participant) > params.n() {\n        Err(DkgError::InvalidParticipant { n: params.n(), participant })?;\n      }\n    }\n\n    match &interpolation {\n      Interpolation::Constant(_) => {\n        if params.t() != params.n() {\n          Err(DkgError::InapplicableInterpolation(\"constant interpolation for keys where t != n\"))?;\n        }\n      }\n      Interpolation::Lagrange => {}\n    }\n\n    let t = (1 ..= params.t()).map(Participant).collect::<Vec<_>>();\n    let group_key =\n      t.iter().map(|i| verification_shares[i] * interpolation.interpolation_factor(*i, &t)).sum();\n\n    Ok(ThresholdKeys {\n      core: Arc::new(Zeroizing::new(ThresholdCore {\n        params,\n        interpolation,\n        secret_share,\n        group_key,\n        verification_shares,\n      })),\n      scalar: C::F::ONE,\n      offset: C::F::ZERO,\n    })\n  }\n\n  /// Scale 
the keys by a given scalar to allow for various account and privacy schemes.\n  ///\n  /// This scalar is ephemeral and will not be included when these keys are serialized. The\n  /// scalar is applied on top of any already-existing scalar/offset.\n  ///\n  /// Returns `None` if the scalar is equal to `0`.\n  #[must_use]\n  pub fn scale(mut self, scalar: C::F) -> Option<ThresholdKeys<C>> {\n    if bool::from(scalar.is_zero()) {\n      None?;\n    }\n    self.scalar *= scalar;\n    self.offset *= scalar;\n    Some(self)\n  }\n\n  /// Offset the keys by a given scalar to allow for various account and privacy schemes.\n  ///\n  /// This offset is ephemeral and will not be included when these keys are serialized. The\n  /// offset is applied on top of any already-existing scalar/offset.\n  #[must_use]\n  pub fn offset(mut self, offset: C::F) -> ThresholdKeys<C> {\n    self.offset += offset;\n    self\n  }\n\n  /// Return the current scalar in-use for these keys.\n  pub fn current_scalar(&self) -> C::F {\n    self.scalar\n  }\n\n  /// Return the current offset in-use for these keys.\n  pub fn current_offset(&self) -> C::F {\n    self.offset\n  }\n\n  /// Return the parameters for these keys.\n  pub fn params(&self) -> ThresholdParams {\n    self.core.params\n  }\n\n  /// Return the original group key, without any tweaks applied.\n  pub fn original_group_key(&self) -> C::G {\n    self.core.group_key\n  }\n\n  /// Return the interpolation method for these keys.\n  pub fn interpolation(&self) -> &Interpolation<C::F> {\n    &self.core.interpolation\n  }\n\n  /// Return the group key, with the expected linear combination taken.\n  pub fn group_key(&self) -> C::G {\n    (self.core.group_key * self.scalar) + (C::generator() * self.offset)\n  }\n\n  /// Return the underlying secret share for these keys, without any tweaks applied.\n  pub fn original_secret_share(&self) -> &Zeroizing<C::F> {\n    &self.core.secret_share\n  }\n\n  /// Return the original (untweaked) verification 
share for the specified participant.\n  ///\n  /// This will panic if the participant index is invalid for these keys.\n  pub fn original_verification_share(&self, l: Participant) -> C::G {\n    self.core.verification_shares[&l]\n  }\n\n  /// Obtain a view of these keys, interpolated for the specified signing set, with the specified\n  /// linear combination taken.\n  pub fn view(&self, mut included: Vec<Participant>) -> Result<ThresholdView<C>, DkgError> {\n    if (included.len() < self.params().t.into()) ||\n      (usize::from(self.params().n()) < included.len())\n    {\n      Err(DkgError::IncorrectAmountOfParticipants {\n        t: self.params().t,\n        n: self.params().n,\n        amount: included.len(),\n      })?;\n    }\n    included.sort();\n    {\n      let mut found = included[0] == self.params().i();\n      for i in 1 .. included.len() {\n        if included[i - 1] == included[i] {\n          Err(DkgError::DuplicatedParticipant(included[i]))?;\n        }\n        found |= included[i] == self.params().i();\n      }\n      if !found {\n        Err(DkgError::NotParticipating)?;\n      }\n    }\n    {\n      let last = *included.last().unwrap();\n      if u16::from(last) > self.params().n() {\n        Err(DkgError::InvalidParticipant { n: self.params().n(), participant: last })?;\n      }\n    }\n\n    // The interpolation occurs multiplicatively, letting us scale by the scalar now\n    let secret_share_scaled = Zeroizing::new(self.scalar * self.original_secret_share().deref());\n    let mut secret_share = Zeroizing::new(\n      self.core.interpolation.interpolation_factor(self.params().i(), &included) *\n        secret_share_scaled.deref(),\n    );\n\n    let mut verification_shares = HashMap::with_capacity(included.len());\n    for i in &included {\n      let verification_share = self.core.verification_shares[i];\n      let verification_share = verification_share *\n        self.scalar *\n        self.core.interpolation.interpolation_factor(*i, 
&included);\n      verification_shares.insert(*i, verification_share);\n    }\n\n    /*\n      The offset is included by adding it to the participant with the lowest ID.\n\n      This is done after interpolating to ensure, regardless of the method of interpolation, that\n      the method of interpolation does not scale the offset. For Lagrange interpolation, we could\n      add the offset to every key share before interpolating, yet for Constant interpolation, we\n      _have_ to add it as we do here (which also works even when we intend to perform Lagrange\n      interpolation).\n    */\n    if included[0] == self.params().i() {\n      *secret_share += self.offset;\n    }\n    *verification_shares.get_mut(&included[0]).unwrap() += C::generator() * self.offset;\n\n    Ok(ThresholdView {\n      interpolation: self.core.interpolation.clone(),\n      scalar: self.scalar,\n      offset: self.offset,\n      group_key: self.group_key(),\n      secret_share,\n      original_verification_shares: self.core.verification_shares.clone(),\n      verification_shares,\n      included,\n    })\n  }\n\n  /// Write these keys to a type satisfying `std::io::Write`.\n  ///\n  /// This will not include the ephemeral scalar/offset.\n  pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {\n    writer.write_all(&u32::try_from(C::ID.len()).unwrap().to_le_bytes())?;\n    writer.write_all(C::ID)?;\n    writer.write_all(&self.core.params.t.to_le_bytes())?;\n    writer.write_all(&self.core.params.n.to_le_bytes())?;\n    writer.write_all(&self.core.params.i.to_bytes())?;\n    match &self.core.interpolation {\n      Interpolation::Constant(c) => {\n        writer.write_all(&[0])?;\n        for c in c {\n          writer.write_all(c.to_repr().as_ref())?;\n        }\n      }\n      Interpolation::Lagrange => writer.write_all(&[1])?,\n    };\n    let mut share_bytes = self.core.secret_share.to_repr();\n    writer.write_all(share_bytes.as_ref())?;\n    
share_bytes.as_mut().zeroize();\n    for l in 1 ..= self.core.params.n {\n      writer.write_all(\n        self.core.verification_shares[&Participant::new(l).unwrap()].to_bytes().as_ref(),\n      )?;\n    }\n    Ok(())\n  }\n\n  /// Serialize these keys to a `Vec<u8>`.\n  ///\n  /// This will not include the ephemeral scalar/offset.\n  pub fn serialize(&self) -> Zeroizing<Vec<u8>> {\n    let mut serialized = Zeroizing::new(vec![]);\n    self.write::<Vec<u8>>(serialized.as_mut()).unwrap();\n    serialized\n  }\n\n  /// Read keys from a type satisfying `std::io::Read`.\n  pub fn read<R: io::Read>(reader: &mut R) -> io::Result<ThresholdKeys<C>> {\n    {\n      let different = || io::Error::other(\"deserializing ThresholdKeys for another curve\");\n\n      let mut id_len = [0; 4];\n      reader.read_exact(&mut id_len)?;\n      if u32::try_from(C::ID.len()).unwrap().to_le_bytes() != id_len {\n        Err(different())?;\n      }\n\n      let mut id = vec![0; C::ID.len()];\n      reader.read_exact(&mut id)?;\n      if id != C::ID {\n        Err(different())?;\n      }\n    }\n\n    let (t, n, i) = {\n      let mut read_u16 = || -> io::Result<u16> {\n        let mut value = [0; 2];\n        reader.read_exact(&mut value)?;\n        Ok(u16::from_le_bytes(value))\n      };\n      (\n        read_u16()?,\n        read_u16()?,\n        Participant::new(read_u16()?).ok_or(io::Error::other(\"invalid participant index\"))?,\n      )\n    };\n\n    let mut interpolation = [0];\n    reader.read_exact(&mut interpolation)?;\n    let interpolation = match interpolation[0] {\n      0 => Interpolation::Constant({\n        let mut res = Vec::with_capacity(usize::from(n));\n        for _ in 0 .. 
n {\n          res.push(C::read_F(reader)?);\n        }\n        res\n      }),\n      1 => Interpolation::Lagrange,\n      _ => Err(io::Error::other(\"invalid interpolation method\"))?,\n    };\n\n    let secret_share = Zeroizing::new(C::read_F(reader)?);\n\n    let mut verification_shares = HashMap::new();\n    for l in (1 ..= n).map(Participant) {\n      verification_shares.insert(l, <C as Ciphersuite>::read_G(reader)?);\n    }\n\n    ThresholdKeys::new(\n      ThresholdParams::new(t, n, i).map_err(io::Error::other)?,\n      interpolation,\n      secret_share,\n      verification_shares,\n    )\n    .map_err(io::Error::other)\n  }\n}\n\nimpl<C: Ciphersuite> ThresholdView<C> {\n  /// Return the scalar applied to this view.\n  pub fn scalar(&self) -> C::F {\n    self.scalar\n  }\n\n  /// Return the offset applied to this view.\n  pub fn offset(&self) -> C::F {\n    self.offset\n  }\n\n  /// Return the group key.\n  pub fn group_key(&self) -> C::G {\n    self.group_key\n  }\n\n  /// Return the included signers.\n  pub fn included(&self) -> &[Participant] {\n    &self.included\n  }\n\n  /// Return the interpolation factor for a signer.\n  pub fn interpolation_factor(&self, participant: Participant) -> Option<C::F> {\n    if !self.included.contains(&participant) {\n      None?\n    }\n    Some(self.interpolation.interpolation_factor(participant, &self.included))\n  }\n\n  /// Return the interpolated secret share, with the expected linear combination taken.\n  pub fn secret_share(&self) -> &Zeroizing<C::F> {\n    &self.secret_share\n  }\n\n  /// Return the original (untweaked) verification share for the specified participant.\n  ///\n  /// This will panic if the participant index is invalid for these keys.\n  pub fn original_verification_share(&self, l: Participant) -> C::G {\n    self.original_verification_shares[&l]\n  }\n\n  /// Return the interpolated verification share, with the expected linear combination taken,\n  /// for the specified participant.\n  ///\n  
/// This will panic if the participant was not included in the signing set.\n  pub fn verification_share(&self, l: Participant) -> C::G {\n    self.verification_shares[&l]\n  }\n}\n"
  },
  {
    "path": "crypto/dleq/Cargo.toml",
    "content": "[package]\nname = \"dleq\"\nversion = \"0.4.1\"\ndescription = \"Implementation of single and cross-curve Discrete Log Equality proofs\"\nlicense = \"MIT\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/crypto/dleq\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nedition = \"2021\"\nrust-version = \"1.79\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nrustversion = \"1\"\n\nthiserror = { version = \"2\", default-features = false, optional = true }\nrand_core = { version = \"0.6\", default-features = false }\n\nzeroize = { version = \"^1.5\", default-features = false, features = [\"zeroize_derive\"] }\n\ndigest = { version = \"0.10\", default-features = false }\ntranscript = { package = \"flexible-transcript\", path = \"../transcript\", version = \"^0.3.2\", default-features = false }\n\nff = { version = \"0.13\", default-features = false }\ngroup = { version = \"0.13\", default-features = false }\n\nmultiexp = { path = \"../multiexp\", version = \"0.4\", default-features = false, features = [\"batch\"], optional = true }\n\n[dev-dependencies]\nhex-literal = \"0.4\"\n\nrand_core = { version = \"0.6\", features = [\"getrandom\"] }\n\nblake2 = \"0.10\"\n\nk256 = { version = \"^0.13.1\", default-features = false, features = [\"std\", \"arithmetic\", \"bits\"] }\ndalek-ff-group = { path = \"../dalek-ff-group\" }\n\ntranscript = { package = \"flexible-transcript\", path = \"../transcript\", features = [\"recommended\"] }\n\n[features]\nstd = [\"thiserror?/std\", \"rand_core/std\", \"zeroize/std\", \"digest/std\", \"transcript/std\", \"ff/std\", \"multiexp?/std\"]\nserialize = [\"std\"]\n\n# Needed for cross-group DLEqs\nsecure_capacity_difference = []\nexperimental = [\"std\", \"thiserror\", \"multiexp\"]\n\ndefault = [\n  \"std\",\n  # Only applies to experimental, yet is default to ensure security\n  # experimental doesn't mandate it 
itself in case two curves with extreme\n  # capacity differences are desired to be used together, in which case the user\n  # must specify experimental without default features\n  \"secure_capacity_difference\"\n]\n"
  },
  {
    "path": "crypto/dleq/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2020-2023 Luke Parker, Lee Bousfield\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "crypto/dleq/README.md",
    "content": "# Discrete Log Equality\n\nImplementation of discrete log equality proofs for curves implementing\n`ff`/`group`.\n\nThere is also a highly experimental cross-group DLEq proof, under\nthe `experimental` feature, which has no formal proofs available yet is\navailable here regardless.\n\nThis library, except for the `experimental` feature, was\n[audited by Cypher Stack in March 2023](https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf),\nculminating in commit\n[669d2dbffc1dafb82a09d9419ea182667115df06](https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06).\nAny subsequent changes have not undergone auditing.\n\n### Cross-Group DLEq\n\nThe present cross-group DLEq is based off\n[MRL-0010](https://web.getmonero.org/resources/research-lab/pubs/MRL-0010.pdf),\nwhich isn't computationally correct as while it proves both keys have the same\ndiscrete logarithm for their `G'`/`H'` component, it doesn't prove a lack of a\n`G`/`H` component. Accordingly, it was augmented with a pair of Schnorr Proof of\nKnowledges, proving a known `G'`/`H'` component, guaranteeing a lack of a\n`G`/`H` component (assuming an unknown relation between `G`/`H` and `G'`/`H'`).\n\nThe challenges for the ring signatures were also merged, removing one-element\nfrom each bit's proof with only a slight reduction to challenge security (as\ninstead of being uniform over each scalar field, they're uniform over the\nmutual bit capacity of each scalar field). This reduction is identical to the\none applied to the proved-for scalar, and accordingly should not reduce overall\nsecurity. It does create a lack of domain separation, yet that shouldn't be an\nissue.\n\nThe following variants are available:\n\n- `ClassicLinear`. This is only for reference purposes, being the above\n  described proof, with no further optimizations.\n\n- `ConciseLinear`. 
This proves for 2 bits at a time, not increasing the\n  signature size for both bits yet decreasing the amount of\n  commitments/challenges in total.\n\n- `EfficientLinear`. This provides ring signatures in the form\n  `((R_G, R_H), s)`, instead of `(e, s)`, and accordingly enables a batch\n  verification of their final step. It is the most performant, and also the\n  largest, option.\n\n- `CompromiseLinear`. This provides signatures in the form `((R_G, R_H), s)` AND\n  proves for 2-bits at a time. While this increases the amount of steps in\n  verifying the ring signatures, which aren't batch verified, and decreases the\n  amount of items batched (an operation which grows in efficiency with\n  quantity), it strikes a balance between speed and size.\n\nThe following numbers are from benchmarks performed with k256/curve25519_dalek\non a Intel i7-118567:\n\n| Algorithm          | Size                    | Verification Time |\n|--------------------|-------------------------|-------------------|\n| `ClassicLinear`    | 56829 bytes (+27%)      | 157ms (0%)        |\n| `ConciseLinear`    | 44607 bytes (Reference) | 156ms (Reference) |\n| `EfficientLinear`  | 65145 bytes (+46%)      | 122ms (-22%)      |\n| `CompromiseLinear` | 48765 bytes  (+9%)      | 137ms (-12%)      |\n\n`CompromiseLinear` is the best choice by only being marginally sub-optimal\nregarding size, yet still achieving most of the desired performance\nimprovements. That said, neither the original postulation (which had flaws) nor\nany construction here has been proven nor audited. Accordingly, they are solely\nexperimental, and none are recommended.\n\nAll proofs are suffixed \"Linear\" in the hope a logarithmic proof makes itself\navailable, which would likely immediately become the most efficient option.\n"
  },
  {
    "path": "crypto/dleq/src/cross_group/aos.rs",
    "content": "use rand_core::{RngCore, CryptoRng};\n\nuse zeroize::Zeroize;\n\nuse transcript::Transcript;\n\nuse group::{\n  ff::{Field, PrimeFieldBits},\n  prime::PrimeGroup,\n};\n\nuse multiexp::BatchVerifier;\n\nuse crate::cross_group::{\n  Generators, DLEqError,\n  scalar::{scalar_convert, mutual_scalar_from_bytes},\n};\n\n#[cfg(feature = \"serialize\")]\nuse std::io::{Read, Write};\n#[cfg(feature = \"serialize\")]\nuse ff::PrimeField;\n#[cfg(feature = \"serialize\")]\nuse crate::{read_scalar, cross_group::read_point};\n\n#[allow(non_camel_case_types)]\n#[derive(Clone, PartialEq, Eq, Debug)]\npub(crate) enum Re<G0: PrimeGroup, G1: PrimeGroup> {\n  R(G0, G1),\n  // Merged challenges have a slight security reduction, yet one already applied to the scalar\n  // being proven for, and this saves ~8kb. Alternatively, challenges could be redefined as a seed,\n  // present here, which is then hashed for each of the two challenges, remaining unbiased/unique\n  // while maintaining the bandwidth savings, yet also while adding 252 hashes for\n  // Secp256k1/Ed25519\n  e(G0::Scalar),\n}\n\nimpl<G0: PrimeGroup, G1: PrimeGroup> Re<G0, G1> {\n  #[allow(non_snake_case)]\n  pub(crate) fn R_default() -> Re<G0, G1> {\n    Re::R(G0::identity(), G1::identity())\n  }\n\n  pub(crate) fn e_default() -> Re<G0, G1> {\n    Re::e(G0::Scalar::ZERO)\n  }\n}\n\n#[allow(non_snake_case)]\n#[derive(Clone, PartialEq, Eq, Debug)]\npub(crate) struct Aos<G0: PrimeGroup + Zeroize, G1: PrimeGroup + Zeroize, const RING_LEN: usize> {\n  Re_0: Re<G0, G1>,\n  s: [(G0::Scalar, G1::Scalar); RING_LEN],\n}\n\nimpl<\n    G0: PrimeGroup<Scalar: PrimeFieldBits + Zeroize> + Zeroize,\n    G1: PrimeGroup<Scalar: PrimeFieldBits + Zeroize> + Zeroize,\n    const RING_LEN: usize,\n  > Aos<G0, G1, RING_LEN>\n{\n  #[allow(non_snake_case)]\n  fn nonces<T: Transcript>(mut transcript: T, nonces: (G0, G1)) -> (G0::Scalar, G1::Scalar) {\n    transcript.domain_separate(b\"aos_membership_proof\");\n    
transcript.append_message(b\"ring_len\", u8::try_from(RING_LEN).unwrap().to_le_bytes());\n    transcript.append_message(b\"nonce_0\", nonces.0.to_bytes());\n    transcript.append_message(b\"nonce_1\", nonces.1.to_bytes());\n    mutual_scalar_from_bytes(transcript.challenge(b\"challenge\").as_ref())\n  }\n\n  #[allow(non_snake_case)]\n  fn R(\n    generators: (Generators<G0>, Generators<G1>),\n    s: (G0::Scalar, G1::Scalar),\n    A: (G0, G1),\n    e: (G0::Scalar, G1::Scalar),\n  ) -> (G0, G1) {\n    (((generators.0.alt * s.0) - (A.0 * e.0)), ((generators.1.alt * s.1) - (A.1 * e.1)))\n  }\n\n  #[allow(non_snake_case, clippy::type_complexity)]\n  fn R_batch(\n    generators: (Generators<G0>, Generators<G1>),\n    s: (G0::Scalar, G1::Scalar),\n    A: (G0, G1),\n    e: (G0::Scalar, G1::Scalar),\n  ) -> (Vec<(G0::Scalar, G0)>, Vec<(G1::Scalar, G1)>) {\n    (vec![(-s.0, generators.0.alt), (e.0, A.0)], vec![(-s.1, generators.1.alt), (e.1, A.1)])\n  }\n\n  #[allow(non_snake_case)]\n  fn R_nonces<T: Transcript>(\n    transcript: T,\n    generators: (Generators<G0>, Generators<G1>),\n    s: (G0::Scalar, G1::Scalar),\n    A: (G0, G1),\n    e: (G0::Scalar, G1::Scalar),\n  ) -> (G0::Scalar, G1::Scalar) {\n    Self::nonces(transcript, Self::R(generators, s, A, e))\n  }\n\n  #[allow(non_snake_case)]\n  pub(crate) fn prove<R: RngCore + CryptoRng, T: Clone + Transcript>(\n    rng: &mut R,\n    transcript: &T,\n    generators: (Generators<G0>, Generators<G1>),\n    ring: &[(G0, G1)],\n    mut actual: usize,\n    blinding_key: &mut (G0::Scalar, G1::Scalar),\n    mut Re_0: Re<G0, G1>,\n  ) -> Self {\n    // While it is possible to use larger values, it's not efficient to do so\n    // 2 + 2 == 2^2, yet 2 + 2 + 2 < 2^3\n    debug_assert!((RING_LEN == 2) || (RING_LEN == 4));\n    debug_assert_eq!(RING_LEN, ring.len());\n\n    let mut s = [(G0::Scalar::ZERO, G1::Scalar::ZERO); RING_LEN];\n\n    let mut r = (G0::Scalar::random(&mut *rng), G1::Scalar::random(&mut *rng));\n    
#[allow(non_snake_case)]\n    let original_R = (generators.0.alt * r.0, generators.1.alt * r.1);\n    #[allow(non_snake_case)]\n    let mut R = original_R;\n\n    for i in ((actual + 1) ..= (actual + RING_LEN)).map(|i| i % RING_LEN) {\n      let e = Self::nonces(transcript.clone(), R);\n      if i == 0 {\n        match Re_0 {\n          Re::R(ref mut R0_0, ref mut R1_0) => {\n            *R0_0 = R.0;\n            *R1_0 = R.1\n          }\n          Re::e(ref mut e_0) => *e_0 = e.0,\n        }\n      }\n\n      // Solve for the real index\n      if i == actual {\n        s[i] = (r.0 + (e.0 * blinding_key.0), r.1 + (e.1 * blinding_key.1));\n        debug_assert_eq!(Self::R(generators, s[i], ring[actual], e), original_R);\n        actual.zeroize();\n        blinding_key.0.zeroize();\n        blinding_key.1.zeroize();\n        r.0.zeroize();\n        r.1.zeroize();\n        break;\n      }\n\n      // Generate a decoy response\n      s[i] = (G0::Scalar::random(&mut *rng), G1::Scalar::random(&mut *rng));\n      R = Self::R(generators, s[i], ring[i], e);\n    }\n\n    Aos { Re_0, s }\n  }\n\n  // Assumes the ring has already been transcripted in some form. Critically insecure if it hasn't\n  pub(crate) fn verify<R: RngCore + CryptoRng, T: Clone + Transcript>(\n    &self,\n    rng: &mut R,\n    transcript: &T,\n    generators: (Generators<G0>, Generators<G1>),\n    batch: &mut (BatchVerifier<(), G0>, BatchVerifier<(), G1>),\n    ring: &[(G0, G1)],\n  ) -> Result<(), DLEqError> {\n    debug_assert!((RING_LEN == 2) || (RING_LEN == 4));\n    debug_assert_eq!(RING_LEN, ring.len());\n\n    #[allow(non_snake_case)]\n    match self.Re_0 {\n      Re::R(R0_0, R1_0) => {\n        let mut e = Self::nonces(transcript.clone(), (R0_0, R1_0));\n        #[allow(clippy::needless_range_loop)]\n        for i in 0 .. 
(RING_LEN - 1) {\n          e = Self::R_nonces(transcript.clone(), generators, self.s[i], ring[i], e);\n        }\n\n        let mut statements =\n          Self::R_batch(generators, *self.s.last().unwrap(), *ring.last().unwrap(), e);\n        statements.0.push((G0::Scalar::ONE, R0_0));\n        statements.1.push((G1::Scalar::ONE, R1_0));\n        batch.0.queue(&mut *rng, (), statements.0);\n        batch.1.queue(&mut *rng, (), statements.1);\n      }\n\n      Re::e(e_0) => {\n        let e_0 = (e_0, scalar_convert(e_0).ok_or(DLEqError::InvalidChallenge)?);\n        let mut e = None;\n        #[allow(clippy::needless_range_loop)]\n        for i in 0 .. RING_LEN {\n          e = Some(Self::R_nonces(\n            transcript.clone(),\n            generators,\n            self.s[i],\n            ring[i],\n            e.unwrap_or(e_0),\n          ));\n        }\n\n        // Will panic if the above loop is never run somehow\n        // If e wasn't an Option, and instead initially set to e_0, it'd always pass\n        if e_0 != e.unwrap() {\n          Err(DLEqError::InvalidProof)?;\n        }\n      }\n    }\n\n    Ok(())\n  }\n\n  #[cfg(feature = \"serialize\")]\n  pub(crate) fn write<W: Write>(&self, w: &mut W) -> std::io::Result<()> {\n    #[allow(non_snake_case)]\n    match self.Re_0 {\n      Re::R(R0, R1) => {\n        w.write_all(R0.to_bytes().as_ref())?;\n        w.write_all(R1.to_bytes().as_ref())?;\n      }\n      Re::e(e) => w.write_all(e.to_repr().as_ref())?,\n    }\n\n    for i in 0 .. 
RING_LEN {\n      w.write_all(self.s[i].0.to_repr().as_ref())?;\n      w.write_all(self.s[i].1.to_repr().as_ref())?;\n    }\n\n    Ok(())\n  }\n\n  #[allow(non_snake_case)]\n  #[cfg(feature = \"serialize\")]\n  pub(crate) fn read<R: Read>(r: &mut R, mut Re_0: Re<G0, G1>) -> std::io::Result<Self> {\n    match Re_0 {\n      Re::R(ref mut R0, ref mut R1) => {\n        *R0 = read_point(r)?;\n        *R1 = read_point(r)?\n      }\n      Re::e(ref mut e) => *e = read_scalar(r)?,\n    }\n\n    let mut s = [(G0::Scalar::ZERO, G1::Scalar::ZERO); RING_LEN];\n    for s in &mut s {\n      *s = (read_scalar(r)?, read_scalar(r)?);\n    }\n\n    Ok(Aos { Re_0, s })\n  }\n}\n"
  },
  {
    "path": "crypto/dleq/src/cross_group/bits.rs",
    "content": "use rand_core::{RngCore, CryptoRng};\n\nuse zeroize::Zeroize;\n\nuse transcript::Transcript;\n\nuse group::{ff::PrimeFieldBits, prime::PrimeGroup};\nuse multiexp::BatchVerifier;\n\nuse crate::cross_group::{\n  Generators, DLEqError,\n  aos::{Re, Aos},\n};\n\n#[cfg(feature = \"serialize\")]\nuse std::io::{Read, Write};\n#[cfg(feature = \"serialize\")]\nuse crate::cross_group::read_point;\n\n#[allow(clippy::enum_variant_names)]\npub(crate) enum BitSignature {\n  ClassicLinear,\n  ConciseLinear,\n  EfficientLinear,\n  CompromiseLinear,\n}\n\nimpl BitSignature {\n  pub(crate) const fn to_u8(&self) -> u8 {\n    match self {\n      BitSignature::ClassicLinear => 0,\n      BitSignature::ConciseLinear => 1,\n      BitSignature::EfficientLinear => 2,\n      BitSignature::CompromiseLinear => 3,\n    }\n  }\n\n  pub(crate) const fn from(algorithm: u8) -> BitSignature {\n    match algorithm {\n      0 => BitSignature::ClassicLinear,\n      1 => BitSignature::ConciseLinear,\n      2 => BitSignature::EfficientLinear,\n      3 => BitSignature::CompromiseLinear,\n      _ => panic!(\"Unknown algorithm\"),\n    }\n  }\n\n  pub(crate) const fn bits(&self) -> u8 {\n    match self {\n      BitSignature::ClassicLinear | BitSignature::EfficientLinear => 1,\n      BitSignature::ConciseLinear | BitSignature::CompromiseLinear => 2,\n    }\n  }\n\n  pub(crate) const fn ring_len(&self) -> usize {\n    2_usize.pow(self.bits() as u32)\n  }\n\n  fn aos_form<G0: PrimeGroup, G1: PrimeGroup>(&self) -> Re<G0, G1> {\n    match self {\n      BitSignature::ClassicLinear | BitSignature::ConciseLinear => Re::e_default(),\n      BitSignature::EfficientLinear | BitSignature::CompromiseLinear => Re::R_default(),\n    }\n  }\n}\n\n#[derive(Clone, PartialEq, Eq, Debug)]\npub(crate) struct Bits<\n  G0: PrimeGroup + Zeroize,\n  G1: PrimeGroup + Zeroize,\n  const SIGNATURE: u8,\n  const RING_LEN: usize,\n> {\n  pub(crate) commitments: (G0, G1),\n  signature: Aos<G0, G1, RING_LEN>,\n}\n\nimpl<\n   
 G0: PrimeGroup<Scalar: PrimeFieldBits + Zeroize> + Zeroize,\n    G1: PrimeGroup<Scalar: PrimeFieldBits + Zeroize> + Zeroize,\n    const SIGNATURE: u8,\n    const RING_LEN: usize,\n  > Bits<G0, G1, SIGNATURE, RING_LEN>\n{\n  fn transcript<T: Transcript>(transcript: &mut T, i: usize, commitments: (G0, G1)) {\n    transcript.domain_separate(b\"bits\");\n    transcript.append_message(b\"group\", u16::try_from(i).unwrap().to_le_bytes());\n    transcript.append_message(b\"commitment_0\", commitments.0.to_bytes());\n    transcript.append_message(b\"commitment_1\", commitments.1.to_bytes());\n  }\n\n  fn ring(pow_2: (G0, G1), commitments: (G0, G1)) -> Vec<(G0, G1)> {\n    let mut res = vec![commitments; RING_LEN];\n    for i in 1 .. RING_LEN {\n      res[i] = (res[i - 1].0 - pow_2.0, res[i - 1].1 - pow_2.1);\n    }\n    res\n  }\n\n  fn shift(pow_2: &mut (G0, G1)) {\n    for _ in 0 .. BitSignature::from(SIGNATURE).bits() {\n      pow_2.0 = pow_2.0.double();\n      pow_2.1 = pow_2.1.double();\n    }\n  }\n\n  pub(crate) fn prove<R: RngCore + CryptoRng, T: Clone + Transcript>(\n    rng: &mut R,\n    transcript: &mut T,\n    generators: (Generators<G0>, Generators<G1>),\n    i: usize,\n    pow_2: &mut (G0, G1),\n    mut bits: u8,\n    blinding_key: &mut (G0::Scalar, G1::Scalar),\n  ) -> Self {\n    let mut commitments =\n      ((generators.0.alt * blinding_key.0), (generators.1.alt * blinding_key.1));\n    commitments.0 += pow_2.0 * G0::Scalar::from(bits.into());\n    commitments.1 += pow_2.1 * G1::Scalar::from(bits.into());\n\n    Self::transcript(transcript, i, commitments);\n\n    let signature = Aos::prove(\n      rng,\n      transcript,\n      generators,\n      &Self::ring(*pow_2, commitments),\n      usize::from(bits),\n      blinding_key,\n      BitSignature::from(SIGNATURE).aos_form(),\n    );\n    bits.zeroize();\n\n    Self::shift(pow_2);\n    Bits { commitments, signature }\n  }\n\n  pub(crate) fn verify<R: RngCore + CryptoRng, T: Clone + Transcript>(\n    
&self,\n    rng: &mut R,\n    transcript: &mut T,\n    generators: (Generators<G0>, Generators<G1>),\n    batch: &mut (BatchVerifier<(), G0>, BatchVerifier<(), G1>),\n    i: usize,\n    pow_2: &mut (G0, G1),\n  ) -> Result<(), DLEqError> {\n    Self::transcript(transcript, i, self.commitments);\n\n    self.signature.verify(\n      rng,\n      transcript,\n      generators,\n      batch,\n      &Self::ring(*pow_2, self.commitments),\n    )?;\n\n    Self::shift(pow_2);\n    Ok(())\n  }\n\n  #[cfg(feature = \"serialize\")]\n  pub(crate) fn write<W: Write>(&self, w: &mut W) -> std::io::Result<()> {\n    w.write_all(self.commitments.0.to_bytes().as_ref())?;\n    w.write_all(self.commitments.1.to_bytes().as_ref())?;\n    self.signature.write(w)\n  }\n\n  #[cfg(feature = \"serialize\")]\n  pub(crate) fn read<R: Read>(r: &mut R) -> std::io::Result<Self> {\n    Ok(Bits {\n      commitments: (read_point(r)?, read_point(r)?),\n      signature: Aos::read(r, BitSignature::from(SIGNATURE).aos_form())?,\n    })\n  }\n}\n"
  },
  {
    "path": "crypto/dleq/src/cross_group/mod.rs",
    "content": "use core::ops::{Deref, DerefMut};\n#[cfg(feature = \"serialize\")]\nuse std::io::{self, Read, Write};\n\nuse thiserror::Error;\n\nuse rand_core::{RngCore, CryptoRng};\n\nuse zeroize::{Zeroize, Zeroizing};\n\nuse digest::{Digest, HashMarker};\n\nuse transcript::Transcript;\n\nuse group::{\n  ff::{Field, PrimeField, PrimeFieldBits},\n  prime::PrimeGroup,\n};\nuse multiexp::BatchVerifier;\n\n/// Scalar utilities.\npub mod scalar;\nuse scalar::{scalar_convert, mutual_scalar_from_bytes};\n\npub(crate) mod schnorr;\nuse self::schnorr::SchnorrPoK;\n\npub(crate) mod aos;\n\nmod bits;\nuse bits::{BitSignature, Bits};\n\n// Use black_box when possible\n#[rustversion::since(1.66)]\nuse core::hint::black_box;\n#[rustversion::before(1.66)]\nfn black_box<T>(val: T) -> T {\n  val\n}\n\nfn u8_from_bool(bit_ref: &mut bool) -> u8 {\n  let bit_ref = black_box(bit_ref);\n\n  let mut bit = black_box(*bit_ref);\n  #[allow(clippy::cast_lossless)]\n  let res = black_box(bit as u8);\n  bit.zeroize();\n  debug_assert!((res | 1) == 1);\n\n  bit_ref.zeroize();\n  res\n}\n\n#[cfg(feature = \"serialize\")]\npub(crate) fn read_point<R: Read, G: PrimeGroup>(r: &mut R) -> io::Result<G> {\n  let mut repr = G::Repr::default();\n  r.read_exact(repr.as_mut())?;\n  let point = G::from_bytes(&repr);\n  let Some(point) = Option::<G>::from(point) else { Err(io::Error::other(\"invalid point\"))? };\n  if point.to_bytes().as_ref() != repr.as_ref() {\n    Err(io::Error::other(\"non-canonical point\"))?;\n  }\n  Ok(point)\n}\n\n/// A pair of generators, one committing to values (primary), one blinding (alt), for an elliptic\n/// curve.\n#[derive(Clone, Copy, PartialEq, Eq)]\npub struct Generators<G: PrimeGroup> {\n  /// The generator used to commit to values.\n  ///\n  /// This should likely be the curve's traditional 'basepoint'.\n  pub primary: G,\n  /// The generator used to blind values. 
This must be distinct from the primary generator.\n  pub alt: G,\n}\n\nimpl<G: PrimeGroup> Generators<G> {\n  /// Create a new set of generators.\n  pub fn new(primary: G, alt: G) -> Option<Generators<G>> {\n    if primary == alt {\n      None?;\n    }\n    Some(Generators { primary, alt })\n  }\n\n  fn transcript<T: Transcript>(&self, transcript: &mut T) {\n    transcript.domain_separate(b\"generators\");\n    transcript.append_message(b\"primary\", self.primary.to_bytes());\n    transcript.append_message(b\"alternate\", self.alt.to_bytes());\n  }\n}\n\n/// Error for cross-group DLEq proofs.\n#[derive(Clone, Copy, PartialEq, Eq, Debug, Error)]\npub enum DLEqError {\n  /// Invalid proof length.\n  #[error(\"invalid proof length\")]\n  InvalidProofLength,\n  /// Invalid challenge.\n  #[error(\"invalid challenge\")]\n  InvalidChallenge,\n  /// Invalid proof.\n  #[error(\"invalid proof\")]\n  InvalidProof,\n}\n\n// This should never be directly instantiated and uses a u8 to represent internal values\n// Any external usage is likely invalid\n#[doc(hidden)]\n// Debug would be such a dump of data this likely isn't helpful, but at least it's available to\n// anyone who wants it\n#[derive(Clone, PartialEq, Eq, Debug)]\npub struct __DLEqProof<\n  G0: PrimeGroup<Scalar: PrimeFieldBits> + Zeroize,\n  G1: PrimeGroup<Scalar: PrimeFieldBits> + Zeroize,\n  const SIGNATURE: u8,\n  const RING_LEN: usize,\n  const REMAINDER_RING_LEN: usize,\n> {\n  bits: Vec<Bits<G0, G1, SIGNATURE, RING_LEN>>,\n  remainder: Option<Bits<G0, G1, SIGNATURE, REMAINDER_RING_LEN>>,\n  poks: (SchnorrPoK<G0>, SchnorrPoK<G1>),\n}\n\nmacro_rules! 
dleq {\n  ($doc_str: expr, $name: ident, $signature: expr, $remainder: literal,) => {\n    #[doc = $doc_str]\n    pub type $name<G0, G1> = __DLEqProof<\n      G0,\n      G1,\n      { $signature.to_u8() },\n      { $signature.ring_len() },\n      // There may not be a remainder, yet if there is one, it'll be just one bit\n      // A ring for one bit has a RING_LEN of 2\n      {\n        if $remainder {\n          2\n        } else {\n          0\n        }\n      },\n    >;\n  };\n}\n\n// Proves for 1-bit at a time with the signature form (e, s), as originally described in MRL-0010.\n// Uses a merged challenge, unlike MRL-0010, for the ring signature, saving an element from each\n// bit and removing a hash while slightly reducing challenge security. This security reduction is\n// already applied to the scalar being proven for, a result of the requirement it's mutually valid\n// over both scalar fields, hence its application here as well. This is mainly here as a point of\n// reference for the following DLEq proofs, all which use merged challenges, and isn't performant\n// in comparison to the others\ndleq!(\n  \"The DLEq proof described in MRL-0010.\",\n  ClassicLinearDLEq,\n  BitSignature::ClassicLinear,\n  false,\n);\n\n// Proves for 2-bits at a time to save 3/7 elements of every other bit\n// <9% smaller than CompromiseLinear, yet ~12% slower\ndleq!(\n  \"A DLEq proof modified from MRL-0010, proving for two bits at a time to save on space.\",\n  ConciseLinearDLEq,\n  BitSignature::ConciseLinear,\n  true,\n);\n\n// Uses AOS signatures of the form R, s, to enable the final step of the ring signature to be\n// batch verified, at the cost of adding an additional element per bit\ndleq!(\n  \"\n    A DLEq proof modified from MRL-0010, using R, s forms instead of c, s forms to enable batch\n    verification at the cost of space usage.\n  \",\n  EfficientLinearDLEq,\n  BitSignature::EfficientLinear,\n  false,\n);\n\n// Proves for 2-bits at a time while using the R, s 
form. This saves 3/7 elements of every other\n// bit, while adding 1 element to every bit, and is more efficient than ConciseLinear yet less\n// efficient than EfficientLinear due to having more ring signature steps which aren't batched\n// >25% smaller than EfficientLinear and just 11% slower, making it the recommended option\ndleq!(\n  \"\n    A DLEq proof modified from MRL-0010, using R, s forms instead of c, s forms, while proving for\n    two bits at a time, to enable batch verification and take advantage of space savings.\n\n    This isn't quite as efficient as EfficientLinearDLEq, and isn't as compact as\n    ConciseLinearDLEq, yet strikes a strong balance of performance and conciseness.\n  \",\n  CompromiseLinearDLEq,\n  BitSignature::CompromiseLinear,\n  true,\n);\n\nimpl<\n    G0: PrimeGroup<Scalar: PrimeFieldBits + Zeroize> + Zeroize,\n    G1: PrimeGroup<Scalar: PrimeFieldBits + Zeroize> + Zeroize,\n    const SIGNATURE: u8,\n    const RING_LEN: usize,\n    const REMAINDER_RING_LEN: usize,\n  > __DLEqProof<G0, G1, SIGNATURE, RING_LEN, REMAINDER_RING_LEN>\n{\n  pub(crate) fn transcript<T: Transcript>(\n    transcript: &mut T,\n    generators: (Generators<G0>, Generators<G1>),\n    keys: (G0, G1),\n  ) {\n    transcript.domain_separate(b\"cross_group_dleq\");\n    generators.0.transcript(transcript);\n    generators.1.transcript(transcript);\n    transcript.domain_separate(b\"points\");\n    transcript.append_message(b\"point_0\", keys.0.to_bytes());\n    transcript.append_message(b\"point_1\", keys.1.to_bytes());\n  }\n\n  pub(crate) fn blinding_key<R: RngCore + CryptoRng, F: PrimeField>(\n    rng: &mut R,\n    total: &mut F,\n    last: bool,\n  ) -> F {\n    let blinding_key = if last { -*total } else { F::random(&mut *rng) };\n    *total += blinding_key;\n    blinding_key\n  }\n\n  fn reconstruct_keys(&self) -> (G0, G1) {\n    let mut res = (\n      self.bits.iter().map(|bit| bit.commitments.0).sum::<G0>(),\n      self.bits.iter().map(|bit| 
bit.commitments.1).sum::<G1>(),\n    );\n\n    if let Some(bit) = &self.remainder {\n      res.0 += bit.commitments.0;\n      res.1 += bit.commitments.1;\n    }\n\n    res\n  }\n\n  #[allow(clippy::type_complexity)]\n  fn prove_internal<R: RngCore + CryptoRng, T: Clone + Transcript>(\n    rng: &mut R,\n    transcript: &mut T,\n    generators: (Generators<G0>, Generators<G1>),\n    f: (Zeroizing<G0::Scalar>, Zeroizing<G1::Scalar>),\n  ) -> (Self, (Zeroizing<G0::Scalar>, Zeroizing<G1::Scalar>)) {\n    Self::transcript(\n      transcript,\n      generators,\n      ((generators.0.primary * f.0.deref()), (generators.1.primary * f.1.deref())),\n    );\n\n    let poks = (\n      SchnorrPoK::<G0>::prove(rng, transcript, generators.0.primary, &f.0),\n      SchnorrPoK::<G1>::prove(rng, transcript, generators.1.primary, &f.1),\n    );\n\n    let mut blinding_key_total = (G0::Scalar::ZERO, G1::Scalar::ZERO);\n    let mut blinding_key = |rng: &mut R, last| {\n      let blinding_key = (\n        Self::blinding_key(&mut *rng, &mut blinding_key_total.0, last),\n        Self::blinding_key(&mut *rng, &mut blinding_key_total.1, last),\n      );\n      if last {\n        debug_assert_eq!(blinding_key_total.0, G0::Scalar::ZERO);\n        debug_assert_eq!(blinding_key_total.1, G1::Scalar::ZERO);\n      }\n      blinding_key\n    };\n\n    let capacity = usize::try_from(G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY)).unwrap();\n    let bits_per_group = usize::from(BitSignature::from(SIGNATURE).bits());\n\n    let mut pow_2 = (generators.0.primary, generators.1.primary);\n\n    let mut raw_bits = f.0.to_le_bits();\n    let mut bits = Vec::with_capacity(capacity);\n    let mut these_bits: u8 = 0;\n    // Needed to zero out the bits\n    #[allow(unused_assignments)]\n    for (i, mut bit) in raw_bits.iter_mut().enumerate() {\n      if i == capacity {\n        break;\n      }\n\n      // Accumulate this bit\n      let mut bit = u8_from_bool(bit.deref_mut());\n      these_bits |= bit << (i % 
bits_per_group);\n      bit.zeroize();\n\n      if (i % bits_per_group) == (bits_per_group - 1) {\n        let last = i == (capacity - 1);\n        let mut blinding_key = blinding_key(&mut *rng, last);\n        bits.push(Bits::prove(\n          &mut *rng,\n          transcript,\n          generators,\n          i / bits_per_group,\n          &mut pow_2,\n          these_bits,\n          &mut blinding_key,\n        ));\n        these_bits.zeroize();\n      }\n    }\n    debug_assert_eq!(bits.len(), capacity / bits_per_group);\n\n    let mut remainder = None;\n    if capacity != ((capacity / bits_per_group) * bits_per_group) {\n      let mut blinding_key = blinding_key(&mut *rng, true);\n      remainder = Some(Bits::prove(\n        &mut *rng,\n        transcript,\n        generators,\n        capacity / bits_per_group,\n        &mut pow_2,\n        these_bits,\n        &mut blinding_key,\n      ));\n    }\n\n    these_bits.zeroize();\n\n    let proof = __DLEqProof { bits, remainder, poks };\n    debug_assert_eq!(\n      proof.reconstruct_keys(),\n      (generators.0.primary * f.0.deref(), generators.1.primary * f.1.deref())\n    );\n    (proof, f)\n  }\n\n  /// Prove the Cross-Group Discrete Log Equality for the points derived from the scalar created as\n  /// the output of the passed in Digest.\n  ///\n  /// Given the non-standard requirements to achieve uniformity, needing to be < 2^x instead of\n  /// less than a prime moduli, this is the simplest way to safely and securely generate a Scalar,\n  /// without risk of failure nor bias.\n  ///\n  /// It also ensures a lack of determinable relation between keys, guaranteeing security in the\n  /// currently expected use case for this, atomic swaps, where each swap leaks the key. 
Knowing\n  /// the relationship between keys would allow breaking all swaps after just one.\n  #[allow(clippy::type_complexity)]\n  pub fn prove<R: RngCore + CryptoRng, T: Clone + Transcript, D: Digest + HashMarker>(\n    rng: &mut R,\n    transcript: &mut T,\n    generators: (Generators<G0>, Generators<G1>),\n    digest: D,\n  ) -> (Self, (Zeroizing<G0::Scalar>, Zeroizing<G1::Scalar>)) {\n    // This pattern theoretically prevents the compiler from moving it, so our protection against\n    // a copy remaining un-zeroized is actually what's causing a copy. There's still a feeling of\n    // safety granted by it, even if there's a loss in performance.\n    let (mut f0, mut f1) =\n      mutual_scalar_from_bytes::<G0::Scalar, G1::Scalar>(digest.finalize().as_ref());\n    let f = (Zeroizing::new(f0), Zeroizing::new(f1));\n    f0.zeroize();\n    f1.zeroize();\n\n    Self::prove_internal(rng, transcript, generators, f)\n  }\n\n  /// Prove the Cross-Group Discrete Log Equality for the points derived from the scalar passed in,\n  /// failing if it's not mutually valid.\n  ///\n  /// This allows for rejection sampling externally derived scalars until they're safely usable,\n  /// as needed.\n  #[allow(clippy::type_complexity)]\n  pub fn prove_without_bias<R: RngCore + CryptoRng, T: Clone + Transcript>(\n    rng: &mut R,\n    transcript: &mut T,\n    generators: (Generators<G0>, Generators<G1>),\n    f0: Zeroizing<G0::Scalar>,\n  ) -> Option<(Self, (Zeroizing<G0::Scalar>, Zeroizing<G1::Scalar>))> {\n    scalar_convert(*f0.deref()) // scalar_convert will zeroize it, though this is unfortunate\n      .map(|f1| Self::prove_internal(rng, transcript, generators, (f0, Zeroizing::new(f1))))\n  }\n\n  /// Verify a Cross-Group Discrete Log Equality proof, returning the points proven for.\n  pub fn verify<R: RngCore + CryptoRng, T: Clone + Transcript>(\n    &self,\n    rng: &mut R,\n    transcript: &mut T,\n    generators: (Generators<G0>, Generators<G1>),\n  ) -> Result<(G0, G1), 
DLEqError> {\n    let capacity = usize::try_from(G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY)).unwrap();\n    let bits_per_group = usize::from(BitSignature::from(SIGNATURE).bits());\n    let has_remainder = (capacity % bits_per_group) != 0;\n\n    // These shouldn't be possible, as locally created and deserialized proofs should be properly\n    // formed in these regards, yet it doesn't hurt to check and would be problematic if true\n    if (self.bits.len() != (capacity / bits_per_group)) ||\n      ((self.remainder.is_none() && has_remainder) ||\n        (self.remainder.is_some() && !has_remainder))\n    {\n      return Err(DLEqError::InvalidProofLength);\n    }\n\n    let keys = self.reconstruct_keys();\n    Self::transcript(transcript, generators, keys);\n\n    let batch_capacity = match BitSignature::from(SIGNATURE) {\n      BitSignature::ClassicLinear | BitSignature::ConciseLinear => 3,\n      BitSignature::EfficientLinear | BitSignature::CompromiseLinear => (self.bits.len() + 1) * 3,\n    };\n    let mut batch = (BatchVerifier::new(batch_capacity), BatchVerifier::new(batch_capacity));\n\n    self.poks.0.verify(&mut *rng, transcript, generators.0.primary, keys.0, &mut batch.0);\n    self.poks.1.verify(&mut *rng, transcript, generators.1.primary, keys.1, &mut batch.1);\n\n    let mut pow_2 = (generators.0.primary, generators.1.primary);\n    for (i, bits) in self.bits.iter().enumerate() {\n      bits.verify(&mut *rng, transcript, generators, &mut batch, i, &mut pow_2)?;\n    }\n    if let Some(bit) = &self.remainder {\n      bit.verify(&mut *rng, transcript, generators, &mut batch, self.bits.len(), &mut pow_2)?;\n    }\n\n    if (!batch.0.verify_vartime()) || (!batch.1.verify_vartime()) {\n      Err(DLEqError::InvalidProof)?;\n    }\n\n    Ok(keys)\n  }\n\n  /// Write a Cross-Group Discrete Log Equality proof to a type satisfying std::io::Write.\n  #[cfg(feature = \"serialize\")]\n  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {\n    for bit in 
&self.bits {\n      bit.write(w)?;\n    }\n    if let Some(bit) = &self.remainder {\n      bit.write(w)?;\n    }\n    self.poks.0.write(w)?;\n    self.poks.1.write(w)\n  }\n\n  /// Read a Cross-Group Discrete Log Equality proof from a type satisfying std::io::Read.\n  #[cfg(feature = \"serialize\")]\n  pub fn read<R: Read>(r: &mut R) -> io::Result<Self> {\n    let capacity = usize::try_from(G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY)).unwrap();\n    let bits_per_group = usize::from(BitSignature::from(SIGNATURE).bits());\n\n    let mut bits = Vec::with_capacity(capacity / bits_per_group);\n    for _ in 0 .. (capacity / bits_per_group) {\n      bits.push(Bits::read(r)?);\n    }\n\n    let mut remainder = None;\n    if (capacity % bits_per_group) != 0 {\n      remainder = Some(Bits::read(r)?);\n    }\n\n    Ok(__DLEqProof { bits, remainder, poks: (SchnorrPoK::read(r)?, SchnorrPoK::read(r)?) })\n  }\n}\n"
  },
  {
    "path": "crypto/dleq/src/cross_group/scalar.rs",
    "content": "use core::ops::DerefMut;\n\nuse ff::PrimeFieldBits;\n\nuse zeroize::Zeroize;\n\nuse crate::cross_group::u8_from_bool;\n\n/// Convert a uniform scalar into one usable on both fields, clearing the top bits as needed.\npub fn scalar_normalize<F0: PrimeFieldBits + Zeroize, F1: PrimeFieldBits>(\n  mut scalar: F0,\n) -> (F0, F1) {\n  let mutual_capacity = F0::CAPACITY.min(F1::CAPACITY);\n\n  // A mutual key is only as secure as its weakest group\n  // Accordingly, this bans a capacity difference of more than 4 bits to prevent a curve generally\n  // offering n-bits of security from being forced into a situation with much fewer bits\n  #[cfg(feature = \"secure_capacity_difference\")]\n  assert!((F0::CAPACITY.max(F1::CAPACITY) - mutual_capacity) <= 4);\n\n  let mut res1 = F0::ZERO;\n  let mut res2 = F1::ZERO;\n  // Uses the bits API to ensure a consistent endianness\n  let mut bits = scalar.to_le_bits();\n  scalar.zeroize();\n  // Convert it to big endian\n  bits.reverse();\n\n  let mut skip = bits.len() - usize::try_from(mutual_capacity).unwrap();\n  // Needed to zero out the bits\n  #[allow(unused_assignments)]\n  for mut bit in &mut bits {\n    if skip > 0 {\n      bit.deref_mut().zeroize();\n      skip -= 1;\n      continue;\n    }\n\n    res1 = res1.double();\n    res2 = res2.double();\n\n    let mut bit = u8_from_bool(bit.deref_mut());\n    res1 += F0::from(bit.into());\n    res2 += F1::from(bit.into());\n    bit.zeroize();\n  }\n\n  (res1, res2)\n}\n\n/// Helper to convert a scalar between fields. 
Returns None if the scalar isn't mutually valid.\npub fn scalar_convert<F0: PrimeFieldBits + Zeroize, F1: PrimeFieldBits>(\n  mut scalar: F0,\n) -> Option<F1> {\n  let (mut valid, converted) = scalar_normalize(scalar);\n  let res = Some(converted).filter(|_| scalar == valid);\n  scalar.zeroize();\n  valid.zeroize();\n  res\n}\n\n/// Create a mutually valid scalar from bytes via bit truncation to not introduce bias.\npub fn mutual_scalar_from_bytes<F0: PrimeFieldBits + Zeroize, F1: PrimeFieldBits>(\n  bytes: &[u8],\n) -> (F0, F1) {\n  let capacity = usize::try_from(F0::CAPACITY.min(F1::CAPACITY)).unwrap();\n  debug_assert!((bytes.len() * 8) >= capacity);\n\n  let mut accum = F0::ZERO;\n  for b in 0 .. capacity {\n    accum = accum.double();\n    accum += F0::from(((bytes[b / 8] >> (b % 8)) & 1).into());\n  }\n  (accum, scalar_convert(accum).unwrap())\n}\n"
  },
  {
    "path": "crypto/dleq/src/cross_group/schnorr.rs",
    "content": "use core::ops::Deref;\n\nuse rand_core::{RngCore, CryptoRng};\n\nuse zeroize::{Zeroize, Zeroizing};\n\nuse transcript::Transcript;\n\nuse group::{\n  ff::{Field, PrimeFieldBits},\n  prime::PrimeGroup,\n};\nuse multiexp::BatchVerifier;\n\nuse crate::challenge;\n\n#[cfg(feature = \"serialize\")]\nuse std::io::{Read, Write};\n#[cfg(feature = \"serialize\")]\nuse ff::PrimeField;\n#[cfg(feature = \"serialize\")]\nuse crate::{read_scalar, cross_group::read_point};\n\n#[allow(non_snake_case)]\n#[derive(Clone, PartialEq, Eq, Debug)]\npub(crate) struct SchnorrPoK<G: PrimeGroup + Zeroize> {\n  R: G,\n  s: G::Scalar,\n}\n\nimpl<G: PrimeGroup<Scalar: PrimeFieldBits + Zeroize> + Zeroize> SchnorrPoK<G> {\n  // Not HRAm due to the lack of m\n  #[allow(non_snake_case)]\n  fn hra<T: Transcript>(transcript: &mut T, generator: G, R: G, A: G) -> G::Scalar {\n    transcript.domain_separate(b\"schnorr_proof_of_knowledge\");\n    transcript.append_message(b\"generator\", generator.to_bytes());\n    transcript.append_message(b\"nonce\", R.to_bytes());\n    transcript.append_message(b\"public_key\", A.to_bytes());\n    challenge(transcript)\n  }\n\n  pub(crate) fn prove<R: RngCore + CryptoRng, T: Transcript>(\n    rng: &mut R,\n    transcript: &mut T,\n    generator: G,\n    private_key: &Zeroizing<G::Scalar>,\n  ) -> SchnorrPoK<G> {\n    let nonce = Zeroizing::new(G::Scalar::random(rng));\n    #[allow(non_snake_case)]\n    let R = generator * nonce.deref();\n    SchnorrPoK {\n      R,\n      s: (SchnorrPoK::hra(transcript, generator, R, generator * private_key.deref()) *\n        private_key.deref()) +\n        nonce.deref(),\n    }\n  }\n\n  pub(crate) fn verify<R: RngCore + CryptoRng, T: Transcript>(\n    &self,\n    rng: &mut R,\n    transcript: &mut T,\n    generator: G,\n    public_key: G,\n    batch: &mut BatchVerifier<(), G>,\n  ) {\n    batch.queue(\n      rng,\n      (),\n      [\n        (-self.s, generator),\n        (G::Scalar::ONE, self.R),\n        
(Self::hra(transcript, generator, self.R, public_key), public_key),\n      ],\n    );\n  }\n\n  #[cfg(feature = \"serialize\")]\n  pub fn write<W: Write>(&self, w: &mut W) -> std::io::Result<()> {\n    w.write_all(self.R.to_bytes().as_ref())?;\n    w.write_all(self.s.to_repr().as_ref())\n  }\n\n  #[cfg(feature = \"serialize\")]\n  pub fn read<R: Read>(r: &mut R) -> std::io::Result<SchnorrPoK<G>> {\n    Ok(SchnorrPoK { R: read_point(r)?, s: read_scalar(r)? })\n  }\n}\n"
  },
  {
    "path": "crypto/dleq/src/lib.rs",
    "content": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![cfg_attr(not(feature = \"std\"), no_std)]\n#![doc = include_str!(\"../README.md\")]\n\nuse core::ops::Deref;\n\nuse rand_core::{RngCore, CryptoRng};\n\nuse zeroize::{Zeroize, Zeroizing};\n\nuse transcript::Transcript;\n\nuse ff::{Field, PrimeField};\nuse group::prime::PrimeGroup;\n\n#[cfg(feature = \"serialize\")]\nuse std::io::{self, Error, Read, Write};\n\n/// A cross-group DLEq proof capable of proving that two public keys, across two different curves,\n/// share a private key.\n#[cfg(feature = \"experimental\")]\npub mod cross_group;\n\n#[cfg(test)]\nmod tests;\n\n// Produce a non-biased challenge from the transcript in the specified field\npub(crate) fn challenge<T: Transcript, F: PrimeField>(transcript: &mut T) -> F {\n  // From here, there are three ways to get a scalar under the ff/group API\n  // 1: Scalar::random(ChaCha20Rng::from_seed(self.transcript.rng_seed(b\"challenge\")))\n  // 2: Grabbing a UInt library to perform reduction by the modulus, then determining endianness\n  //    and loading it in\n  // 3: Iterating over each byte and manually doubling/adding. This is simplest\n\n  let mut challenge = F::ZERO;\n\n  // Get a wide amount of bytes to safely reduce without bias\n  // In most cases, <=1.5x bytes is enough. 
2x is still standard and there's some theoretical\n  // groups which may technically require more than 1.5x bytes for this to work as intended\n  let target_bytes = usize::try_from(F::NUM_BITS).unwrap().div_ceil(8) * 2;\n  let mut challenge_bytes = transcript.challenge(b\"challenge\");\n  let challenge_bytes_len = challenge_bytes.as_ref().len();\n  // If the challenge is 32 bytes, and we need 64, we need two challenges\n  let needed_challenges = target_bytes.div_ceil(challenge_bytes_len);\n\n  // The following algorithm should be equivalent to a wide reduction of the challenges,\n  // interpreted as concatenated, big-endian byte string\n  let mut handled_bytes = 0;\n  'outer: for _ in 0 ..= needed_challenges {\n    // Cursor of which byte of the challenge to use next\n    let mut b = 0;\n    while b < challenge_bytes_len {\n      // Get the next amount of bytes to attempt\n      // Only grabs the needed amount of bytes, up to 8 at a time (u64), so long as they're\n      // available in the challenge\n      let chunk_bytes = (target_bytes - handled_bytes).min(8).min(challenge_bytes_len - b);\n\n      let mut chunk = 0;\n      for _ in 0 .. chunk_bytes {\n        chunk <<= 8;\n        chunk |= u64::from(challenge_bytes.as_ref()[b]);\n        b += 1;\n      }\n      // Add this chunk\n      challenge += F::from(chunk);\n\n      handled_bytes += chunk_bytes;\n      // If we've reached the target amount of bytes, break\n      if handled_bytes == target_bytes {\n        break 'outer;\n      }\n\n      // Shift over by however many bits will be in the next chunk\n      let next_chunk_bytes = (target_bytes - handled_bytes).min(8).min(challenge_bytes_len);\n      for _ in 0 .. 
(next_chunk_bytes * 8) {\n        challenge = challenge.double();\n      }\n    }\n\n    // Secure thanks to the Transcript trait having a bound of updating on challenge\n    challenge_bytes = transcript.challenge(b\"challenge_extension\");\n  }\n\n  challenge\n}\n\n// Helper function to read a scalar\n#[cfg(feature = \"serialize\")]\nfn read_scalar<R: Read, F: PrimeField>(r: &mut R) -> io::Result<F> {\n  let mut repr = F::Repr::default();\n  r.read_exact(repr.as_mut())?;\n  let scalar = F::from_repr(repr);\n  if scalar.is_none().into() {\n    Err(Error::other(\"invalid scalar\"))?;\n  }\n  Ok(scalar.unwrap())\n}\n\n/// Error for DLEq proofs.\n#[derive(Clone, Copy, PartialEq, Eq, Debug)]\npub enum DLEqError {\n  /// The proof was invalid.\n  InvalidProof,\n}\n\n/// A proof that points have the same discrete logarithm across generators.\n#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]\npub struct DLEqProof<G: PrimeGroup<Scalar: Zeroize>> {\n  c: G::Scalar,\n  s: G::Scalar,\n}\n\n#[allow(non_snake_case)]\nimpl<G: PrimeGroup<Scalar: Zeroize>> DLEqProof<G> {\n  fn transcript<T: Transcript>(transcript: &mut T, generator: G, nonce: G, point: G) {\n    transcript.append_message(b\"generator\", generator.to_bytes());\n    transcript.append_message(b\"nonce\", nonce.to_bytes());\n    transcript.append_message(b\"point\", point.to_bytes());\n  }\n\n  /// Prove that the points created by `scalar * G`, for each specified generator, share a discrete\n  /// logarithm.\n  pub fn prove<R: RngCore + CryptoRng, T: Transcript>(\n    rng: &mut R,\n    transcript: &mut T,\n    generators: &[G],\n    scalar: &Zeroizing<G::Scalar>,\n  ) -> DLEqProof<G> {\n    let r = Zeroizing::new(G::Scalar::random(rng));\n\n    transcript.domain_separate(b\"dleq\");\n    for generator in generators {\n      // R, A\n      Self::transcript(transcript, *generator, *generator * r.deref(), *generator * scalar.deref());\n    }\n\n    let c = challenge(transcript);\n    // r + ca\n    let s = (c * 
scalar.deref()) + r.deref();\n\n    DLEqProof { c, s }\n  }\n\n  // Transcript a specific generator/nonce/point (G/R/A), as used when verifying a proof.\n  // This takes in the generator/point, and then the challenge and solution to calculate the nonce.\n  fn verify_statement<T: Transcript>(\n    transcript: &mut T,\n    generator: G,\n    point: G,\n    c: G::Scalar,\n    s: G::Scalar,\n  ) {\n    // s = r + ca\n    // sG - cA = R\n    // R, A\n    Self::transcript(transcript, generator, (generator * s) - (point * c), point);\n  }\n\n  /// Verify the specified points share a discrete logarithm across the specified generators.\n  pub fn verify<T: Transcript>(\n    &self,\n    transcript: &mut T,\n    generators: &[G],\n    points: &[G],\n  ) -> Result<(), DLEqError> {\n    if generators.len() != points.len() {\n      Err(DLEqError::InvalidProof)?;\n    }\n\n    transcript.domain_separate(b\"dleq\");\n    for (generator, point) in generators.iter().zip(points) {\n      Self::verify_statement(transcript, *generator, *point, self.c, self.s);\n    }\n\n    if self.c != challenge(transcript) {\n      Err(DLEqError::InvalidProof)?;\n    }\n\n    Ok(())\n  }\n\n  /// Write a DLEq proof to something implementing Write.\n  #[cfg(feature = \"serialize\")]\n  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {\n    w.write_all(self.c.to_repr().as_ref())?;\n    w.write_all(self.s.to_repr().as_ref())\n  }\n\n  /// Read a DLEq proof from something implementing Read.\n  #[cfg(feature = \"serialize\")]\n  pub fn read<R: Read>(r: &mut R) -> io::Result<DLEqProof<G>> {\n    Ok(DLEqProof { c: read_scalar(r)?, s: read_scalar(r)? 
})\n  }\n\n  /// Serialize a DLEq proof to a `Vec<u8>`.\n  #[cfg(feature = \"serialize\")]\n  pub fn serialize(&self) -> Vec<u8> {\n    let mut res = vec![];\n    self.write(&mut res).unwrap();\n    res\n  }\n}\n\n/// A proof that multiple series of points each have a single discrete logarithm across generators.\n///\n/// This is effectively n distinct DLEq proofs, one for each discrete logarithm and its points\n/// across some generators, yet with a smaller overall proof size.\n#[cfg(feature = \"std\")]\n#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]\npub struct MultiDLEqProof<G: PrimeGroup<Scalar: Zeroize>> {\n  c: G::Scalar,\n  s: Vec<G::Scalar>,\n}\n\n#[cfg(feature = \"std\")]\n#[allow(non_snake_case)]\nimpl<G: PrimeGroup<Scalar: Zeroize>> MultiDLEqProof<G> {\n  /// Prove for each scalar that the series of points created by multiplying it against its\n  /// matching generators share a discrete logarithm.\n  /// This function panics if `generators.len() != scalars.len()`.\n  pub fn prove<R: RngCore + CryptoRng, T: Transcript>(\n    rng: &mut R,\n    transcript: &mut T,\n    generators: &[Vec<G>],\n    scalars: &[Zeroizing<G::Scalar>],\n  ) -> MultiDLEqProof<G> {\n    assert_eq!(\n      generators.len(),\n      scalars.len(),\n      \"amount of series of generators doesn't match the amount of scalars\"\n    );\n\n    transcript.domain_separate(b\"multi_dleq\");\n\n    let mut nonces = vec![];\n    for (i, (scalar, generators)) in scalars.iter().zip(generators).enumerate() {\n      // Delineate between discrete logarithms\n      transcript.append_message(b\"discrete_logarithm\", i.to_le_bytes());\n\n      let nonce = Zeroizing::new(G::Scalar::random(&mut *rng));\n      for generator in generators {\n        DLEqProof::transcript(\n          transcript,\n          *generator,\n          *generator * nonce.deref(),\n          *generator * scalar.deref(),\n        );\n      }\n      nonces.push(nonce);\n    }\n\n    let c = challenge(transcript);\n\n    let mut s = 
vec![];\n    for (scalar, nonce) in scalars.iter().zip(nonces) {\n      s.push((c * scalar.deref()) + nonce.deref());\n    }\n\n    MultiDLEqProof { c, s }\n  }\n\n  /// Verify each series of points share a discrete logarithm against their matching series of\n  /// generators.\n  pub fn verify<T: Transcript>(\n    &self,\n    transcript: &mut T,\n    generators: &[Vec<G>],\n    points: &[Vec<G>],\n  ) -> Result<(), DLEqError> {\n    if points.len() != generators.len() {\n      Err(DLEqError::InvalidProof)?;\n    }\n    if self.s.len() != generators.len() {\n      Err(DLEqError::InvalidProof)?;\n    }\n\n    transcript.domain_separate(b\"multi_dleq\");\n    for (i, (generators, points)) in generators.iter().zip(points).enumerate() {\n      if points.len() != generators.len() {\n        Err(DLEqError::InvalidProof)?;\n      }\n\n      transcript.append_message(b\"discrete_logarithm\", i.to_le_bytes());\n      for (generator, point) in generators.iter().zip(points) {\n        DLEqProof::verify_statement(transcript, *generator, *point, self.c, self.s[i]);\n      }\n    }\n\n    if self.c != challenge(transcript) {\n      Err(DLEqError::InvalidProof)?;\n    }\n\n    Ok(())\n  }\n\n  /// Write a multi-DLEq proof to something implementing Write.\n  #[cfg(feature = \"serialize\")]\n  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {\n    w.write_all(self.c.to_repr().as_ref())?;\n    for s in &self.s {\n      w.write_all(s.to_repr().as_ref())?;\n    }\n    Ok(())\n  }\n\n  /// Read a multi-DLEq proof from something implementing Read.\n  #[cfg(feature = \"serialize\")]\n  pub fn read<R: Read>(r: &mut R, discrete_logs: usize) -> io::Result<MultiDLEqProof<G>> {\n    let c = read_scalar(r)?;\n    let mut s = vec![];\n    for _ in 0 .. 
discrete_logs {\n      s.push(read_scalar(r)?);\n    }\n    Ok(MultiDLEqProof { c, s })\n  }\n\n  /// Serialize a multi-DLEq proof to a `Vec<u8>`.\n  #[cfg(feature = \"serialize\")]\n  pub fn serialize(&self) -> Vec<u8> {\n    let mut res = vec![];\n    self.write(&mut res).unwrap();\n    res\n  }\n}\n"
  },
  {
    "path": "crypto/dleq/src/tests/cross_group/aos.rs",
    "content": "use rand_core::OsRng;\n\nuse group::{ff::Field, Group};\n\nuse multiexp::BatchVerifier;\n\nuse crate::{\n  cross_group::aos::{Re, Aos},\n  tests::cross_group::{G0, G1, transcript, generators},\n};\n\n#[allow(non_snake_case)]\n#[cfg(feature = \"serialize\")]\nfn test_aos_serialization<const RING_LEN: usize>(proof: &Aos<G0, G1, RING_LEN>, Re_0: Re<G0, G1>) {\n  let mut buf = vec![];\n  proof.write(&mut buf).unwrap();\n  let deserialized = Aos::read::<&[u8]>(&mut buf.as_ref(), Re_0).unwrap();\n  assert_eq!(proof, &deserialized);\n}\n\nfn test_aos<const RING_LEN: usize>(default: &Re<G0, G1>) {\n  let generators = generators();\n\n  let mut ring_keys = [(<G0 as Group>::Scalar::ZERO, <G1 as Group>::Scalar::ZERO); RING_LEN];\n  // Side-effect of G0 being a type-alias with identity() deprecated\n  #[allow(deprecated)]\n  let mut ring = [(G0::identity(), G1::identity()); RING_LEN];\n  for i in 0 .. RING_LEN {\n    ring_keys[i] =\n      (<G0 as Group>::Scalar::random(&mut OsRng), <G1 as Group>::Scalar::random(&mut OsRng));\n    ring[i] = (generators.0.alt * ring_keys[i].0, generators.1.alt * ring_keys[i].1);\n  }\n\n  for (actual, key) in ring_keys.iter_mut().enumerate() {\n    let proof = Aos::<_, _, RING_LEN>::prove(\n      &mut OsRng,\n      &transcript(),\n      generators,\n      &ring,\n      actual,\n      key,\n      default.clone(),\n    );\n\n    let mut batch = (BatchVerifier::new(0), BatchVerifier::new(0));\n    proof.verify(&mut OsRng, &transcript(), generators, &mut batch, &ring).unwrap();\n    // For e, these should have nothing. 
For R, these should have 6 elements each which sum to 0\n    assert!(batch.0.verify_vartime());\n    assert!(batch.1.verify_vartime());\n\n    #[cfg(feature = \"serialize\")]\n    test_aos_serialization(&proof, default.clone());\n  }\n}\n\n#[test]\nfn test_aos_e() {\n  test_aos::<2>(&Re::e_default());\n  test_aos::<4>(&Re::e_default());\n}\n\n#[allow(non_snake_case)]\n#[test]\nfn test_aos_R() {\n  // Batch verification appreciates the longer vectors, which means not batching bits\n  test_aos::<2>(&Re::R_default());\n}\n"
  },
  {
    "path": "crypto/dleq/src/tests/cross_group/mod.rs",
    "content": "use core::ops::Deref;\n\nuse hex_literal::hex;\n\nuse zeroize::Zeroizing;\nuse rand_core::{RngCore, OsRng};\n\nuse ff::{Field, PrimeField};\nuse group::{Group, GroupEncoding};\n\nuse blake2::{Digest, Blake2b512};\n\nuse k256::{Scalar, ProjectivePoint};\nuse dalek_ff_group::{self as dfg, EdwardsPoint};\n\nuse transcript::{Transcript, RecommendedTranscript};\n\nuse crate::{\n  cross_group::{\n    scalar::mutual_scalar_from_bytes, Generators, ClassicLinearDLEq, EfficientLinearDLEq,\n    ConciseLinearDLEq, CompromiseLinearDLEq,\n  },\n};\n\nmod scalar;\nmod aos;\n\ntype G0 = ProjectivePoint;\ntype G1 = EdwardsPoint;\n\npub(crate) fn transcript() -> RecommendedTranscript {\n  RecommendedTranscript::new(b\"Cross-Group DLEq Proof Test\")\n}\n\npub(crate) fn generators() -> (Generators<G0>, Generators<G1>) {\n  (\n    Generators::new(\n      ProjectivePoint::GENERATOR,\n      ProjectivePoint::from_bytes(\n        &(hex!(\"0250929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0\").into()),\n      )\n      .unwrap(),\n    )\n    .unwrap(),\n    Generators::new(\n      EdwardsPoint::generator(),\n      EdwardsPoint::from_bytes(&hex!(\n        \"8b655970153799af2aeadc9ff1add0ea6c7251d54154cfa92c173a0dd39c1f94\"\n      ))\n      .unwrap(),\n    )\n    .unwrap(),\n  )\n}\n\nmacro_rules! verify_and_deserialize {\n  ($type: ty, $proof: ident, $generators: ident, $keys: ident) => {\n    let public_keys = $proof.verify(&mut OsRng, &mut transcript(), $generators).unwrap();\n    assert_eq!($generators.0.primary * $keys.0.deref(), public_keys.0);\n    assert_eq!($generators.1.primary * $keys.1.deref(), public_keys.1);\n\n    #[cfg(feature = \"serialize\")]\n    {\n      let mut buf = vec![];\n      $proof.write(&mut buf).unwrap();\n      let deserialized = <$type>::read::<&[u8]>(&mut buf.as_ref()).unwrap();\n      assert_eq!($proof, deserialized);\n    }\n  };\n}\n\nmacro_rules! 
test_dleq {\n  ($str: literal, $benchmark: ident, $name: ident, $type: ident) => {\n    #[ignore]\n    #[test]\n    fn $benchmark() {\n      println!(\"Benchmarking with Secp256k1/Ed25519\");\n      let generators = generators();\n\n      let mut seed = [0; 32];\n      OsRng.fill_bytes(&mut seed);\n      let key = Blake2b512::new().chain_update(seed);\n\n      let runs = 200;\n      let mut proofs = Vec::with_capacity(usize::try_from(runs).unwrap());\n      let time = std::time::Instant::now();\n      for _ in 0 .. runs {\n        proofs.push($type::prove(&mut OsRng, &mut transcript(), generators, key.clone()).0);\n      }\n      println!(\"{} had a average prove time of {}ms\", $str, time.elapsed().as_millis() / runs);\n\n      let time = std::time::Instant::now();\n      for proof in &proofs {\n        proof.verify(&mut OsRng, &mut transcript(), generators).unwrap();\n      }\n      println!(\"{} had a average verify time of {}ms\", $str, time.elapsed().as_millis() / runs);\n\n      #[cfg(feature = \"serialize\")]\n      {\n        let mut buf = vec![];\n        proofs[0].write(&mut buf).unwrap();\n        println!(\"{} had a proof size of {} bytes\", $str, buf.len());\n      }\n    }\n\n    #[test]\n    fn $name() {\n      let generators = generators();\n\n      for i in 0 .. 
1 {\n        let (proof, keys) = if i == 0 {\n          let mut seed = [0; 32];\n          OsRng.fill_bytes(&mut seed);\n\n          $type::prove(\n            &mut OsRng,\n            &mut transcript(),\n            generators,\n            Blake2b512::new().chain_update(seed),\n          )\n        } else {\n          let mut key;\n          let mut res;\n          while {\n            key = Zeroizing::new(Scalar::random(&mut OsRng));\n            res = $type::prove_without_bias(&mut OsRng, &mut transcript(), generators, key.clone());\n            res.is_none()\n          } {}\n          let res = res.unwrap();\n          assert_eq!(key, res.1 .0);\n          res\n        };\n\n        verify_and_deserialize!($type::<G0, G1>, proof, generators, keys);\n      }\n    }\n  };\n}\n\ntest_dleq!(\"ClassicLinear\", benchmark_classic_linear, test_classic_linear, ClassicLinearDLEq);\ntest_dleq!(\"ConciseLinear\", benchmark_concise_linear, test_concise_linear, ConciseLinearDLEq);\ntest_dleq!(\n  \"EfficientLinear\",\n  benchmark_efficient_linear,\n  test_efficient_linear,\n  EfficientLinearDLEq\n);\ntest_dleq!(\n  \"CompromiseLinear\",\n  benchmark_compromise_linear,\n  test_compromise_linear,\n  CompromiseLinearDLEq\n);\n\n#[test]\nfn test_rejection_sampling() {\n  let mut pow_2 = Scalar::ONE;\n  for _ in 0 .. 
dfg::Scalar::CAPACITY {\n    pow_2 = pow_2.double();\n  }\n\n  assert!(\n    // Either would work\n    EfficientLinearDLEq::prove_without_bias(\n      &mut OsRng,\n      &mut transcript(),\n      generators(),\n      Zeroizing::new(pow_2)\n    )\n    .is_none()\n  );\n}\n\n#[test]\nfn test_remainder() {\n  // Uses Secp256k1 for both to achieve an odd capacity of 255\n  assert_eq!(Scalar::CAPACITY, 255);\n  let generators = (generators().0, generators().0);\n  // This will ignore any unused bits, ensuring every remaining one is set\n  let keys = mutual_scalar_from_bytes::<Scalar, Scalar>(&[0xFF; 32]);\n  let keys = (Zeroizing::new(keys.0), Zeroizing::new(keys.1));\n  assert_eq!(Scalar::ONE + keys.0.deref(), Scalar::from(2u64).pow_vartime([255]));\n  assert_eq!(keys.0, keys.1);\n\n  let (proof, res) = ConciseLinearDLEq::prove_without_bias(\n    &mut OsRng,\n    &mut transcript(),\n    generators,\n    keys.0.clone(),\n  )\n  .unwrap();\n  assert_eq!(keys, res);\n\n  verify_and_deserialize!(\n    ConciseLinearDLEq::<ProjectivePoint, ProjectivePoint>,\n    proof,\n    generators,\n    keys\n  );\n}\n"
  },
  {
    "path": "crypto/dleq/src/tests/cross_group/scalar.rs",
    "content": "use rand_core::OsRng;\n\nuse ff::{Field, PrimeField};\n\nuse k256::Scalar as K256Scalar;\nuse dalek_ff_group::Scalar as DalekScalar;\n\nuse crate::cross_group::scalar::{scalar_normalize, scalar_convert};\n\n#[test]\nfn test_scalar() {\n  assert_eq!(\n    scalar_normalize::<_, DalekScalar>(K256Scalar::ZERO),\n    (K256Scalar::ZERO, DalekScalar::ZERO)\n  );\n\n  assert_eq!(\n    scalar_normalize::<_, DalekScalar>(K256Scalar::ONE),\n    (K256Scalar::ONE, DalekScalar::ONE)\n  );\n\n  let mut initial;\n  while {\n    initial = K256Scalar::random(&mut OsRng);\n    let (k, ed) = scalar_normalize::<_, DalekScalar>(initial);\n\n    // The initial scalar should equal the new scalar with Ed25519's capacity\n    let mut initial_bytes = initial.to_repr().to_vec();\n    // Drop the first 4 bits to hit 252\n    initial_bytes[0] &= 0b00001111;\n    let k_bytes = k.to_repr().to_vec();\n    assert_eq!(initial_bytes, k_bytes);\n\n    let mut ed_bytes = ed.to_repr().as_ref().to_vec();\n    // Reverse to big endian\n    ed_bytes.reverse();\n    assert_eq!(k_bytes, ed_bytes);\n\n    // Verify conversion works as expected\n    assert_eq!(scalar_convert::<_, DalekScalar>(k), Some(ed));\n\n    // Run this test again if this secp256k1 scalar didn't have any bits cleared\n    initial == k\n  } {}\n  // Verify conversion returns None when the scalar isn't mutually valid\n  assert!(scalar_convert::<_, DalekScalar>(initial).is_none());\n}\n"
  },
  {
    "path": "crypto/dleq/src/tests/cross_group/schnorr.rs",
    "content": "use core::ops::Deref;\n\nuse rand_core::OsRng;\n\nuse zeroize::Zeroize;\n\nuse group::{\n  ff::{Field, PrimeFieldBits},\n  prime::PrimeGroup,\n};\nuse multiexp::BatchVerifier;\n\nuse transcript::{Transcript, RecommendedTranscript};\n\nuse crate::cross_group::schnorr::SchnorrPoK;\n\nfn test_schnorr<G: PrimeGroup<Scalar: PrimeFieldBits + Zeroize> + Zeroize>() {\n  let transcript = RecommendedTranscript::new(b\"Schnorr Test\");\n\n  let mut batch = BatchVerifier::new(10);\n  for _ in 0 .. 10 {\n    let private = Zeroizing::new(G::Scalar::random(&mut OsRng));\n    SchnorrPoK::prove(&mut OsRng, &mut transcript.clone(), G::generator(), &private).verify(\n      &mut OsRng,\n      &mut transcript.clone(),\n      G::generator(),\n      G::generator() * private.deref(),\n      &mut batch,\n    );\n  }\n\n  assert!(batch.verify_vartime());\n}\n\n#[test]\nfn test_secp256k1() {\n  test_schnorr::<k256::ProjectivePoint>();\n}\n\n#[test]\nfn test_ed25519() {\n  test_schnorr::<dalek_ff_group::EdwardsPoint>();\n}\n"
  },
  {
    "path": "crypto/dleq/src/tests/mod.rs",
    "content": "use core::ops::Deref;\n\nuse hex_literal::hex;\n\nuse rand_core::OsRng;\n\nuse zeroize::Zeroizing;\n\nuse ff::Field;\nuse group::GroupEncoding;\n\nuse k256::{Scalar, ProjectivePoint};\n\nuse transcript::{Transcript, RecommendedTranscript};\n\nuse crate::{DLEqProof, MultiDLEqProof};\n\n#[cfg(feature = \"experimental\")]\nmod cross_group;\n\nfn generators() -> [k256::ProjectivePoint; 5] {\n  [\n    ProjectivePoint::GENERATOR,\n    ProjectivePoint::from_bytes(\n      &(hex!(\"0250929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0\").into()),\n    )\n    .unwrap(),\n    // Just an increment of the last byte from the previous, where the previous two are valid\n    ProjectivePoint::from_bytes(\n      &(hex!(\"0250929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac4\").into()),\n    )\n    .unwrap(),\n    ProjectivePoint::from_bytes(\n      &(hex!(\"0250929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803aca\").into()),\n    )\n    .unwrap(),\n    ProjectivePoint::from_bytes(\n      &(hex!(\"0250929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803acb\").into()),\n    )\n    .unwrap(),\n  ]\n}\n\n#[test]\nfn test_dleq() {\n  let generators = generators();\n  let transcript = || RecommendedTranscript::new(b\"DLEq Proof Test\");\n\n  for i in 0 .. 5 {\n    let key = Zeroizing::new(Scalar::random(&mut OsRng));\n    let proof = DLEqProof::prove(&mut OsRng, &mut transcript(), &generators[.. i], &key);\n\n    let mut keys = [ProjectivePoint::GENERATOR; 5];\n    for k in 0 .. 5 {\n      keys[k] = generators[k] * key.deref();\n    }\n    proof.verify(&mut transcript(), &generators[.. i], &keys[.. i]).unwrap();\n    // Different challenge\n    assert!(proof\n      .verify(\n        &mut RecommendedTranscript::new(b\"different challenge\"),\n        &generators[.. i],\n        &keys[.. i]\n      )\n      .is_err());\n\n    // All of these following tests should effectively be a different challenge and accordingly\n    // pointless. 
They're still nice to have though\n\n    // We could edit these tests to always test with at least two generators\n    // Then we don't test proofs with zero/one generator(s)\n    // While those are stupid, and pointless, and potentially point to a failure in the caller,\n    // it could also be part of a dynamic system which deals with variable amounts of generators\n    // Not panicking in such use cases, even if they're inefficient, provides seamless behavior\n    if i >= 2 {\n      // Different generators\n      assert!(proof\n        .verify(\n          &mut transcript(),\n          generators[.. i].iter().copied().rev().collect::<Vec<_>>().as_ref(),\n          &keys[.. i]\n        )\n        .is_err());\n      // Different keys\n      assert!(proof\n        .verify(\n          &mut transcript(),\n          &generators[.. i],\n          keys[.. i].iter().copied().rev().collect::<Vec<_>>().as_ref()\n        )\n        .is_err());\n    }\n\n    #[cfg(feature = \"serialize\")]\n    {\n      let mut buf = vec![];\n      proof.write(&mut buf).unwrap();\n      let deserialized = DLEqProof::<ProjectivePoint>::read::<&[u8]>(&mut buf.as_ref()).unwrap();\n      assert_eq!(proof, deserialized);\n    }\n  }\n}\n\n#[test]\nfn test_multi_dleq() {\n  let generators = generators();\n  let transcript = || RecommendedTranscript::new(b\"MultiDLEq Proof Test\");\n\n  // Test up to 3 keys\n  for k in 0 ..= 3 {\n    let mut keys = vec![];\n    let mut these_generators = vec![];\n    let mut pub_keys = vec![];\n    for i in 0 .. 
k {\n      let key = Zeroizing::new(Scalar::random(&mut OsRng));\n      // For each key, test a variable set of generators\n      // 0: 0\n      // 1: 1, 2\n      // 2: 2, 3, 4\n      let key_generators = generators[i ..= (i + i)].to_vec();\n      let mut these_pub_keys = vec![];\n      for generator in &key_generators {\n        these_pub_keys.push(generator * key.deref());\n      }\n      keys.push(key);\n      these_generators.push(key_generators);\n      pub_keys.push(these_pub_keys);\n    }\n\n    let proof = MultiDLEqProof::prove(&mut OsRng, &mut transcript(), &these_generators, &keys);\n\n    proof.verify(&mut transcript(), &these_generators, &pub_keys).unwrap();\n    // Different challenge\n    assert!(proof\n      .verify(&mut RecommendedTranscript::new(b\"different challenge\"), &these_generators, &pub_keys)\n      .is_err());\n\n    // Test verifying for a different amount of keys fail\n    if k > 0 {\n      assert!(proof.verify(&mut transcript(), &these_generators, &pub_keys[.. k - 1]).is_err());\n    }\n\n    #[cfg(feature = \"serialize\")]\n    {\n      let mut buf = vec![];\n      proof.write(&mut buf).unwrap();\n      let deserialized =\n        MultiDLEqProof::<ProjectivePoint>::read::<&[u8]>(&mut buf.as_ref(), k).unwrap();\n      assert_eq!(proof, deserialized);\n    }\n  }\n}\n"
  },
  {
    "path": "crypto/ed448/Cargo.toml",
    "content": "[package]\nname = \"minimal-ed448\"\nversion = \"0.4.2\"\ndescription = \"Unaudited, inefficient implementation of Ed448 in Rust\"\nlicense = \"MIT\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/crypto/ed448\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nkeywords = [\"ed448\", \"ff\", \"group\"]\nedition = \"2021\"\nrust-version = \"1.65\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nrustversion = \"1\"\n\nrand_core = { version = \"0.6\", default-features = false }\n\nzeroize = { version = \"^1.5\", default-features = false, features = [\"zeroize_derive\"] }\nsubtle = { version = \"^2.4\", default-features = false }\n\nsha3 = { version = \"0.10\", default-features = false }\n\nff = { version = \"0.13\", default-features = false, features = [\"bits\"] }\ngroup = { version = \"0.13\", default-features = false }\nciphersuite = { path = \"../ciphersuite\", default-features = false }\n\ngeneric-array = { version = \"1\", default-features = false }\ncrypto-bigint = { version = \"0.5\", default-features = false, features = [\"zeroize\"] }\n\n[dev-dependencies]\nhex = { version = \"0.4\", default-features = false, features = [\"std\"] }\n\nrand_core = { version = \"0.6\", default-features = false, features = [\"std\"] }\n\nff-group-tests = { path = \"../ff-group-tests\" }\n\n[features]\nalloc = [\"zeroize/alloc\", \"ciphersuite/alloc\"]\nstd = [\"alloc\", \"rand_core/std\", \"zeroize/std\", \"subtle/std\", \"sha3/std\", \"ff/std\", \"ciphersuite/std\"]\ndefault = [\"std\"]\n"
  },
  {
    "path": "crypto/ed448/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2022-2023 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "crypto/ed448/README.md",
    "content": "# Minimal Ed448\n\nBarebones implementation of Ed448 bound to the ff/group API, rejecting torsion\nto achieve a PrimeGroup definition.\n\nThis library has not been audited. While it is complete, and decently tested,\nany usage of it should be carefully considered.\n\nconstant time and no_std.\n"
  },
  {
    "path": "crypto/ed448/src/backend.rs",
    "content": "use zeroize::Zeroize;\n\n// Use black_box when possible\n#[rustversion::since(1.66)]\nmod black_box {\n  pub(crate) fn black_box<T>(val: T) -> T {\n    #[allow(clippy::incompatible_msrv)]\n    core::hint::black_box(val)\n  }\n}\n#[rustversion::before(1.66)]\nmod black_box {\n  pub(crate) fn black_box<T>(val: T) -> T {\n    val\n  }\n}\nuse black_box::black_box;\n\npub(crate) fn u8_from_bool(bit_ref: &mut bool) -> u8 {\n  let bit_ref = black_box(bit_ref);\n\n  let mut bit = black_box(*bit_ref);\n  #[allow(clippy::cast_lossless)]\n  let res = black_box(bit as u8);\n  bit.zeroize();\n  debug_assert!((res | 1) == 1);\n\n  bit_ref.zeroize();\n  res\n}\n\nmacro_rules! math_op {\n  (\n    $Value: ident,\n    $Other: ident,\n    $Op: ident,\n    $op_fn: ident,\n    $Assign: ident,\n    $assign_fn: ident,\n    $function: expr\n  ) => {\n    impl $Op<$Other> for $Value {\n      type Output = $Value;\n      fn $op_fn(self, other: $Other) -> Self::Output {\n        $Value($function(self.0, other.0))\n      }\n    }\n    impl $Assign<$Other> for $Value {\n      fn $assign_fn(&mut self, other: $Other) {\n        self.0 = $function(self.0, other.0);\n      }\n    }\n    impl<'a> $Op<&'a $Other> for $Value {\n      type Output = $Value;\n      fn $op_fn(self, other: &'a $Other) -> Self::Output {\n        $Value($function(self.0, other.0))\n      }\n    }\n    impl<'a> $Assign<&'a $Other> for $Value {\n      fn $assign_fn(&mut self, other: &'a $Other) {\n        self.0 = $function(self.0, other.0);\n      }\n    }\n  };\n}\n\nmacro_rules! from_wrapper {\n  ($wrapper: ident, $inner: ident, $uint: ident) => {\n    impl From<$uint> for $wrapper {\n      fn from(a: $uint) -> $wrapper {\n        $wrapper(Residue::new(&$inner::from(a)))\n      }\n    }\n  };\n}\n\nmacro_rules! 
field {\n  (\n    $FieldName: ident,\n    $ResidueType: ident,\n\n    $MODULUS_STR: ident,\n    $MODULUS: ident,\n    $WIDE_MODULUS: ident,\n\n    $NUM_BITS: literal,\n\n    $MULTIPLICATIVE_GENERATOR: literal,\n    $DELTA: expr,\n  ) => {\n    use core::{\n      ops::{Add, AddAssign, Neg, Sub, SubAssign, Mul, MulAssign},\n      iter::{Sum, Product},\n    };\n\n    use subtle::{Choice, CtOption, ConstantTimeEq, ConstantTimeLess, ConditionallySelectable};\n    use rand_core::RngCore;\n\n    use generic_array::{typenum::U57, GenericArray};\n    use crypto_bigint::{Integer, NonZero, Encoding, impl_modulus};\n\n    use ff::{Field, PrimeField, FieldBits, PrimeFieldBits, helpers::sqrt_ratio_generic};\n\n    use $crate::backend::u8_from_bool;\n\n    fn reduce(x: U896) -> U448 {\n      U448::from_le_slice(&x.rem(&NonZero::new($WIDE_MODULUS).unwrap()).to_le_bytes()[.. 56])\n    }\n\n    impl ConstantTimeEq for $FieldName {\n      fn ct_eq(&self, other: &Self) -> Choice {\n        self.0.ct_eq(&other.0)\n      }\n    }\n\n    impl ConditionallySelectable for $FieldName {\n      fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {\n        $FieldName(Residue::conditional_select(&a.0, &b.0, choice))\n      }\n    }\n\n    math_op!($FieldName, $FieldName, Add, add, AddAssign, add_assign, |x: $ResidueType, y| x\n      .add(&y));\n    math_op!($FieldName, $FieldName, Sub, sub, SubAssign, sub_assign, |x: $ResidueType, y| x\n      .sub(&y));\n    math_op!($FieldName, $FieldName, Mul, mul, MulAssign, mul_assign, |x: $ResidueType, y| x\n      .mul(&y));\n\n    from_wrapper!($FieldName, U448, u8);\n    from_wrapper!($FieldName, U448, u16);\n    from_wrapper!($FieldName, U448, u32);\n    from_wrapper!($FieldName, U448, u64);\n    from_wrapper!($FieldName, U448, u128);\n\n    impl Neg for $FieldName {\n      type Output = $FieldName;\n      fn neg(self) -> $FieldName {\n        $FieldName(self.0.neg())\n      }\n    }\n\n    impl<'a> Neg for &'a $FieldName {\n      type 
Output = $FieldName;\n      fn neg(self) -> Self::Output {\n        (*self).neg()\n      }\n    }\n\n    impl $FieldName {\n      /// Perform an exponentiation.\n      pub fn pow(&self, other: $FieldName) -> $FieldName {\n        let mut table = [$FieldName(Residue::ONE); 16];\n        table[1] = *self;\n        for i in 2 .. 16 {\n          table[i] = table[i - 1] * self;\n        }\n\n        let mut res = $FieldName(Residue::ONE);\n        let mut bits = 0;\n        for (i, mut bit) in other.to_le_bits().iter_mut().rev().enumerate() {\n          bits <<= 1;\n          let mut bit = u8_from_bool(&mut bit);\n          bits |= bit;\n          bit.zeroize();\n\n          if ((i + 1) % 4) == 0 {\n            if i != 3 {\n              for _ in 0 .. 4 {\n                res *= res;\n              }\n            }\n\n            let mut scale_by = $FieldName(Residue::ONE);\n            #[allow(clippy::needless_range_loop)]\n            for i in 0 .. 16 {\n              #[allow(clippy::cast_possible_truncation)] // Safe since 0 .. 
16\n              {\n                scale_by = <_>::conditional_select(&scale_by, &table[i], bits.ct_eq(&(i as u8)));\n              }\n            }\n            res *= scale_by;\n            bits = 0;\n          }\n        }\n        res\n      }\n    }\n\n    impl Field for $FieldName {\n      const ZERO: Self = $FieldName(Residue::ZERO);\n      const ONE: Self = $FieldName(Residue::ONE);\n\n      fn random(mut rng: impl RngCore) -> Self {\n        let mut bytes = [0; 112];\n        rng.fill_bytes(&mut bytes);\n        $FieldName(Residue::new(&reduce(U896::from_le_slice(bytes.as_ref()))))\n      }\n\n      fn square(&self) -> Self {\n        *self * self\n      }\n      fn double(&self) -> Self {\n        *self + self\n      }\n\n      fn invert(&self) -> CtOption<Self> {\n        const NEG_2: $FieldName =\n          $FieldName($ResidueType::sub(&$ResidueType::ZERO, &$ResidueType::new(&U448::from_u8(2))));\n        CtOption::new(self.pow(NEG_2), !self.is_zero())\n      }\n\n      fn sqrt(&self) -> CtOption<Self> {\n        const MOD_1_4: $FieldName = $FieldName($ResidueType::new(\n          &$MODULUS.saturating_add(&U448::ONE).wrapping_div(&U448::from_u8(4)),\n        ));\n\n        let res = self.pow(MOD_1_4);\n        CtOption::new(res, res.square().ct_eq(self))\n      }\n\n      fn sqrt_ratio(num: &Self, div: &Self) -> (Choice, Self) {\n        sqrt_ratio_generic(num, div)\n      }\n    }\n\n    impl PrimeField for $FieldName {\n      type Repr = GenericArray<u8, U57>;\n\n      const MODULUS: &'static str = $MODULUS_STR;\n\n      const NUM_BITS: u32 = $NUM_BITS;\n      const CAPACITY: u32 = $NUM_BITS - 1;\n\n      const TWO_INV: Self = $FieldName($ResidueType::new(&U448::from_u8(2)).invert().0);\n\n      const MULTIPLICATIVE_GENERATOR: Self =\n        $FieldName(Residue::new(&U448::from_u8($MULTIPLICATIVE_GENERATOR)));\n      // True for both the Ed448 Scalar field and FieldElement field\n      const S: u32 = 1;\n\n      // Both fields have their root of 
unity as -1\n      const ROOT_OF_UNITY: Self =\n        $FieldName($ResidueType::sub(&$ResidueType::ZERO, &$ResidueType::new(&U448::ONE)));\n      const ROOT_OF_UNITY_INV: Self = $FieldName(Self::ROOT_OF_UNITY.0.invert().0);\n\n      const DELTA: Self = $FieldName(Residue::new(&U448::from_le_hex($DELTA)));\n\n      fn from_repr(bytes: Self::Repr) -> CtOption<Self> {\n        let res = U448::from_le_slice(&bytes[.. 56]);\n        CtOption::new($FieldName(Residue::new(&res)), res.ct_lt(&$MODULUS) & bytes[56].ct_eq(&0))\n      }\n      fn to_repr(&self) -> Self::Repr {\n        let mut repr = GenericArray::<u8, U57>::default();\n        repr[.. 56].copy_from_slice(&self.0.retrieve().to_le_bytes());\n        repr\n      }\n\n      fn is_odd(&self) -> Choice {\n        self.0.retrieve().is_odd()\n      }\n    }\n\n    impl PrimeFieldBits for $FieldName {\n      type ReprBits = [u8; 56];\n\n      fn to_le_bits(&self) -> FieldBits<Self::ReprBits> {\n        let mut repr = [0; 56];\n        repr.copy_from_slice(&self.to_repr()[.. 56]);\n        repr.into()\n      }\n\n      fn char_le_bits() -> FieldBits<Self::ReprBits> {\n        let mut repr = [0; 56];\n        repr.copy_from_slice(&MODULUS.to_le_bytes()[.. 
56]);\n        repr.into()\n      }\n    }\n\n    impl Sum<$FieldName> for $FieldName {\n      fn sum<I: Iterator<Item = $FieldName>>(iter: I) -> $FieldName {\n        let mut res = $FieldName::ZERO;\n        for item in iter {\n          res += item;\n        }\n        res\n      }\n    }\n\n    impl<'a> Sum<&'a $FieldName> for $FieldName {\n      fn sum<I: Iterator<Item = &'a $FieldName>>(iter: I) -> $FieldName {\n        iter.cloned().sum()\n      }\n    }\n\n    impl Product<$FieldName> for $FieldName {\n      fn product<I: Iterator<Item = $FieldName>>(iter: I) -> $FieldName {\n        let mut res = $FieldName::ONE;\n        for item in iter {\n          res *= item;\n        }\n        res\n      }\n    }\n\n    impl<'a> Product<&'a $FieldName> for $FieldName {\n      fn product<I: Iterator<Item = &'a $FieldName>>(iter: I) -> $FieldName {\n        iter.cloned().product()\n      }\n    }\n  };\n}\n"
  },
  {
    "path": "crypto/ed448/src/ciphersuite.rs",
    "content": "use zeroize::Zeroize;\n\nuse sha3::{\n  digest::{\n    typenum::U114, core_api::BlockSizeUser, Update, Output, OutputSizeUser, FixedOutput,\n    ExtendableOutput, XofReader, HashMarker, Digest,\n  },\n  Shake256,\n};\n\nuse group::Group;\nuse crate::{Scalar, Point};\n\nuse ciphersuite::Ciphersuite;\n\n/// Shake256, fixed to a 114-byte output, as used by Ed448.\n#[derive(Clone, Default)]\npub struct Shake256_114(Shake256);\nimpl BlockSizeUser for Shake256_114 {\n  type BlockSize = <Shake256 as BlockSizeUser>::BlockSize;\n  fn block_size() -> usize {\n    Shake256::block_size()\n  }\n}\nimpl OutputSizeUser for Shake256_114 {\n  type OutputSize = U114;\n  fn output_size() -> usize {\n    114\n  }\n}\nimpl Update for Shake256_114 {\n  fn update(&mut self, data: &[u8]) {\n    self.0.update(data);\n  }\n  fn chain(mut self, data: impl AsRef<[u8]>) -> Self {\n    Update::update(&mut self, data.as_ref());\n    self\n  }\n}\nimpl FixedOutput for Shake256_114 {\n  fn finalize_fixed(self) -> Output<Self> {\n    let mut res = Default::default();\n    FixedOutput::finalize_into(self, &mut res);\n    res\n  }\n  fn finalize_into(self, out: &mut Output<Self>) {\n    let mut reader = self.0.finalize_xof();\n    reader.read(out);\n  }\n}\nimpl HashMarker for Shake256_114 {}\n\n/// Ciphersuite for Ed448, inspired by RFC-8032. This is not recommended for usage.\n///\n/// hash_to_F is implemented with a naive concatenation of the dst and data, allowing transposition\n/// between the two. This means `dst: b\"abc\", data: b\"def\"`, will produce the same scalar as\n/// `dst: \"abcdef\", data: b\"\"`. 
Please use carefully, not letting dsts be substrings of each other.\n#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]\npub struct Ed448;\nimpl Ciphersuite for Ed448 {\n  type F = Scalar;\n  type G = Point;\n  type H = Shake256_114;\n\n  const ID: &'static [u8] = b\"ed448\";\n\n  fn generator() -> Self::G {\n    Point::generator()\n  }\n\n  fn hash_to_F(dst: &[u8], data: &[u8]) -> Self::F {\n    Scalar::wide_reduce(Self::H::digest([dst, data].concat()).as_ref().try_into().unwrap())\n  }\n}\n\n#[test]\nfn test_ed448() {\n  use ff::PrimeField;\n\n  ff_group_tests::group::test_prime_group_bits::<_, Point>(&mut rand_core::OsRng);\n\n  // Ideally, a test vector from RFC-8032 (not FROST) would be here\n  // Unfortunately, the IETF draft doesn't provide any vectors for the derived challenges\n  assert_eq!(\n    Ed448::hash_to_F(\n      b\"FROST-ED448-SHAKE256-v11nonce\",\n      &hex::decode(\n        \"\\\n89bf16040081ff2990336b200613787937ebe1f024b8cdff90eb6f1c741d91c1\\\n4a2b2f5858a932ad3d3b18bd16e76ced3070d72fd79ae4402df201f5\\\n25e754716a1bc1b87a502297f2a99d89ea054e0018eb55d39562fd01\\\n00\"\n      )\n      .unwrap()\n    )\n    .to_repr()\n    .to_vec(),\n    hex::decode(\n      \"\\\n67a6f023e77361707c6e894c625e809e80f33fdb310810053ae29e28\\\ne7011f3193b9020e73c183a98cc3a519160ed759376dd92c94831622\\\n00\"\n    )\n    .unwrap()\n  );\n}\n"
  },
  {
    "path": "crypto/ed448/src/field.rs",
    "content": "use zeroize::{DefaultIsZeroes, Zeroize};\n\nuse crypto_bigint::{\n  U448, U896,\n  modular::constant_mod::{ResidueParams, Residue},\n};\n\nconst MODULUS_STR: &str = concat!(\n  \"fffffffffffffffffffffffffffffffffffffffffffffffffffffffe\",\n  \"ffffffffffffffffffffffffffffffffffffffffffffffffffffffff\",\n);\n\nimpl_modulus!(FieldModulus, U448, MODULUS_STR);\npub(crate) type ResidueType = Residue<FieldModulus, { FieldModulus::LIMBS }>;\n\n/// Ed448 field element.\n#[derive(Clone, Copy, PartialEq, Eq, Default, Debug)]\npub struct FieldElement(pub(crate) ResidueType);\n\nimpl DefaultIsZeroes for FieldElement {}\n\n// 2**448 - 2**224 - 1\npub(crate) const MODULUS: U448 = U448::from_be_hex(MODULUS_STR);\n\nconst WIDE_MODULUS: U896 = U896::from_be_hex(concat!(\n  \"00000000000000000000000000000000000000000000000000000000\",\n  \"00000000000000000000000000000000000000000000000000000000\",\n  \"fffffffffffffffffffffffffffffffffffffffffffffffffffffffe\",\n  \"ffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"\n));\n\npub(crate) const Q_4: FieldElement = FieldElement(ResidueType::new(\n  &MODULUS.saturating_add(&U448::ONE).wrapping_div(&U448::from_u8(4)),\n));\n\nfield!(\n  FieldElement,\n  ResidueType,\n  MODULUS_STR,\n  MODULUS,\n  WIDE_MODULUS,\n  448,\n  7,\n  concat!(\n    \"31000000000000000000000000000000000000000000000000000000\",\n    \"00000000000000000000000000000000000000000000000000000000\",\n  ),\n);\n\n#[test]\nfn test_field() {\n  ff_group_tests::prime_field::test_prime_field_bits::<_, FieldElement>(&mut rand_core::OsRng);\n}\n"
  },
  {
    "path": "crypto/ed448/src/lib.rs",
    "content": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![doc = include_str!(\"../README.md\")]\n#![no_std]\n#![allow(clippy::redundant_closure_call)]\n\n#[macro_use]\nmod backend;\n\nmod scalar;\npub use scalar::Scalar;\n\nmod field;\npub use field::FieldElement;\n\nmod point;\npub use point::Point;\n\nmod ciphersuite;\npub use crate::ciphersuite::Ed448;\n"
  },
  {
    "path": "crypto/ed448/src/point.rs",
    "content": "use core::{\n  ops::{Add, AddAssign, Neg, Sub, SubAssign, Mul, MulAssign},\n  iter::Sum,\n};\n\nuse rand_core::RngCore;\n\nuse zeroize::Zeroize;\nuse subtle::{Choice, CtOption, ConstantTimeEq, ConditionallySelectable, ConditionallyNegatable};\n\nuse crypto_bigint::{U448, modular::constant_mod::Residue};\n\nuse group::{\n  ff::{Field, PrimeField, PrimeFieldBits},\n  Group, GroupEncoding,\n  prime::PrimeGroup,\n};\n\nuse crate::{\n  backend::u8_from_bool,\n  scalar::Scalar,\n  field::{ResidueType, FieldElement, Q_4},\n};\n\nconst D: FieldElement =\n  FieldElement(ResidueType::sub(&ResidueType::ZERO, &Residue::new(&U448::from_u16(39081))));\n\nconst G_Y: FieldElement = FieldElement(Residue::new(&U448::from_be_hex(concat!(\n  \"693f46716eb6bc248876203756c9c7624bea73736ca3984087789c1e\",\n  \"05a0c2d73ad3ff1ce67c39c4fdbd132c4ed7c8ad9808795bf230fa14\",\n))));\n\nconst G_X: FieldElement = FieldElement(Residue::new(&U448::from_be_hex(concat!(\n  \"4f1970c66bed0ded221d15a622bf36da9e146570470f1767ea6de324\",\n  \"a3d3a46412ae1af72ab66511433b80e18b00938e2626a82bc70cc05e\",\n))));\n\nfn recover_x(y: FieldElement) -> CtOption<FieldElement> {\n  let ysq = y.square();\n  #[allow(non_snake_case)]\n  let D_ysq = D * ysq;\n  (D_ysq - FieldElement::ONE).invert().and_then(|inverted| {\n    let temp = (ysq - FieldElement::ONE) * inverted;\n    let mut x = temp.pow(Q_4);\n    x.conditional_negate(x.is_odd());\n\n    let xsq = x.square();\n    CtOption::new(x, (xsq + ysq).ct_eq(&(FieldElement::ONE + (xsq * D_ysq))))\n  })\n}\n\n/// Ed448 point.\n#[derive(Clone, Copy, Debug)]\npub struct Point {\n  x: FieldElement,\n  y: FieldElement,\n  z: FieldElement,\n}\n\nimpl Zeroize for Point {\n  fn zeroize(&mut self) {\n    self.x.zeroize();\n    self.y.zeroize();\n    self.z.zeroize();\n    let identity = Self::identity();\n    self.x = identity.x;\n    self.y = identity.y;\n    self.z = identity.z;\n  }\n}\n\nconst G: Point = Point { x: G_X, y: G_Y, z: FieldElement::ONE 
};\n\nimpl ConstantTimeEq for Point {\n  fn ct_eq(&self, other: &Self) -> Choice {\n    let x1 = self.x * other.z;\n    let x2 = other.x * self.z;\n\n    let y1 = self.y * other.z;\n    let y2 = other.y * self.z;\n\n    x1.ct_eq(&x2) & y1.ct_eq(&y2)\n  }\n}\n\nimpl PartialEq for Point {\n  fn eq(&self, other: &Point) -> bool {\n    self.ct_eq(other).into()\n  }\n}\n\nimpl Eq for Point {}\n\nimpl ConditionallySelectable for Point {\n  fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {\n    Point {\n      x: FieldElement::conditional_select(&a.x, &b.x, choice),\n      y: FieldElement::conditional_select(&a.y, &b.y, choice),\n      z: FieldElement::conditional_select(&a.z, &b.z, choice),\n    }\n  }\n}\n\nimpl Add for Point {\n  type Output = Point;\n  fn add(self, other: Self) -> Self {\n    // 12 muls, 7 additions, 4 negations\n    let xcp = self.x * other.x;\n    let ycp = self.y * other.y;\n    let zcp = self.z * other.z;\n    #[allow(non_snake_case)]\n    let B = zcp.square();\n    #[allow(non_snake_case)]\n    let E = D * xcp * ycp;\n    #[allow(non_snake_case)]\n    let F = B - E;\n    #[allow(non_snake_case)]\n    let G_ = B + E;\n\n    Point {\n      x: zcp * F * ((self.x + self.y) * (other.x + other.y) - xcp - ycp),\n      y: zcp * G_ * (ycp - xcp),\n      z: F * G_,\n    }\n  }\n}\n\nimpl AddAssign for Point {\n  fn add_assign(&mut self, other: Point) {\n    *self = *self + other;\n  }\n}\n\nimpl Add<&Point> for Point {\n  type Output = Point;\n  fn add(self, other: &Point) -> Point {\n    self + *other\n  }\n}\n\nimpl AddAssign<&Point> for Point {\n  fn add_assign(&mut self, other: &Point) {\n    *self += *other;\n  }\n}\n\nimpl Neg for Point {\n  type Output = Point;\n  fn neg(self) -> Self {\n    Point { x: -self.x, y: self.y, z: self.z }\n  }\n}\n\nimpl Sub for Point {\n  type Output = Point;\n  #[allow(clippy::suspicious_arithmetic_impl)]\n  fn sub(self, other: Self) -> Self {\n    self + other.neg()\n  }\n}\n\nimpl SubAssign for Point 
{\n  fn sub_assign(&mut self, other: Point) {\n    *self = *self - other;\n  }\n}\n\nimpl Sub<&Point> for Point {\n  type Output = Point;\n  fn sub(self, other: &Point) -> Point {\n    self - *other\n  }\n}\n\nimpl SubAssign<&Point> for Point {\n  fn sub_assign(&mut self, other: &Point) {\n    *self -= *other;\n  }\n}\n\nimpl Group for Point {\n  type Scalar = Scalar;\n  fn random(mut rng: impl RngCore) -> Self {\n    loop {\n      let mut bytes = FieldElement::random(&mut rng).to_repr();\n      let mut_ref: &mut [u8] = bytes.as_mut();\n      mut_ref[56] |= u8::try_from(rng.next_u32() % 2).unwrap() << 7;\n      let opt = Self::from_bytes(&bytes);\n      if opt.is_some().into() {\n        return opt.unwrap();\n      }\n    }\n  }\n  fn identity() -> Self {\n    Point { x: FieldElement::ZERO, y: FieldElement::ONE, z: FieldElement::ONE }\n  }\n  fn generator() -> Self {\n    G\n  }\n  fn is_identity(&self) -> Choice {\n    self.ct_eq(&Self::identity())\n  }\n  fn double(&self) -> Self {\n    // 7 muls, 7 additions, 4 negations\n    let xsq = self.x.square();\n    let ysq = self.y.square();\n    let zsq = self.z.square();\n    let xy = self.x + self.y;\n    #[allow(non_snake_case)]\n    let F = xsq + ysq;\n    #[allow(non_snake_case)]\n    let J = F - zsq.double();\n    Point { x: J * (xy.square() - xsq - ysq), y: F * (xsq - ysq), z: F * J }\n  }\n}\n\nimpl Sum<Point> for Point {\n  fn sum<I: Iterator<Item = Point>>(iter: I) -> Point {\n    let mut res = Self::identity();\n    for i in iter {\n      res += i;\n    }\n    res\n  }\n}\n\nimpl<'a> Sum<&'a Point> for Point {\n  fn sum<I: Iterator<Item = &'a Point>>(iter: I) -> Point {\n    Point::sum(iter.copied())\n  }\n}\n\nimpl Mul<Scalar> for Point {\n  type Output = Point;\n  fn mul(self, mut other: Scalar) -> Point {\n    // Precompute the optimal amount that's a multiple of 2\n    let mut table = [Point::identity(); 16];\n    table[1] = self;\n    for i in 2 .. 
16 {\n      table[i] = table[i - 1] + self;\n    }\n\n    let mut res = Self::identity();\n    let mut bits = 0;\n    for (i, mut bit) in other.to_le_bits().iter_mut().rev().enumerate() {\n      bits <<= 1;\n      let mut bit = u8_from_bool(&mut bit);\n      bits |= bit;\n      bit.zeroize();\n\n      if ((i + 1) % 4) == 0 {\n        if i != 3 {\n          for _ in 0 .. 4 {\n            res = res.double();\n          }\n        }\n\n        let mut add_by = Point::identity();\n        #[allow(clippy::needless_range_loop)]\n        for i in 0 .. 16 {\n          #[allow(clippy::cast_possible_truncation)] // Safe since 0 .. 16\n          {\n            add_by = <_>::conditional_select(&add_by, &table[i], bits.ct_eq(&(i as u8)));\n          }\n        }\n        res += add_by;\n        bits = 0;\n      }\n    }\n    other.zeroize();\n    res\n  }\n}\n\nimpl MulAssign<Scalar> for Point {\n  fn mul_assign(&mut self, other: Scalar) {\n    *self = *self * other;\n  }\n}\n\nimpl Mul<&Scalar> for Point {\n  type Output = Point;\n  fn mul(self, other: &Scalar) -> Point {\n    self * *other\n  }\n}\n\nimpl MulAssign<&Scalar> for Point {\n  fn mul_assign(&mut self, other: &Scalar) {\n    *self *= *other;\n  }\n}\n\nimpl Point {\n  fn is_torsion_free(&self) -> Choice {\n    ((*self * (Scalar::ZERO - Scalar::ONE)) + self).is_identity()\n  }\n}\n\nimpl GroupEncoding for Point {\n  type Repr = <FieldElement as PrimeField>::Repr;\n\n  fn from_bytes(bytes: &Self::Repr) -> CtOption<Self> {\n    // Extract and clear the sign bit\n    let sign = Choice::from(bytes[56] >> 7);\n    let mut bytes = *bytes;\n    let mut_ref: &mut [u8] = bytes.as_mut();\n    mut_ref[56] &= !(1 << 7);\n\n    // Parse y, recover x\n    FieldElement::from_repr(bytes).and_then(|y| {\n      recover_x(y).and_then(|mut x| {\n        x.conditional_negate(x.is_odd().ct_eq(&!sign));\n        let not_negative_zero = !(x.is_zero() & sign);\n        let point = Point { x, y, z: FieldElement::ONE };\n        
CtOption::new(point, not_negative_zero & point.is_torsion_free())\n      })\n    })\n  }\n\n  fn from_bytes_unchecked(bytes: &Self::Repr) -> CtOption<Self> {\n    Point::from_bytes(bytes)\n  }\n\n  fn to_bytes(&self) -> Self::Repr {\n    let z = self.z.invert().unwrap();\n    let x = self.x * z;\n    let y = self.y * z;\n\n    let mut bytes = y.to_repr();\n    let mut_ref: &mut [u8] = bytes.as_mut();\n    mut_ref[56] |= x.is_odd().unwrap_u8() << 7;\n    bytes\n  }\n}\n\nimpl PrimeGroup for Point {}\n\n#[test]\nfn test_group() {\n  ff_group_tests::group::test_prime_group_bits::<_, Point>(&mut rand_core::OsRng);\n}\n\n#[test]\nfn generator() {\n  assert!(G.x == G_X);\n  assert!(G.y == G_Y);\n  assert!(recover_x(G.y).unwrap() == G.x);\n}\n\n#[test]\nfn torsion() {\n  use generic_array::GenericArray;\n\n  // Uses the originally suggested generator which had torsion\n  let old_y = FieldElement::from_repr(*GenericArray::from_slice(\n    &hex::decode(\n      \"\\\n12796c1532041525945f322e414d434467cfd5c57c9a9af2473b2775\\\n8c921c4828b277ca5f2891fc4f3d79afdf29a64c72fb28b59c16fa51\\\n00\",\n    )\n    .unwrap(),\n  ))\n  .unwrap();\n  let old = Point { x: -recover_x(old_y).unwrap(), y: old_y, z: FieldElement::ONE };\n  assert!(bool::from(!old.is_torsion_free()));\n}\n\n#[test]\nfn vector() {\n  use generic_array::GenericArray;\n\n  assert_eq!(\n    Point::generator().double(),\n    Point::from_bytes(GenericArray::from_slice(\n      &hex::decode(\n        \"\\\ned8693eacdfbeada6ba0cdd1beb2bcbb98302a3a8365650db8c4d88a\\\n726de3b7d74d8835a0d76e03b0c2865020d659b38d04d74a63e905ae\\\n80\"\n      )\n      .unwrap()\n    ))\n    .unwrap()\n  );\n\n  assert_eq!(\n    Point::generator() *\n      Scalar::from_repr(*GenericArray::from_slice(\n        &hex::decode(\n          \"\\\n6298e1eef3c379392caaed061ed8a31033c9e9e3420726f23b404158\\\na401cd9df24632adfe6b418dc942d8a091817dd8bd70e1c72ba52f3c\\\n00\"\n        )\n        .unwrap()\n      ))\n      .unwrap(),\n    
Point::from_bytes(GenericArray::from_slice(\n      &hex::decode(\n        \"\\\n3832f82fda00ff5365b0376df705675b63d2a93c24c6e81d40801ba2\\\n65632be10f443f95968fadb70d10786827f30dc001c8d0f9b7c1d1b0\\\n00\"\n      )\n      .unwrap()\n    ))\n    .unwrap()\n  );\n}\n\n// Checks random won't infinitely loop\n#[test]\nfn random() {\n  Point::random(&mut rand_core::OsRng);\n}\n"
  },
  {
    "path": "crypto/ed448/src/scalar.rs",
    "content": "use zeroize::{DefaultIsZeroes, Zeroize};\n\nuse crypto_bigint::{\n  U448, U896, U1024,\n  modular::constant_mod::{ResidueParams, Residue},\n};\n\nconst MODULUS_STR: &str = concat!(\n  \"3fffffffffffffffffffffffffffffffffffffffffffffffffffffff\",\n  \"7cca23e9c44edb49aed63690216cc2728dc58f552378c292ab5844f3\",\n);\n\nimpl_modulus!(ScalarModulus, U448, MODULUS_STR);\ntype ResidueType = Residue<ScalarModulus, { ScalarModulus::LIMBS }>;\n\n/// Ed448 Scalar field element.\n#[derive(Clone, Copy, PartialEq, Eq, Default, Debug)]\npub struct Scalar(pub(crate) ResidueType);\n\nimpl DefaultIsZeroes for Scalar {}\n\n// 2**446 - 13818066809895115352007386748515426880336692474882178609894547503885\npub(crate) const MODULUS: U448 = U448::from_be_hex(MODULUS_STR);\n\nconst WIDE_MODULUS: U896 = U896::from_be_hex(concat!(\n  \"00000000000000000000000000000000000000000000000000000000\",\n  \"00000000000000000000000000000000000000000000000000000000\",\n  \"3fffffffffffffffffffffffffffffffffffffffffffffffffffffff\",\n  \"7cca23e9c44edb49aed63690216cc2728dc58f552378c292ab5844f3\",\n));\n\nconst WIDE_REDUCTION_MODULUS: NonZero<U1024> = NonZero::from_uint(U1024::from_be_hex(concat!(\n  \"00000000000000000000000000000000\",\n  \"00000000000000000000000000000000000000000000000000000000\",\n  \"00000000000000000000000000000000000000000000000000000000\",\n  \"3fffffffffffffffffffffffffffffffffffffffffffffffffffffff\",\n  \"7cca23e9c44edb49aed63690216cc2728dc58f552378c292ab5844f3\",\n)));\n\nfield!(\n  Scalar,\n  ResidueType,\n  MODULUS_STR,\n  MODULUS,\n  WIDE_MODULUS,\n  446,\n  2,\n  concat!(\n    \"04000000000000000000000000000000000000000000000000000000\",\n    \"00000000000000000000000000000000000000000000000000000000\",\n  ),\n);\n\nimpl Scalar {\n  /// Perform a wide reduction to obtain a non-biased Scalar.\n  pub fn wide_reduce(bytes: [u8; 114]) -> Scalar {\n    let mut bytes_128 = [0; 128];\n    bytes_128[.. 
114].copy_from_slice(&bytes);\n    let wide = U1024::from_le_slice(&bytes_128);\n    Scalar(Residue::new(&U448::from_le_slice(\n      &wide.rem(&WIDE_REDUCTION_MODULUS).to_le_bytes()[.. 56],\n    )))\n  }\n}\n\n#[test]\nfn test_scalar() {\n  ff_group_tests::prime_field::test_prime_field_bits::<_, Scalar>(&mut rand_core::OsRng);\n}\n"
  },
  {
    "path": "crypto/ff-group-tests/Cargo.toml",
    "content": "[package]\nname = \"ff-group-tests\"\nversion = \"0.13.2\"\ndescription = \"A collection of sanity tests for implementors of ff/group APIs\"\nlicense = \"MIT\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/crypto/ff-group-tests\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nkeywords = [\"ff\", \"group\", \"ecc\"]\nedition = \"2021\"\nrust-version = \"1.79\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nrand_core = \"0.6\"\n\nsubtle = \"^2.4\"\n\nff = { version = \"0.13\", features = [\"bits\"] }\ngroup = \"0.13\"\n\n[dev-dependencies]\nk256 = { version = \"^0.13.1\", default-features = false, features = [\"std\", \"arithmetic\", \"bits\"] }\np256 = { version = \"^0.13.1\", default-features = false, features = [\"std\", \"arithmetic\", \"bits\"] }\n\nbls12_381 = \"0.8\"\n\npasta_curves = \"0.5\"\n"
  },
  {
    "path": "crypto/ff-group-tests/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2022-2023 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "crypto/ff-group-tests/README.md",
    "content": "# FF/Group Tests\n\nA series of sanity checks for implementors of the ff/group APIs.\n\nImplementors are assumed to be of a non-trivial size. These tests do not attempt\nto check if constant time implementations are used.\n\nThis library was\n[audited by Cypher Stack in March 2023](https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf),\nculminating in commit\n[669d2dbffc1dafb82a09d9419ea182667115df06](https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06).\nAny subsequent changes have not undergone auditing.\n"
  },
  {
    "path": "crypto/ff-group-tests/src/field.rs",
    "content": "use rand_core::RngCore;\nuse subtle::Choice;\nuse group::ff::Field;\n\n/// Perform basic tests on equality.\npub fn test_eq<F: Field>() {\n  let zero = F::ZERO;\n  let one = F::ONE;\n\n  assert!(zero != one, \"0 == 1\");\n  assert!(!bool::from(zero.ct_eq(&one)), \"0 ct_eq 1\");\n\n  assert_eq!(zero, F::ZERO, \"0 != 0\");\n  assert!(bool::from(zero.ct_eq(&F::ZERO)), \"0 !ct_eq 0\");\n\n  assert_eq!(one, F::ONE, \"1 != 1\");\n  assert!(bool::from(one.ct_eq(&F::ONE)), \"1 !ct_eq 1\");\n}\n\n/// Verify conditional selection works. Doesn't verify it's actually constant time.\npub fn test_conditional_select<F: Field>() {\n  let zero = F::ZERO;\n  let one = F::ONE;\n  assert_eq!(F::conditional_select(&zero, &one, 0.into()), zero, \"couldn't select when false\");\n  assert_eq!(F::conditional_select(&zero, &one, 1.into()), one, \"couldn't select when true\");\n}\n\n/// Perform basic tests on addition.\npub fn test_add<F: Field>() {\n  assert_eq!(F::ZERO + F::ZERO, F::ZERO, \"0 + 0 != 0\");\n  assert_eq!(F::ZERO + F::ONE, F::ONE, \"0 + 1 != 1\");\n  assert_eq!(F::ONE + F::ZERO, F::ONE, \"1 + 0 != 1\");\n  // Only PrimeField offers From<u64>\n  // Accordingly, we assume either double or addition is correct\n  // They either have to be matchingly correct or matchingly incorrect, yet we can't\n  // reliably determine that here\n  assert_eq!(F::ONE + F::ONE, F::ONE.double(), \"1 + 1 != 2\");\n}\n\n/// Perform basic tests on sum.\npub fn test_sum<F: Field>() {\n  assert_eq!((&[] as &[F]).iter().sum::<F>(), F::ZERO, \"[].sum() != 0\");\n  assert_eq!([F::ZERO].iter().sum::<F>(), F::ZERO, \"[0].sum() != 0\");\n  assert_eq!([F::ONE].iter().sum::<F>(), F::ONE, \"[1].sum() != 1\");\n\n  let two = F::ONE + F::ONE;\n  assert_eq!([F::ONE, F::ONE].iter().sum::<F>(), two, \"[1, 1].sum() != 2\");\n  assert_eq!([two, F::ONE].iter().sum::<F>(), two + F::ONE, \"[2, 1].sum() != 3\");\n  assert_eq!([two, F::ZERO, F::ONE].iter().sum::<F>(), two + F::ONE, \"[2, 0, 1].sum() != 
3\");\n}\n\n/// Perform basic tests on subtraction.\npub fn test_sub<F: Field>() {\n  #[allow(clippy::eq_op)]\n  let expr = F::ZERO - F::ZERO;\n  assert_eq!(expr, F::ZERO, \"0 - 0 != 0\");\n  assert_eq!(F::ONE - F::ZERO, F::ONE, \"1 - 0 != 1\");\n  #[allow(clippy::eq_op)]\n  let expr = F::ONE - F::ONE;\n  assert_eq!(expr, F::ZERO, \"1 - 1 != 0\");\n}\n\n/// Perform basic tests on negation.\npub fn test_neg<F: Field>() {\n  assert_eq!(-F::ZERO, F::ZERO, \"-0 != 0\");\n  assert_eq!(-(-F::ONE), F::ONE, \"-(-1) != 1\");\n  assert_eq!(F::ONE + (-F::ONE), F::ZERO, \"1 + -1 != 0\");\n  assert_eq!(F::ONE - (-F::ONE), F::ONE.double(), \"1 - -1 != 2\");\n}\n\n/// Perform basic tests on multiplication.\npub fn test_mul<F: Field>() {\n  assert_eq!(F::ZERO * F::ZERO, F::ZERO, \"0 * 0 != 0\");\n  assert_eq!(F::ONE * F::ZERO, F::ZERO, \"1 * 0 != 0\");\n  assert_eq!(F::ONE * F::ONE, F::ONE, \"1 * 1 != 1\");\n  let two = F::ONE.double();\n  assert_eq!(two * (two + F::ONE), two + two + two, \"2 * 3 != 6\");\n}\n\n/// Perform basic tests on product.\npub fn test_product<F: Field>() {\n  assert_eq!((&[] as &[F]).iter().product::<F>(), F::ONE, \"[].product() != 1\");\n  assert_eq!([F::ZERO].iter().product::<F>(), F::ZERO, \"[0].product() != 0\");\n  assert_eq!([F::ONE].iter().product::<F>(), F::ONE, \"[1].product() != 1\");\n\n  assert_eq!([F::ONE, F::ONE].iter().product::<F>(), F::ONE, \"[1, 1].product() != 2\");\n  let two = F::ONE + F::ONE;\n  assert_eq!([two, F::ONE].iter().product::<F>(), two, \"[2, 1].product() != 2\");\n  assert_eq!([two, two].iter().product::<F>(), two + two, \"[2, 2].product() != 4\");\n  assert_eq!([two, two, F::ONE].iter().product::<F>(), two + two, \"[2, 2, 1].product() != 4\");\n  assert_eq!([two, F::ZERO, F::ONE].iter().product::<F>(), F::ZERO, \"[2, 0, 1].product() != 0\");\n}\n\n/// Perform basic tests on the square function.\npub fn test_square<F: Field>() {\n  assert_eq!(F::ZERO.square(), F::ZERO, \"0^2 != 0\");\n  assert_eq!(F::ONE.square(), F::ONE, 
\"1^2 != 1\");\n  let two = F::ONE.double();\n  assert_eq!(two.square(), two + two, \"2^2 != 4\");\n  let three = two + F::ONE;\n  assert_eq!(three.square(), three * three, \"3^2 != 9\");\n}\n\n/// Perform basic tests on the invert function.\npub fn test_invert<F: Field>() {\n  assert!(bool::from(F::ZERO.invert().is_none()), \"0.invert() is some\");\n  assert_eq!(F::ONE.invert().unwrap(), F::ONE, \"1.invert() != 1\");\n\n  let two = F::ONE.double();\n  let three = two + F::ONE;\n  assert_eq!(two * three.invert().unwrap() * three, two, \"2 * 3.invert() * 3 != 2\");\n}\n\n/// Perform basic tests on the sqrt functions.\npub fn test_sqrt<F: Field>() {\n  assert_eq!(F::ZERO.sqrt().unwrap(), F::ZERO, \"sqrt(0) != 0\");\n  assert!(\n    (F::ONE.sqrt().unwrap() == F::ONE) || (F::ONE.sqrt().unwrap() == -F::ONE),\n    \"sqrt(1) != 1\"\n  );\n\n  let mut has_root = F::ONE.double();\n  while bool::from(has_root.sqrt().is_none()) {\n    has_root += F::ONE;\n  }\n\n  // The following code doesn't assume which root is returned, yet it does assume a consistent root\n  // is returned\n  let root = has_root.sqrt().unwrap();\n  assert_eq!(root * root, has_root, \"sqrt(x)^2 != x\");\n\n  let check = |value: (_, _), expected: (_, F), msg| {\n    assert_eq!(bool::from(value.0), bool::from(expected.0), \"{msg}\");\n    assert!((value.1 == expected.1) || (value.1 == -expected.1), \"{msg}\");\n  };\n  check(\n    F::sqrt_ratio(&has_root, &F::ONE),\n    (Choice::from(1), root),\n    \"sqrt_ratio didn't return the root with a divisor of 1\",\n  );\n  check(\n    F::sqrt_ratio(&(has_root * F::ONE.double()), &F::ONE.double()),\n    (Choice::from(1), root),\n    \"sqrt_ratio didn't return the root with a divisor of 2\",\n  );\n\n  check(F::sqrt_alt(&F::ZERO), F::sqrt_ratio(&F::ZERO, &F::ONE), \"sqrt_alt(0) != sqrt_ratio(0, 1)\");\n  check(F::sqrt_alt(&F::ONE), F::sqrt_ratio(&F::ONE, &F::ONE), \"sqrt_alt(1) != sqrt_ratio(1, 1)\");\n  check(F::sqrt_alt(&has_root), (Choice::from(1), root), 
\"sqrt_alt(square) != (1, root)\");\n\n  // Check 0 divisors are properly implemented\n  check(\n    F::sqrt_ratio(&has_root, &F::ZERO),\n    (Choice::from(0), F::ZERO),\n    \"sqrt_ratio didn't return the right value for a 0 divisor\",\n  );\n\n  // Check non-squares are appropriately marked\n  let mut no_root = has_root + F::ONE;\n  while bool::from(no_root.sqrt().is_some()) {\n    no_root += F::ONE;\n  }\n  assert!(\n    !bool::from(F::sqrt_ratio(&no_root, &F::ONE).0),\n    \"sqrt_ratio claimed non-square had root\"\n  );\n  assert!(!bool::from(F::sqrt_alt(&no_root).0), \"sqrt_alt claimed non-square had root\");\n}\n\n/// Perform basic tests on the is_zero functions.\npub fn test_is_zero<F: Field>() {\n  assert!(bool::from(F::ZERO.is_zero()), \"0 is not 0\");\n  assert!(F::ZERO.is_zero_vartime(), \"0 is not 0\");\n}\n\n/// Perform basic tests on the cube function.\npub fn test_cube<F: Field>() {\n  assert_eq!(F::ZERO.cube(), F::ZERO, \"0^3 != 0\");\n  assert_eq!(F::ONE.cube(), F::ONE, \"1^3 != 1\");\n  let two = F::ONE.double();\n  assert_eq!(two.cube(), two * two * two, \"2^3 != 8\");\n}\n\n/// Test random.\npub fn test_random<R: RngCore, F: Field>(rng: &mut R) {\n  let a = F::random(&mut *rng);\n\n  // Run up to 128 times so small fields, which may occasionally return the same element twice,\n  // are statistically unlikely to fail\n  // Field of order 1 will always fail this test due to lack of distinct elements to sample\n  // from\n  let mut pass = false;\n  for _ in 0 .. 
128 {\n    let b = F::random(&mut *rng);\n    // This test passes if a distinct element is returned at least once\n    if b != a {\n      pass = true;\n    }\n  }\n  assert!(pass, \"random always returned the same value\");\n}\n\n/// Run all tests on fields implementing Field.\npub fn test_field<R: RngCore, F: Field>(rng: &mut R) {\n  test_eq::<F>();\n  test_conditional_select::<F>();\n\n  test_add::<F>();\n  test_sum::<F>();\n\n  test_sub::<F>();\n  test_neg::<F>();\n\n  test_mul::<F>();\n  test_product::<F>();\n\n  test_square::<F>();\n  test_invert::<F>();\n  test_sqrt::<F>();\n  test_is_zero::<F>();\n\n  test_cube::<F>();\n\n  test_random::<R, F>(rng);\n}\n"
  },
  {
    "path": "crypto/ff-group-tests/src/group.rs",
    "content": "use rand_core::RngCore;\nuse group::{\n  ff::{Field, PrimeFieldBits},\n  Group,\n  prime::PrimeGroup,\n};\n\nuse crate::prime_field::{test_prime_field, test_prime_field_bits};\n\n/// Test equality.\npub fn test_eq<G: Group>() {\n  assert_eq!(G::identity(), G::identity(), \"identity != identity\");\n  assert_eq!(G::generator(), G::generator(), \"generator != generator\");\n  assert!(G::identity() != G::generator(), \"identity == generator\");\n}\n\n/// Test identity.\npub fn test_identity<G: Group>() {\n  assert!(bool::from(G::identity().is_identity()), \"identity wasn't identity\");\n  assert!(\n    bool::from((G::identity() + G::identity()).is_identity()),\n    \"identity + identity wasn't identity\"\n  );\n  assert!(\n    bool::from((G::generator() - G::generator()).is_identity()),\n    \"generator - generator wasn't identity\"\n  );\n  assert!(!bool::from(G::generator().is_identity()), \"is_identity claimed generator was identity\");\n}\n\n/// Sanity check the generator.\npub fn test_generator<G: Group>() {\n  assert!(G::generator() != G::identity(), \"generator was identity\");\n  assert!(\n    (G::generator() + G::generator()) != G::generator(),\n    \"generator added to itself was identity\"\n  );\n}\n\n/// Test doubling of group elements.\npub fn test_double<G: Group>() {\n  assert!(bool::from(G::identity().double().is_identity()), \"identity.double() wasn't identity\");\n  assert_eq!(\n    G::generator() + G::generator(),\n    G::generator().double(),\n    \"generator + generator != generator.double()\"\n  );\n}\n\n/// Test addition.\npub fn test_add<G: Group>() {\n  assert_eq!(G::identity() + G::identity(), G::identity(), \"identity + identity != identity\");\n  assert_eq!(G::identity() + G::generator(), G::generator(), \"identity + generator != generator\");\n  assert_eq!(G::generator() + G::identity(), G::generator(), \"generator + identity != generator\");\n\n  let two = G::generator().double();\n  assert_eq!(G::generator() + 
G::generator(), two, \"generator + generator != two\");\n  let four = two.double();\n  assert_eq!(\n    G::generator() + G::generator() + G::generator() + G::generator(),\n    four,\n    \"generator + generator + generator + generator != four\"\n  );\n}\n\n/// Test summation.\npub fn test_sum<G: Group>() {\n  assert_eq!(\n    [G::generator(), G::generator()].iter().sum::<G>(),\n    G::generator().double(),\n    \"[generator, generator].sum() != two\"\n  );\n  assert_eq!(\n    [G::generator().double(), G::generator()].iter().sum::<G>(),\n    G::generator().double() + G::generator(),\n    \"[generator.double(), generator].sum() != three\"\n  );\n}\n\n/// Test negation.\npub fn test_neg<G: Group>() {\n  assert_eq!(G::identity(), G::identity().neg(), \"identity != -identity\");\n  assert_eq!(\n    G::generator() + G::generator().neg(),\n    G::identity(),\n    \"generator + -generator != identity\"\n  );\n}\n\n/// Test subtraction.\npub fn test_sub<G: Group>() {\n  assert_eq!(G::generator() - G::generator(), G::identity(), \"generator - generator != identity\");\n  let two = G::generator() + G::generator();\n  assert_eq!(two - G::generator(), G::generator(), \"two - one != one\");\n}\n\n/// Test scalar multiplication\npub fn test_mul<G: Group>() {\n  assert_eq!(G::generator() * G::Scalar::from(0), G::identity(), \"generator * 0 != identity\");\n  assert_eq!(G::generator() * G::Scalar::from(1), G::generator(), \"generator * 1 != generator\");\n  assert_eq!(\n    G::generator() * G::Scalar::from(2),\n    G::generator() + G::generator(),\n    \"generator * 2 != generator + generator\"\n  );\n  assert_eq!(G::identity() * G::Scalar::from(2), G::identity(), \"identity * 2 != identity\");\n}\n\n/// Test `((order - 1) * G) + G == identity`.\npub fn test_order<G: Group>() {\n  let minus_one = G::generator() * (G::Scalar::ZERO - G::Scalar::ONE);\n  assert!(minus_one != G::identity(), \"(modulus - 1) * G was identity\");\n  assert_eq!(minus_one + G::generator(), G::identity(), 
\"((modulus - 1) * G) + G wasn't identity\");\n}\n\n/// Test random.\npub fn test_random<R: RngCore, G: Group>(rng: &mut R) {\n  let a = G::random(&mut *rng);\n  assert!(!bool::from(a.is_identity()), \"random returned identity\");\n\n  // Run up to 128 times so small groups, which may occasionally return the same element twice,\n  // are statistically unlikely to fail\n  // Groups of order <= 2 will always fail this test due to lack of distinct elements to sample\n  // from\n  let mut pass = false;\n  for _ in 0 .. 128 {\n    let b = G::random(&mut *rng);\n    assert!(!bool::from(b.is_identity()), \"random returned identity\");\n\n    // This test passes if a distinct element is returned at least once\n    if b != a {\n      pass = true;\n    }\n  }\n  assert!(pass, \"random always returned the same value\");\n}\n\n/// Run all tests on groups implementing Group.\npub fn test_group<R: RngCore, G: Group>(rng: &mut R) {\n  test_prime_field::<R, G::Scalar>(rng);\n\n  test_eq::<G>();\n  test_identity::<G>();\n  test_generator::<G>();\n  test_double::<G>();\n  test_add::<G>();\n  test_sum::<G>();\n  test_neg::<G>();\n  test_sub::<G>();\n  test_mul::<G>();\n  test_order::<G>();\n  test_random::<R, G>(rng);\n}\n\n/// Test encoding and decoding of group elements.\npub fn test_encoding<G: PrimeGroup>() {\n  let test = |point: G, msg| -> G {\n    let bytes = point.to_bytes();\n    let mut repr = G::Repr::default();\n    repr.as_mut().copy_from_slice(bytes.as_ref());\n    let decoded = G::from_bytes(&repr).unwrap();\n    assert_eq!(point, decoded, \"{msg} couldn't be encoded and decoded\");\n    assert_eq!(\n      point,\n      G::from_bytes_unchecked(&repr).unwrap(),\n      \"{msg} couldn't be encoded and decoded\",\n    );\n    decoded\n  };\n  assert!(bool::from(test(G::identity(), \"identity\").is_identity()));\n  test(G::generator(), \"generator\");\n  test(G::generator() + G::generator(), \"(generator * 2)\");\n}\n\n/// Run all tests on groups implementing PrimeGroup 
(Group + GroupEncoding).\npub fn test_prime_group<R: RngCore, G: PrimeGroup>(rng: &mut R) {\n  test_group::<R, G>(rng);\n\n  test_encoding::<G>();\n}\n\n/// Run all tests offered by this crate on the group.\npub fn test_prime_group_bits<R: RngCore, G: PrimeGroup<Scalar: PrimeFieldBits>>(rng: &mut R) {\n  test_prime_field_bits::<R, G::Scalar>(rng);\n  test_prime_group::<R, G>(rng);\n}\n\n// Run these tests against k256/p256\n// This ensures that these tests are well formed and won't error for valid implementations,\n// assuming the validity of k256/p256\n// While k256 and p256 may be malformed in a way which coincides with a faulty test, this is\n// considered unlikely\n// The other option, not running against any libraries, would leave faulty tests completely\n// undetected\n\n#[test]\nfn test_k256() {\n  test_prime_group_bits::<_, k256::ProjectivePoint>(&mut rand_core::OsRng);\n}\n\n#[test]\nfn test_p256() {\n  test_prime_group_bits::<_, p256::ProjectivePoint>(&mut rand_core::OsRng);\n}\n\n#[test]\nfn test_bls12_381() {\n  test_prime_group_bits::<_, bls12_381::G1Projective>(&mut rand_core::OsRng);\n  test_prime_group_bits::<_, bls12_381::G2Projective>(&mut rand_core::OsRng);\n}\n\n#[test]\nfn test_pallas_vesta() {\n  test_prime_group_bits::<_, pasta_curves::pallas::Point>(&mut rand_core::OsRng);\n  test_prime_group_bits::<_, pasta_curves::vesta::Point>(&mut rand_core::OsRng);\n}\n"
  },
  {
    "path": "crypto/ff-group-tests/src/lib.rs",
    "content": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![doc = include_str!(\"../README.md\")]\n\n/// Tests for the Field trait.\npub mod field;\n/// Tests for the PrimeField and PrimeFieldBits traits.\npub mod prime_field;\n\n/// Tests for the Group and GroupEncoding traits.\npub mod group;\n"
  },
  {
    "path": "crypto/ff-group-tests/src/prime_field.rs",
    "content": "use rand_core::RngCore;\nuse group::ff::{PrimeField, PrimeFieldBits};\n\nuse crate::field::test_field;\n\n// Ideally, this and test_one would be under Field, yet these tests require access to From<u64>\n/// Test zero returns F::from(0).\npub fn test_zero<F: PrimeField>() {\n  assert_eq!(F::ZERO, F::from(0u64), \"0 != 0\");\n}\n\n/// Test one returns F::from(1).\npub fn test_one<F: PrimeField>() {\n  assert_eq!(F::ONE, F::from(1u64), \"1 != 1\");\n}\n\n/// Test `From<u64>` for F works.\npub fn test_from_u64<F: PrimeField>() {\n  assert_eq!(F::ZERO, F::from(0u64), \"0 != 0u64\");\n  assert_eq!(F::ONE, F::from(1u64), \"1 != 1u64\");\n  assert_eq!(F::ONE.double(), F::from(2u64), \"2 != 2u64\");\n  assert_eq!(F::ONE.double() + F::ONE, F::from(3u64), \"3 != 3u64\");\n}\n\n/// Test from_u128 for F works.\npub fn test_from_u128<F: PrimeField>() {\n  assert_eq!(F::ZERO, F::from_u128(0u128), \"0 != 0u128\");\n  assert_eq!(F::ONE, F::from_u128(1u128), \"1 != 1u128\");\n  assert_eq!(F::from(2u64), F::from_u128(2u128), \"2u64 != 2u128\");\n  assert_eq!(F::from(3u64), F::from_u128(3u128), \"3u64 != 3u128\");\n}\n\n/// Test is_odd/is_even works.\n///\n/// This test assumes an odd modulus with oddness being determined by the least-significant bit.\n/// Accordingly, this test doesn't support fields alternatively defined.\n/// TODO: Improve in the future.\npub fn test_is_odd<F: PrimeField>() {\n  assert_eq!(F::ZERO.is_odd().unwrap_u8(), 0, \"0 was odd\");\n  assert_eq!(F::ZERO.is_even().unwrap_u8(), 1, \"0 wasn't even\");\n\n  assert_eq!(F::ONE.is_odd().unwrap_u8(), 1, \"1 was even\");\n  assert_eq!(F::ONE.is_even().unwrap_u8(), 0, \"1 wasn't odd\");\n\n  // Make sure an odd value added to an odd value is even\n  let two = F::ONE.double();\n  assert_eq!(two.is_odd().unwrap_u8(), 0, \"2 was odd\");\n  assert_eq!(two.is_even().unwrap_u8(), 1, \"2 wasn't even\");\n\n  // Make sure an even value added to an even value is even\n  let four = two.double();\n  
assert_eq!(four.is_odd().unwrap_u8(), 0, \"4 was odd\");\n  assert_eq!(four.is_even().unwrap_u8(), 1, \"4 wasn't even\");\n\n  let neg_one = -F::ONE;\n  assert_eq!(neg_one.is_odd().unwrap_u8(), 0, \"-1 was odd\");\n  assert_eq!(neg_one.is_even().unwrap_u8(), 1, \"-1 wasn't even\");\n\n  assert_eq!(neg_one.double().is_odd().unwrap_u8(), 1, \"(-1).double() was even\");\n  assert_eq!(neg_one.double().is_even().unwrap_u8(), 0, \"(-1).double() wasn't odd\");\n}\n\n/// Test encoding and decoding of field elements.\npub fn test_encoding<F: PrimeField>() {\n  let test = |scalar: F, msg| {\n    let bytes = scalar.to_repr();\n    let mut repr = F::Repr::default();\n    repr.as_mut().copy_from_slice(bytes.as_ref());\n    assert_eq!(scalar, F::from_repr(repr).unwrap(), \"{msg} couldn't be encoded and decoded\");\n    assert_eq!(\n      scalar,\n      F::from_repr_vartime(repr).unwrap(),\n      \"{msg} couldn't be encoded and decoded\",\n    );\n    assert_eq!(\n      bytes.as_ref(),\n      F::from_repr(repr).unwrap().to_repr().as_ref(),\n      \"canonical encoding decoded produced distinct encoding\"\n    );\n  };\n  test(F::ZERO, \"0\");\n  test(F::ONE, \"1\");\n  test(F::ONE + F::ONE, \"2\");\n  test(-F::ONE, \"-1\");\n\n  // Also check if a non-canonical encoding is possible\n  let mut high = (F::ZERO - F::ONE).to_repr();\n  let mut possible_non_canon = false;\n  for byte in high.as_mut() {\n    // The fact a bit isn't set in the highest possible value suggests there's unused bits\n    // If there's unused bits, mark the possibility of a non-canonical encoding and set the bits\n    if *byte != 255 {\n      possible_non_canon = true;\n      *byte = 255;\n      break;\n    }\n  }\n\n  // Any non-canonical encoding should fail to be read\n  if possible_non_canon {\n    assert!(!bool::from(F::from_repr(high).is_some()));\n  }\n}\n\n/// Run all tests on fields implementing PrimeField.\npub fn test_prime_field<R: RngCore, F: PrimeField>(rng: &mut R) {\n  test_field::<R, 
F>(rng);\n\n  test_zero::<F>();\n  test_one::<F>();\n  test_from_u64::<F>();\n  test_from_u128::<F>();\n  test_is_odd::<F>();\n\n  // Do a sanity check on the CAPACITY. A full test can't be done at this time\n  assert!(F::CAPACITY <= F::NUM_BITS, \"capacity exceeded number of bits\");\n\n  test_encoding::<F>();\n}\n\n/// Test to_le_bits returns the little-endian bits of a value.\n// This test assumes that the modulus is at least 4.\npub fn test_to_le_bits<F: PrimeField + PrimeFieldBits>() {\n  {\n    let bits = F::ZERO.to_le_bits();\n    assert_eq!(bits.iter().filter(|bit| **bit).count(), 0, \"0 had bits set\");\n  }\n\n  {\n    let bits = F::ONE.to_le_bits();\n    assert!(bits[0], \"1 didn't have its least significant bit set\");\n    assert_eq!(bits.iter().filter(|bit| **bit).count(), 1, \"1 had multiple bits set\");\n  }\n\n  {\n    let bits = F::from(2).to_le_bits();\n    assert!(bits[1], \"2 didn't have its second bit set\");\n    assert_eq!(bits.iter().filter(|bit| **bit).count(), 1, \"2 had multiple bits set\");\n  }\n\n  {\n    let bits = F::from(3).to_le_bits();\n    assert!(bits[0], \"3 didn't have its first bit set\");\n    assert!(bits[1], \"3 didn't have its second bit set\");\n    assert_eq!(bits.iter().filter(|bit| **bit).count(), 2, \"2 didn't have two bits set\");\n  }\n}\n\n/// Test char_le_bits returns the bits of the modulus.\npub fn test_char_le_bits<F: PrimeField + PrimeFieldBits>() {\n  // A field with a modulus of 0 may be technically valid? 
Yet these tests assume some basic\n  // functioning.\n  assert!(F::char_le_bits().iter().any(|bit| *bit), \"char_le_bits contained 0\");\n\n  // Test this is the bit pattern of the modulus by reconstructing the modulus from it\n  let mut bit = F::ONE;\n  let mut modulus = F::ZERO;\n  for set in F::char_le_bits() {\n    if set {\n      modulus += bit;\n    }\n    bit = bit.double();\n  }\n  assert_eq!(modulus, F::ZERO, \"char_le_bits did not contain the field's modulus\");\n}\n\n/// Test NUM_BITS is accurate.\npub fn test_num_bits<F: PrimeField + PrimeFieldBits>() {\n  let mut val = F::ONE;\n  let mut bit = 0;\n  while ((bit + 1) < val.to_le_bits().len()) && val.double().to_le_bits()[bit + 1] {\n    val = val.double();\n    bit += 1;\n  }\n  assert_eq!(\n    F::NUM_BITS,\n    u32::try_from(bit + 1).unwrap(),\n    \"NUM_BITS was incorrect. it should be {}\",\n    bit + 1\n  );\n}\n\n/// Test CAPACITY is accurate.\npub fn test_capacity<F: PrimeField + PrimeFieldBits>() {\n  assert!(F::CAPACITY <= F::NUM_BITS, \"capacity exceeded number of bits\");\n\n  let mut val = F::ONE;\n  assert!(val.to_le_bits()[0], \"1 didn't have its least significant bit set\");\n  for b in 1 .. 
F::CAPACITY {\n    val = val.double();\n    val += F::ONE;\n    for i in 0 ..= b {\n      assert!(\n        val.to_le_bits()[usize::try_from(i).unwrap()],\n        \"couldn't set a bit within the capacity\",\n      );\n    }\n  }\n\n  // If the field has a modulus which is a power of 2, NUM_BITS should equal CAPACITY\n  // Adding one would also be sufficient to trigger an overflow\n  if F::char_le_bits().iter().filter(|bit| **bit).count() == 1 {\n    assert_eq!(\n      F::NUM_BITS,\n      F::CAPACITY,\n      \"field has a power of two modulus yet CAPACITY doesn't equal NUM_BITS\",\n    );\n    assert_eq!(val + F::ONE, F::ZERO, \"CAPACITY set bits, + 1, != zero for a binary field\");\n    return;\n  }\n\n  assert_eq!(F::NUM_BITS - 1, F::CAPACITY, \"capacity wasn't NUM_BITS - 1\");\n}\n\nfn pow<F: PrimeFieldBits>(base: F, exp: F) -> F {\n  let mut res = F::ONE;\n  for bit in exp.to_le_bits().iter().rev() {\n    res *= res;\n    if *bit {\n      res *= base;\n    }\n  }\n  res\n}\n\n// Ideally, this would be under field.rs, yet the above pow function requires PrimeFieldBits\n/// Perform basic tests on the pow functions, even when passed non-canonical inputs.\npub fn test_pow<F: PrimeFieldBits>() {\n  // Sanity check the local pow algorithm. 
Does not have assert messages as these shouldn't fail\n  assert_eq!(pow(F::ONE, F::ZERO), F::ONE);\n  assert_eq!(pow(F::ONE.double(), F::ZERO), F::ONE);\n  assert_eq!(pow(F::ONE, F::ONE), F::ONE);\n\n  let two = F::ONE.double();\n  assert_eq!(pow(two, F::ONE), two);\n  assert_eq!(pow(two, two), two.double());\n  let three = two + F::ONE;\n  assert_eq!(pow(three, F::ONE), three);\n  assert_eq!(pow(three, two), three * three);\n  assert_eq!(pow(three, three), three * three * three);\n\n  // Choose a small base without a notably uniform bit pattern\n  let bit_0 = F::ONE;\n  let base = {\n    let bit_1 = bit_0.double();\n    let bit_2 = bit_1.double();\n    let bit_3 = bit_2.double();\n    let bit_4 = bit_3.double();\n    let bit_5 = bit_4.double();\n    let bit_6 = bit_5.double();\n    let bit_7 = bit_6.double();\n    bit_7 + bit_6 + bit_5 + bit_2 + bit_0\n  };\n\n  // Ensure pow/pow_vartime return 1 when the base is raised to 0, handling malleated inputs\n  assert_eq!(base.pow([]), F::ONE, \"pow x^0 ([]) != 1\");\n  assert_eq!(base.pow_vartime([]), F::ONE, \"pow x^0 ([]) != 1\");\n  assert_eq!(base.pow([0]), F::ONE, \"pow_vartime x^0 ([0]) != 1\");\n  assert_eq!(base.pow_vartime([0]), F::ONE, \"pow_vartime x^0 ([0]) != 1\");\n  assert_eq!(base.pow([0, 0]), F::ONE, \"pow x^0 ([0, 0]) != 1\");\n  assert_eq!(base.pow_vartime([0, 0]), F::ONE, \"pow_vartime x^0 ([0, 0]) != 1\");\n\n  // Ensure pow/pow_vartime return the base when raised to 1, handling malleated inputs\n  assert_eq!(base.pow([1]), base, \"pow x^1 ([1]) != x\");\n  assert_eq!(base.pow_vartime([1, 0]), base, \"pow_vartime x^1 ([1, 0]) != x\");\n  assert_eq!(base.pow([1]), base, \"pow x^1 ([1]) != x\");\n  assert_eq!(base.pow_vartime([1, 0]), base, \"pow_vartime x^1 ([1, 0]) != x\");\n\n  // Ensure pow/pow_vartime can handle multiple u64s properly\n  // Create a scalar which exceeds u64\n  let mut bit_64 = bit_0;\n  for _ in 0 .. 
64 {\n    bit_64 = bit_64.double();\n  }\n  // Run the tests\n  assert_eq!(base.pow([0, 1]), pow(base, bit_64), \"pow x^(2^64) != x^(2^64)\");\n  assert_eq!(base.pow_vartime([0, 1]), pow(base, bit_64), \"pow_vartime x^(2^64) != x^(2^64)\");\n  assert_eq!(base.pow([1, 1]), pow(base, bit_64 + F::ONE), \"pow x^(2^64 + 1) != x^(2^64 + 1)\");\n  assert_eq!(\n    base.pow_vartime([1, 1]),\n    pow(base, bit_64 + F::ONE),\n    \"pow_vartime x^(2^64 + 1) != x^(2^64 + 1)\"\n  );\n}\n\n/// Test the inverted constants are correct.\npub fn test_inv_consts<F: PrimeFieldBits>() {\n  assert_eq!(F::TWO_INV, F::from(2u64).invert().unwrap(), \"F::TWO_INV != 2.invert()\");\n  assert_eq!(\n    F::ROOT_OF_UNITY_INV,\n    F::ROOT_OF_UNITY.invert().unwrap(),\n    \"F::ROOT_OF_UNITY_INV != F::ROOT_OF_UNITY.invert()\"\n  );\n}\n\n/// Test S is correct.\npub fn test_s<F: PrimeFieldBits>() {\n  // \"This is the number of leading zero bits in the little-endian bit representation of\n  // `modulus - 1`.\"\n  let mut s = 0;\n  for b in (F::ZERO - F::ONE).to_le_bits() {\n    if b {\n      break;\n    }\n    s += 1;\n  }\n  assert_eq!(s, F::S, \"incorrect S\");\n}\n\n/// Test the root of unity is correct for the provided multiplicative generator.\npub fn test_root_of_unity<F: PrimeFieldBits>() {\n  // \"It can be calculated by exponentiating `Self::multiplicative_generator` by `t`, where\n  // `t = (modulus - 1) >> Self::S`.\"\n\n  // Get the bytes to shift\n  let mut bits = (F::ZERO - F::ONE).to_le_bits().iter().map(|bit| *bit).collect::<Vec<_>>();\n  for _ in 0 .. 
F::S {\n    bits.remove(0);\n  }\n\n  // Construct t\n  let mut bit = F::ONE;\n  let mut t = F::ZERO;\n  for set in bits {\n    if set {\n      t += bit;\n    }\n    bit = bit.double();\n  }\n  assert!(bool::from(t.is_odd()), \"t wasn't odd\");\n\n  assert_eq!(pow(F::MULTIPLICATIVE_GENERATOR, t), F::ROOT_OF_UNITY, \"incorrect root of unity\");\n  assert_eq!(\n    pow(F::ROOT_OF_UNITY, pow(F::from(2u64), F::from(F::S.into()))),\n    F::ONE,\n    \"root of unity raised to 2^S wasn't 1\",\n  );\n}\n\n/// Test DELTA is correct.\npub fn test_delta<F: PrimeFieldBits>() {\n  assert_eq!(\n    pow(F::MULTIPLICATIVE_GENERATOR, pow(F::from(2u64), F::from(u64::from(F::S)))),\n    F::DELTA,\n    \"F::DELTA is incorrect\"\n  );\n}\n\n/// Run all tests on fields implementing PrimeFieldBits.\npub fn test_prime_field_bits<R: RngCore, F: PrimeFieldBits>(rng: &mut R) {\n  test_prime_field::<R, F>(rng);\n\n  test_to_le_bits::<F>();\n  test_char_le_bits::<F>();\n\n  test_pow::<F>();\n\n  test_inv_consts::<F>();\n  test_s::<F>();\n  test_root_of_unity::<F>();\n  test_delta::<F>();\n\n  test_num_bits::<F>();\n  test_capacity::<F>();\n}\n"
  },
  {
    "path": "crypto/frost/Cargo.toml",
    "content": "[package]\nname = \"modular-frost\"\nversion = \"0.10.1\"\ndescription = \"Modular implementation of FROST over ff/group\"\nlicense = \"MIT\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/crypto/frost\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nkeywords = [\"frost\", \"multisig\", \"threshold\"]\nedition = \"2021\"\nrust-version = \"1.80\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nthiserror = { version = \"2\", default-features = false, features = [\"std\"] }\n\nrand_core = { version = \"0.6\", default-features = false, features = [\"std\"] }\nrand_chacha = { version = \"0.3\", default-features = false, features = [\"std\"] }\n\nzeroize = { version = \"^1.5\", default-features = false, features = [\"std\", \"zeroize_derive\"] }\nsubtle = { version = \"^2.4\", default-features = false, features = [\"std\"] }\n\nhex = { version = \"0.4\", default-features = false, features = [\"std\"], optional = true }\n\ndigest = { version = \"0.10\", default-features = false, features = [\"std\"] }\ntranscript = { package = \"flexible-transcript\", path = \"../transcript\", version = \"^0.3.2\", default-features = false, features = [\"std\", \"recommended\"] }\n\ndalek-ff-group = { path = \"../dalek-ff-group\", version = \"0.4\", default-features = false, features = [\"std\"], optional = true }\nminimal-ed448 = { path = \"../ed448\", version = \"0.4\", default-features = false, features = [\"std\"], optional = true }\n\nciphersuite = { path = \"../ciphersuite\", version = \"^0.4.1\", default-features = false, features = [\"std\"] }\nciphersuite-kp256 = { path = \"../ciphersuite/kp256\", version = \"0.4\", default-features = false, features = [\"std\"], optional = true }\n\nmultiexp = { path = \"../multiexp\", version = \"0.4\", default-features = false, features = [\"std\", \"batch\"] }\n\nschnorr = { package = \"schnorr-signatures\", 
path = \"../schnorr\", version = \"^0.5.1\", default-features = false, features = [\"std\"] }\n\ndkg = { path = \"../dkg\", version = \"0.6.1\", default-features = false, features = [\"std\"] }\ndkg-recovery = { path = \"../dkg/recovery\", version = \"0.6\", default-features = false, features = [\"std\"], optional = true }\ndkg-dealer = { path = \"../dkg/dealer\", version = \"0.6\", default-features = false, features = [\"std\"], optional = true }\n\n[dev-dependencies]\nhex = \"0.4\"\nserde_json = { version = \"1\", default-features = false, features = [\"std\"] }\n\ndkg = { path = \"../dkg\", default-features = false, features = [\"std\"] }\ndkg-recovery = { path = \"../dkg/recovery\", default-features = false, features = [\"std\"] }\ndkg-dealer = { path = \"../dkg/dealer\", default-features = false, features = [\"std\"] }\n\n[features]\ned25519 = [\"dalek-ff-group\"]\nristretto = [\"dalek-ff-group\"]\n\nsecp256k1 = [\"ciphersuite-kp256\"]\np256 = [\"ciphersuite-kp256\"]\n\ned448 = [\"minimal-ed448\"]\n\ntests = [\"hex\", \"rand_core/getrandom\", \"dkg-dealer\", \"dkg-recovery\"]\n"
  },
  {
    "path": "crypto/frost/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2021-2023 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "crypto/frost/README.md",
    "content": "# Modular FROST\n\nA modular implementation of FROST for any curve with a ff/group API.\nAdditionally, custom algorithms may be specified so any signature reducible to\nSchnorr-like may be used with FROST.\n\nA Schnorr algorithm is provided, of the form (R, s) where `s = r + cx`, which\nallows specifying the challenge format. This is intended to easily allow\nintegrating with existing systems.\n\nThis library offers ciphersuites compatible with the\n[IETF draft](https://github.com/cfrg/draft-irtf-cfrg-frost). Currently, version\n15 is supported.\n\nA variety of testing utilities are provided under the `tests` feature. These\nare provided with no guarantees and may have completely arbitrary behavior,\nincluding panicking for completely well-reasoned input.\n\nThis library was\n[audited by Cypher Stack in March 2023](https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf),\nculminating in commit\n[669d2dbffc1dafb82a09d9419ea182667115df06](https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06).\nAny subsequent changes have not undergone auditing. While this audit included\nFROST's definition of Ed448, the underlying Ed448 ciphersuite (offered by the\nciphersuite crate) was not audited, nor was the minimal-ed448 crate implementing\nthe curve itself.\n"
  },
  {
    "path": "crypto/frost/src/algorithm.rs",
    "content": "use core::{marker::PhantomData, fmt::Debug};\nuse std::io::{self, Read, Write};\n\nuse zeroize::Zeroizing;\nuse rand_core::{RngCore, CryptoRng};\n\nuse transcript::Transcript;\n\nuse crate::{Participant, ThresholdKeys, ThresholdView, Curve, FrostError};\npub use schnorr::SchnorrSignature;\n\n/// Write an addendum to a writer.\npub trait WriteAddendum {\n  fn write<W: Write>(&self, writer: &mut W) -> io::Result<()>;\n}\n\nimpl WriteAddendum for () {\n  fn write<W: Write>(&self, _: &mut W) -> io::Result<()> {\n    Ok(())\n  }\n}\n\n/// Trait alias for the requirements to be used as an addendum.\npub trait Addendum: Send + Sync + Clone + PartialEq + Debug + WriteAddendum {}\nimpl<A: Send + Sync + Clone + PartialEq + Debug + WriteAddendum> Addendum for A {}\n\n/// Algorithm trait usable by the FROST signing machine to produce signatures..\npub trait Algorithm<C: Curve>: Send + Sync {\n  /// The transcript format this algorithm uses. This likely should NOT be the IETF-compatible\n  /// transcript included in this crate.\n  type Transcript: Sync + Clone + Debug + Transcript;\n  /// Serializable addendum, used in algorithms requiring more data than just the nonces.\n  type Addendum: Addendum;\n  /// The resulting type of the signatures this algorithm will produce.\n  type Signature: Clone + PartialEq + Debug;\n\n  /// Obtain a mutable borrow of the underlying transcript.\n  fn transcript(&mut self) -> &mut Self::Transcript;\n\n  /// Obtain the list of nonces to generate, as specified by the generators to create commitments\n  /// against per-nonce.\n  ///\n  /// The Algorithm is responsible for all transcripting of these nonce specifications/generators.\n  ///\n  /// The prover will be passed the commitments, and the commitments will be sent to all other\n  /// participants. No guarantees the commitments are internally consistent (have the same discrete\n  /// logarithm across generators) are made. 
Any Algorithm which specifies multiple generators for\n  /// a single nonce must handle that itself.\n  fn nonces(&self) -> Vec<Vec<C::G>>;\n\n  /// Generate an addendum to FROST\"s preprocessing stage.\n  fn preprocess_addendum<R: RngCore + CryptoRng>(\n    &mut self,\n    rng: &mut R,\n    keys: &ThresholdKeys<C>,\n  ) -> Self::Addendum;\n\n  /// Read an addendum from a reader.\n  fn read_addendum<R: Read>(&self, reader: &mut R) -> io::Result<Self::Addendum>;\n\n  /// Process the addendum for the specified participant. Guaranteed to be called in order.\n  fn process_addendum(\n    &mut self,\n    params: &ThresholdView<C>,\n    l: Participant,\n    reader: Self::Addendum,\n  ) -> Result<(), FrostError>;\n\n  /// Sign a share with the given secret/nonce.\n  /// The secret will already have been its lagrange coefficient applied so it is the necessary\n  /// key share.\n  /// The nonce will already have been processed into the combined form d + (e * p).\n  fn sign_share(\n    &mut self,\n    params: &ThresholdView<C>,\n    nonce_sums: &[Vec<C::G>],\n    nonces: Vec<Zeroizing<C::F>>,\n    msg: &[u8],\n  ) -> C::F;\n\n  /// Verify a signature.\n  #[must_use]\n  fn verify(&self, group_key: C::G, nonces: &[Vec<C::G>], sum: C::F) -> Option<Self::Signature>;\n\n  /// Verify a specific share given as a response.\n  /// This function should return a series of pairs whose products should sum to zero for a valid\n  /// share. Any error raised is treated as the share being invalid.\n  #[allow(clippy::type_complexity, clippy::result_unit_err)]\n  fn verify_share(\n    &self,\n    verification_share: C::G,\n    nonces: &[Vec<C::G>],\n    share: C::F,\n  ) -> Result<Vec<(C::F, C::G)>, ()>;\n}\n\nmod sealed {\n  pub use super::*;\n\n  /// IETF-compliant transcript. 
This is incredibly naive and should not be used within larger\n  /// protocols.\n  #[derive(Clone, Debug)]\n  pub struct IetfTranscript(pub(crate) Vec<u8>);\n  impl Transcript for IetfTranscript {\n    type Challenge = Vec<u8>;\n\n    fn new(_: &'static [u8]) -> IetfTranscript {\n      IetfTranscript(vec![])\n    }\n\n    fn domain_separate(&mut self, _: &[u8]) {}\n\n    fn append_message<M: AsRef<[u8]>>(&mut self, _: &'static [u8], message: M) {\n      self.0.extend(message.as_ref());\n    }\n\n    fn challenge(&mut self, _: &'static [u8]) -> Vec<u8> {\n      self.0.clone()\n    }\n\n    // FROST won't use this and this shouldn't be used outside of FROST\n    fn rng_seed(&mut self, _: &'static [u8]) -> [u8; 32] {\n      unimplemented!()\n    }\n  }\n}\npub(crate) use sealed::IetfTranscript;\n\n/// HRAm usable by the included Schnorr signature algorithm to generate challenges.\npub trait Hram<C: Curve>: Send + Sync + Clone {\n  /// HRAm function to generate a challenge.\n  /// H2 from the IETF draft, despite having a different argument set (not being pre-formatted).\n  #[allow(non_snake_case)]\n  fn hram(R: &C::G, A: &C::G, m: &[u8]) -> C::F;\n}\n\n/// Schnorr signature algorithm ((R, s) where s = r + cx).\n///\n/// `verify`, `verify_share` must be called after `sign_share` is called.\n#[derive(Clone)]\npub struct Schnorr<C: Curve, T: Sync + Clone + Debug + Transcript, H: Hram<C>> {\n  transcript: T,\n  c: Option<C::F>,\n  _hram: PhantomData<H>,\n}\n\n/// IETF-compliant Schnorr signature algorithm.\n///\n/// This algorithm specifically uses the transcript format defined in the FROST IETF draft.\n/// It's a naive transcript format not viable for usage in larger protocols, yet is presented here\n/// in order to provide compatibility.\n///\n/// Usage of this with key offsets will break the intended compatibility as the IETF draft does not\n/// specify a protocol for offsets.\npub type IetfSchnorr<C, H> = Schnorr<C, IetfTranscript, H>;\n\nimpl<C: Curve, T: Sync + Clone 
+ Debug + Transcript, H: Hram<C>> Schnorr<C, T, H> {\n  /// Construct a Schnorr algorithm continuing the specified transcript.\n  pub fn new(transcript: T) -> Schnorr<C, T, H> {\n    Schnorr { transcript, c: None, _hram: PhantomData }\n  }\n}\n\nimpl<C: Curve, H: Hram<C>> IetfSchnorr<C, H> {\n  /// Construct a IETF-compatible Schnorr algorithm.\n  ///\n  /// Please see the `IetfSchnorr` documentation for the full details of this.\n  pub fn ietf() -> IetfSchnorr<C, H> {\n    Schnorr::new(IetfTranscript(vec![]))\n  }\n}\n\nimpl<C: Curve, T: Sync + Clone + Debug + Transcript, H: Hram<C>> Algorithm<C> for Schnorr<C, T, H> {\n  type Transcript = T;\n  type Addendum = ();\n  type Signature = SchnorrSignature<C>;\n\n  fn transcript(&mut self) -> &mut Self::Transcript {\n    &mut self.transcript\n  }\n\n  fn nonces(&self) -> Vec<Vec<C::G>> {\n    vec![vec![C::generator()]]\n  }\n\n  fn preprocess_addendum<R: RngCore + CryptoRng>(&mut self, _: &mut R, _: &ThresholdKeys<C>) {}\n\n  fn read_addendum<R: Read>(&self, _: &mut R) -> io::Result<Self::Addendum> {\n    Ok(())\n  }\n\n  fn process_addendum(\n    &mut self,\n    _: &ThresholdView<C>,\n    _: Participant,\n    (): (),\n  ) -> Result<(), FrostError> {\n    Ok(())\n  }\n\n  fn sign_share(\n    &mut self,\n    params: &ThresholdView<C>,\n    nonce_sums: &[Vec<C::G>],\n    mut nonces: Vec<Zeroizing<C::F>>,\n    msg: &[u8],\n  ) -> C::F {\n    let c = H::hram(&nonce_sums[0][0], &params.group_key(), msg);\n    self.c = Some(c);\n    SchnorrSignature::<C>::sign(params.secret_share(), nonces.swap_remove(0), c).s\n  }\n\n  #[must_use]\n  fn verify(&self, group_key: C::G, nonces: &[Vec<C::G>], sum: C::F) -> Option<Self::Signature> {\n    let sig = SchnorrSignature { R: nonces[0][0], s: sum };\n    Some(sig).filter(|sig| sig.verify(group_key, self.c.unwrap()))\n  }\n\n  fn verify_share(\n    &self,\n    verification_share: C::G,\n    nonces: &[Vec<C::G>],\n    share: C::F,\n  ) -> Result<Vec<(C::F, C::G)>, ()> {\n    Ok(\n      
SchnorrSignature::<C> { R: nonces[0][0], s: share }\n        .batch_statements(verification_share, self.c.unwrap())\n        .to_vec(),\n    )\n  }\n}\n"
  },
  {
    "path": "crypto/frost/src/curve/dalek.rs",
    "content": "use digest::Digest;\n\nuse dalek_ff_group::Scalar;\n\nuse ciphersuite::Ciphersuite;\n\nuse crate::{curve::Curve, algorithm::Hram};\n\nmacro_rules! dalek_curve {\n  (\n    $feature: literal,\n\n    $Curve:      ident,\n    $Hram:       ident,\n\n    $CONTEXT: literal,\n    $chal: literal\n  ) => {\n    pub use dalek_ff_group::$Curve;\n\n    impl Curve for $Curve {\n      const CONTEXT: &'static [u8] = $CONTEXT;\n    }\n\n    /// The challenge function for this ciphersuite.\n    #[derive(Copy, Clone)]\n    pub struct $Hram;\n    impl Hram<$Curve> for $Hram {\n      #[allow(non_snake_case)]\n      fn hram(R: &<$Curve as Ciphersuite>::G, A: &<$Curve as Ciphersuite>::G, m: &[u8]) -> Scalar {\n        let mut hash = <$Curve as Ciphersuite>::H::new();\n        if $chal.len() != 0 {\n          hash.update(&[$CONTEXT.as_ref(), $chal].concat());\n        }\n        Scalar::from_hash(\n          hash.chain_update(&[&R.compress().to_bytes(), &A.compress().to_bytes(), m].concat()),\n        )\n      }\n    }\n  };\n}\n\n#[cfg(feature = \"ristretto\")]\ndalek_curve!(\"ristretto\", Ristretto, IetfRistrettoHram, b\"FROST-RISTRETTO255-SHA512-v1\", b\"chal\");\n\n#[cfg(feature = \"ed25519\")]\ndalek_curve!(\"ed25519\", Ed25519, IetfEd25519Hram, b\"FROST-ED25519-SHA512-v1\", b\"\");\n"
  },
  {
    "path": "crypto/frost/src/curve/ed448.rs",
    "content": "use digest::Digest;\n\nuse minimal_ed448::{Scalar, Point};\npub use minimal_ed448::Ed448;\npub use ciphersuite::{group::GroupEncoding, Ciphersuite};\n\nuse crate::{curve::Curve, algorithm::Hram};\n\nconst CONTEXT: &[u8] = b\"FROST-ED448-SHAKE256-v1\";\n\nimpl Curve for Ed448 {\n  const CONTEXT: &'static [u8] = CONTEXT;\n}\n\n// The RFC-8032 Ed448 challenge function.\n#[derive(Copy, Clone)]\npub(crate) struct Ietf8032Ed448Hram;\nimpl Ietf8032Ed448Hram {\n  #[allow(non_snake_case)]\n  pub(crate) fn hram(context: &[u8], R: &Point, A: &Point, m: &[u8]) -> Scalar {\n    Scalar::wide_reduce(\n      <Ed448 as Ciphersuite>::H::digest(\n        [\n          &[b\"SigEd448\".as_ref(), &[0, u8::try_from(context.len()).unwrap()]].concat(),\n          context,\n          &[R.to_bytes().as_ref(), A.to_bytes().as_ref(), m].concat(),\n        ]\n        .concat(),\n      )\n      .as_ref()\n      .try_into()\n      .unwrap(),\n    )\n  }\n}\n\n/// The challenge function for FROST's Ed448 ciphersuite.\n#[derive(Copy, Clone)]\npub struct IetfEd448Hram;\nimpl Hram<Ed448> for IetfEd448Hram {\n  #[allow(non_snake_case)]\n  fn hram(R: &Point, A: &Point, m: &[u8]) -> Scalar {\n    Ietf8032Ed448Hram::hram(&[], R, A, m)\n  }\n}\n"
  },
  {
    "path": "crypto/frost/src/curve/kp256.rs",
    "content": "use ciphersuite::{group::GroupEncoding, Ciphersuite};\n\nuse crate::{curve::Curve, algorithm::Hram};\n\nmacro_rules! kp_curve {\n  (\n    $feature: literal,\n\n    $Curve: ident,\n    $Hram:  ident,\n\n    $CONTEXT: literal\n  ) => {\n    pub use ciphersuite_kp256::$Curve;\n\n    impl Curve for $Curve {\n      const CONTEXT: &'static [u8] = $CONTEXT;\n    }\n\n    /// The challenge function for this ciphersuite.\n    #[derive(Clone)]\n    pub struct $Hram;\n    impl Hram<$Curve> for $Hram {\n      #[allow(non_snake_case)]\n      fn hram(\n        R: &<$Curve as Ciphersuite>::G,\n        A: &<$Curve as Ciphersuite>::G,\n        m: &[u8],\n      ) -> <$Curve as Ciphersuite>::F {\n        <$Curve as Curve>::hash_to_F(\n          b\"chal\",\n          &[R.to_bytes().as_ref(), A.to_bytes().as_ref(), m].concat(),\n        )\n      }\n    }\n  };\n}\n\n#[cfg(feature = \"p256\")]\nkp_curve!(\"p256\", P256, IetfP256Hram, b\"FROST-P256-SHA256-v1\");\n\n#[cfg(feature = \"secp256k1\")]\nkp_curve!(\"secp256k1\", Secp256k1, IetfSecp256k1Hram, b\"FROST-secp256k1-SHA256-v1\");\n"
  },
  {
    "path": "crypto/frost/src/curve/mod.rs",
    "content": "use core::ops::Deref;\nuse std::io::{self, Read};\n\nuse rand_core::{RngCore, CryptoRng};\n\nuse zeroize::{Zeroize, Zeroizing};\nuse subtle::ConstantTimeEq;\n\nuse digest::{Digest, Output};\n\npub use ciphersuite::{\n  group::{\n    ff::{Field, PrimeField},\n    Group,\n  },\n  Ciphersuite,\n};\n\n#[cfg(any(feature = \"ristretto\", feature = \"ed25519\"))]\nmod dalek;\n#[cfg(feature = \"ristretto\")]\npub use dalek::{Ristretto, IetfRistrettoHram};\n#[cfg(feature = \"ed25519\")]\npub use dalek::{Ed25519, IetfEd25519Hram};\n\n#[cfg(any(feature = \"secp256k1\", feature = \"p256\"))]\nmod kp256;\n#[cfg(feature = \"secp256k1\")]\npub use kp256::{Secp256k1, IetfSecp256k1Hram};\n#[cfg(feature = \"p256\")]\npub use kp256::{P256, IetfP256Hram};\n\n#[cfg(feature = \"ed448\")]\nmod ed448;\n#[cfg(feature = \"ed448\")]\npub use ed448::{Ed448, IetfEd448Hram};\n#[cfg(all(test, feature = \"ed448\"))]\npub(crate) use ed448::Ietf8032Ed448Hram;\n\n/// FROST Ciphersuite.\n///\n/// This exclude the signing algorithm specific H2, making this solely the curve, its associated\n/// hash function, and the functions derived from it.\npub trait Curve: Ciphersuite {\n  /// Context string for this curve.\n  const CONTEXT: &'static [u8];\n\n  /// Hash the given dst and data to a byte vector. Used to instantiate H4 and H5.\n  fn hash(dst: &[u8], data: &[u8]) -> Output<Self::H> {\n    Self::H::digest([Self::CONTEXT, dst, data].concat())\n  }\n\n  /// Field element from hash. Used during key gen and by other crates under Serai as a general\n  /// utility. Used to instantiate H1 and H3.\n  #[allow(non_snake_case)]\n  fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F {\n    <Self as Ciphersuite>::hash_to_F(&[Self::CONTEXT, dst].concat(), msg)\n  }\n\n  /// Hash the message for the binding factor. H4 from the IETF draft.\n  fn hash_msg(msg: &[u8]) -> Output<Self::H> {\n    Self::hash(b\"msg\", msg)\n  }\n\n  /// Hash the commitments for the binding factor. 
H5 from the IETF draft.\n  fn hash_commitments(commitments: &[u8]) -> Output<Self::H> {\n    Self::hash(b\"com\", commitments)\n  }\n\n  /// Hash the commitments and message to calculate the binding factor. H1 from the IETF draft.\n  //\n  // This may return 0, which is invalid according to the FROST preprint, as all binding factors\n  // are expected to be in the multiplicative subgroup. This isn't a practical issue, as there's a\n  // negligible probability of this returning 0.\n  //\n  // When raised in\n  // https://github.com/cfrg/draft-irtf-cfrg-frost/issues/451#issuecomment-1715985505,\n  // the negligible probbility was seen as sufficient reason not to edit the spec to be robust in\n  // this regard.\n  //\n  // While that decision may be disagreeable, this library cannot implement a robust scheme while\n  // following the specification. Following the specification is preferred to being robust against\n  // an impractical probability enabling a complex attack (made infeasible by the impractical\n  // probability required).\n  //\n  // We could still panic on the 0-hash, preferring correctness to liveliness. Finding the 0-hash\n  // is as computationally complex as simply calculating the group key's discrete log however,\n  // making it not worth having a panic (as this library is expected not to panic).\n  fn hash_binding_factor(binding: &[u8]) -> Self::F {\n    <Self as Curve>::hash_to_F(b\"rho\", binding)\n  }\n\n  /// Securely generate a random nonce. 
H3 from the IETF draft.\n  fn random_nonce<R: RngCore + CryptoRng>(\n    secret: &Zeroizing<Self::F>,\n    rng: &mut R,\n  ) -> Zeroizing<Self::F> {\n    let mut seed = Zeroizing::new(vec![0; 32]);\n    rng.fill_bytes(seed.as_mut());\n\n    let mut repr = secret.to_repr();\n\n    // Perform rejection sampling until we reach a non-zero nonce\n    // While the IETF spec doesn't explicitly require this, generating a zero nonce will produce\n    // commitments which will be rejected for being zero (and if they were used, leak the secret\n    // share)\n    // Rejection sampling here will prevent an honest participant from ever generating 'malicious'\n    // values and ensure safety\n    let mut res;\n    while {\n      seed.extend(repr.as_ref());\n      res = Zeroizing::new(<Self as Curve>::hash_to_F(b\"nonce\", seed.deref()));\n      res.ct_eq(&Self::F::ZERO).into()\n    } {\n      seed = Zeroizing::new(vec![0; 32]);\n      rng.fill_bytes(&mut seed);\n    }\n    repr.as_mut().zeroize();\n\n    res\n  }\n\n  /// Read a point from a reader, rejecting identity.\n  #[allow(non_snake_case)]\n  fn read_G<R: Read>(reader: &mut R) -> io::Result<Self::G> {\n    let res = <Self as Ciphersuite>::read_G(reader)?;\n    if res.is_identity().into() {\n      Err(io::Error::other(\"identity point\"))?;\n    }\n    Ok(res)\n  }\n}\n"
  },
  {
    "path": "crypto/frost/src/lib.rs",
    "content": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![doc = include_str!(\"../README.md\")]\n\nuse core::fmt::Debug;\nuse std::collections::HashMap;\n\nuse thiserror::Error;\n\n/// Distributed key generation protocol.\npub use dkg::{self, Participant, ThresholdParams, ThresholdKeys, ThresholdView};\n\n/// Curve trait and provided curves/HRAMs, forming various ciphersuites.\npub mod curve;\nuse curve::Curve;\n\n/// Algorithm for the signing process.\npub mod algorithm;\nmod nonce;\n/// Threshold signing protocol.\npub mod sign;\n\n/// Tests for application-provided curves and algorithms.\n#[cfg(any(test, feature = \"tests\"))]\npub mod tests;\n\n/// Various errors possible during signing.\n#[derive(Clone, Copy, PartialEq, Eq, Debug, Error)]\npub enum FrostError {\n  #[error(\"internal error: {0}\")]\n  InternalError(&'static str),\n\n  #[error(\"invalid participant (0 < participant <= {0}, yet participant is {1})\")]\n  InvalidParticipant(u16, Participant),\n  #[error(\"invalid signing set ({0})\")]\n  InvalidSigningSet(&'static str),\n  #[error(\"invalid participant quantity (expected {0}, got {1})\")]\n  InvalidParticipantQuantity(usize, usize),\n  #[error(\"duplicated participant ({0})\")]\n  DuplicatedParticipant(Participant),\n  #[error(\"missing participant {0}\")]\n  MissingParticipant(Participant),\n\n  #[error(\"invalid preprocess (participant {0})\")]\n  InvalidPreprocess(Participant),\n  #[error(\"invalid share (participant {0})\")]\n  InvalidShare(Participant),\n}\n\n/// Validate a map of values to have the expected participants.\npub fn validate_map<T>(\n  map: &HashMap<Participant, T>,\n  included: &[Participant],\n  ours: Participant,\n) -> Result<(), FrostError> {\n  if (map.len() + 1) != included.len() {\n    Err(FrostError::InvalidParticipantQuantity(included.len(), map.len() + 1))?;\n  }\n\n  for included in included {\n    if *included == ours {\n      if map.contains_key(included) {\n        
Err(FrostError::DuplicatedParticipant(*included))?;\n      }\n      continue;\n    }\n\n    if !map.contains_key(included) {\n      Err(FrostError::MissingParticipant(*included))?;\n    }\n  }\n\n  Ok(())\n}\n"
  },
  {
    "path": "crypto/frost/src/nonce.rs",
    "content": "// FROST defines its nonce as sum(Di, Ei * bi)\n//\n// In order for this library to be robust, it supports generating an arbitrary amount of nonces,\n// each against an arbitrary list of generators\n//\n// Each nonce remains of the form (d, e) and made into a proper nonce with d + (e * b)\n\nuse core::ops::Deref;\nuse std::{\n  io::{self, Read, Write},\n  collections::HashMap,\n};\n\nuse rand_core::{RngCore, CryptoRng};\n\nuse zeroize::{Zeroize, Zeroizing};\n\nuse transcript::Transcript;\n\nuse ciphersuite::group::{ff::PrimeField, Group, GroupEncoding};\nuse multiexp::multiexp_vartime;\n\nuse crate::{curve::Curve, Participant};\n\n// Each nonce is actually a pair of random scalars, notated as d, e under the FROST paper\n// This is considered a single nonce as r = d + be\n#[derive(Clone, Zeroize)]\npub(crate) struct Nonce<C: Curve>(pub(crate) [Zeroizing<C::F>; 2]);\n\n// Commitments to a specific generator for this binomial nonce\n#[derive(Copy, Clone, PartialEq, Eq)]\npub(crate) struct GeneratorCommitments<C: Curve>(pub(crate) [C::G; 2]);\nimpl<C: Curve> GeneratorCommitments<C> {\n  fn read<R: Read>(reader: &mut R) -> io::Result<GeneratorCommitments<C>> {\n    Ok(GeneratorCommitments([<C as Curve>::read_G(reader)?, <C as Curve>::read_G(reader)?]))\n  }\n\n  fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {\n    writer.write_all(self.0[0].to_bytes().as_ref())?;\n    writer.write_all(self.0[1].to_bytes().as_ref())\n  }\n}\n\n// A single nonce's commitments\n#[derive(Clone, PartialEq, Eq)]\npub(crate) struct NonceCommitments<C: Curve> {\n  // Called generators as these commitments are indexed by generator later on\n  // So to get the commitments for the first generator, it'd be commitments.generators[0]\n  pub(crate) generators: Vec<GeneratorCommitments<C>>,\n}\n\nimpl<C: Curve> NonceCommitments<C> {\n  pub(crate) fn new<R: RngCore + CryptoRng>(\n    rng: &mut R,\n    secret_share: &Zeroizing<C::F>,\n    generators: &[C::G],\n  ) -> 
(Nonce<C>, NonceCommitments<C>) {\n    let nonce = Nonce::<C>([\n      C::random_nonce(secret_share, &mut *rng),\n      C::random_nonce(secret_share, &mut *rng),\n    ]);\n\n    let mut commitments = Vec::with_capacity(generators.len());\n    for generator in generators {\n      commitments.push(GeneratorCommitments([\n        *generator * nonce.0[0].deref(),\n        *generator * nonce.0[1].deref(),\n      ]));\n    }\n\n    (nonce, NonceCommitments { generators: commitments })\n  }\n\n  fn read<R: Read>(reader: &mut R, generators: &[C::G]) -> io::Result<NonceCommitments<C>> {\n    Ok(NonceCommitments {\n      generators: (0 .. generators.len())\n        .map(|_| GeneratorCommitments::read(reader))\n        .collect::<Result<_, _>>()?,\n    })\n  }\n\n  fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {\n    for generator in &self.generators {\n      generator.write(writer)?;\n    }\n    Ok(())\n  }\n\n  fn transcript<T: Transcript>(&self, t: &mut T) {\n    t.domain_separate(b\"nonce\");\n    for commitments in &self.generators {\n      t.append_message(b\"commitment_D\", commitments.0[0].to_bytes());\n      t.append_message(b\"commitment_E\", commitments.0[1].to_bytes());\n    }\n  }\n}\n\n/// Commitments for all the nonces across all their generators.\n#[derive(Clone, PartialEq, Eq)]\npub(crate) struct Commitments<C: Curve> {\n  // Called nonces as these commitments are indexed by nonce\n  // So to get the commitments for the first nonce, it'd be commitments.nonces[0]\n  pub(crate) nonces: Vec<NonceCommitments<C>>,\n}\n\nimpl<C: Curve> Commitments<C> {\n  pub(crate) fn new<R: RngCore + CryptoRng>(\n    rng: &mut R,\n    secret_share: &Zeroizing<C::F>,\n    planned_nonces: &[Vec<C::G>],\n  ) -> (Vec<Nonce<C>>, Commitments<C>) {\n    let mut nonces = vec![];\n    let mut commitments = vec![];\n\n    for generators in planned_nonces {\n      let (nonce, these_commitments): (Nonce<C>, _) =\n        NonceCommitments::new(&mut *rng, secret_share, 
generators);\n\n      nonces.push(nonce);\n      commitments.push(these_commitments);\n    }\n\n    (nonces, Commitments { nonces: commitments })\n  }\n\n  pub(crate) fn transcript<T: Transcript>(&self, t: &mut T) {\n    t.domain_separate(b\"commitments\");\n    for nonce in &self.nonces {\n      nonce.transcript(t);\n    }\n  }\n\n  pub(crate) fn read<R: Read>(reader: &mut R, generators: &[Vec<C::G>]) -> io::Result<Self> {\n    let nonces = (0 .. generators.len())\n      .map(|i| NonceCommitments::read(reader, &generators[i]))\n      .collect::<Result<Vec<NonceCommitments<C>>, _>>()?;\n\n    Ok(Commitments { nonces })\n  }\n\n  pub(crate) fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {\n    for nonce in &self.nonces {\n      nonce.write(writer)?;\n    }\n    Ok(())\n  }\n}\n\npub(crate) struct IndividualBinding<C: Curve> {\n  commitments: Commitments<C>,\n  binding_factors: Option<Vec<C::F>>,\n}\n\npub(crate) struct BindingFactor<C: Curve>(pub(crate) HashMap<Participant, IndividualBinding<C>>);\n\nimpl<C: Curve> BindingFactor<C> {\n  pub(crate) fn insert(&mut self, i: Participant, commitments: Commitments<C>) {\n    self.0.insert(i, IndividualBinding { commitments, binding_factors: None });\n  }\n\n  pub(crate) fn calculate_binding_factors<T: Clone + Transcript>(&mut self, transcript: &T) {\n    for (l, binding) in &mut self.0 {\n      let mut transcript = transcript.clone();\n      transcript.append_message(b\"participant\", C::F::from(u64::from(u16::from(*l))).to_repr());\n      // It *should* be perfectly fine to reuse a binding factor for multiple nonces\n      // This generates a binding factor per nonce just to ensure it never comes up as a question\n      binding.binding_factors = Some(\n        (0 .. 
binding.commitments.nonces.len())\n          .map(|_| C::hash_binding_factor(transcript.challenge(b\"rho\").as_ref()))\n          .collect(),\n      );\n    }\n  }\n\n  pub(crate) fn binding_factors(&self, i: Participant) -> &[C::F] {\n    self.0[&i].binding_factors.as_ref().unwrap()\n  }\n\n  // Get the bound nonces for a specific party\n  pub(crate) fn bound(&self, l: Participant) -> Vec<Vec<C::G>> {\n    let mut res = vec![];\n    for (i, (nonce, rho)) in\n      self.0[&l].commitments.nonces.iter().zip(self.binding_factors(l).iter()).enumerate()\n    {\n      res.push(vec![]);\n      for generator in &nonce.generators {\n        res[i].push(generator.0[0] + (generator.0[1] * rho));\n      }\n    }\n    res\n  }\n\n  // Get the nonces for this signing session\n  pub(crate) fn nonces(&self, planned_nonces: &[Vec<C::G>]) -> Vec<Vec<C::G>> {\n    let mut nonces = Vec::with_capacity(planned_nonces.len());\n    for n in 0 .. planned_nonces.len() {\n      nonces.push(Vec::with_capacity(planned_nonces[n].len()));\n      for g in 0 .. planned_nonces[n].len() {\n        #[allow(non_snake_case)]\n        let mut D = C::G::identity();\n        let mut statements = Vec::with_capacity(self.0.len());\n        #[allow(non_snake_case)]\n        for IndividualBinding { commitments, binding_factors } in self.0.values() {\n          D += commitments.nonces[n].generators[g].0[0];\n          statements\n            .push((binding_factors.as_ref().unwrap()[n], commitments.nonces[n].generators[g].0[1]));\n        }\n        nonces[n].push(D + multiexp_vartime(&statements));\n      }\n    }\n    nonces\n  }\n}\n"
  },
  {
    "path": "crypto/frost/src/sign.rs",
    "content": "use core::{ops::Deref, fmt::Debug};\nuse std::{\n  io::{self, Read, Write},\n  collections::HashMap,\n};\n\nuse rand_core::{RngCore, CryptoRng, SeedableRng};\nuse rand_chacha::ChaCha20Rng;\n\nuse zeroize::{Zeroize, Zeroizing};\n\nuse transcript::Transcript;\n\nuse ciphersuite::group::{\n  ff::{Field, PrimeField},\n  GroupEncoding,\n};\nuse multiexp::BatchVerifier;\n\nuse crate::{\n  curve::Curve,\n  Participant, FrostError, ThresholdParams, ThresholdKeys, ThresholdView,\n  algorithm::{WriteAddendum, Addendum, Algorithm},\n  validate_map,\n};\n\npub(crate) use crate::nonce::*;\n\n/// Trait enabling writing preprocesses and signature shares.\npub trait Writable {\n  fn write<W: Write>(&self, writer: &mut W) -> io::Result<()>;\n\n  fn serialize(&self) -> Vec<u8> {\n    let mut buf = vec![];\n    self.write(&mut buf).unwrap();\n    buf\n  }\n}\n\nimpl<T: Writable> Writable for Vec<T> {\n  fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {\n    for w in self {\n      w.write(writer)?;\n    }\n    Ok(())\n  }\n}\n\n// Pairing of an Algorithm with a ThresholdKeys instance.\n#[derive(Zeroize)]\nstruct Params<C: Curve, A: Algorithm<C>> {\n  // Skips the algorithm due to being too large a bound to feasibly enforce on users\n  #[zeroize(skip)]\n  algorithm: A,\n  keys: ThresholdKeys<C>,\n}\n\nimpl<C: Curve, A: Algorithm<C>> Params<C, A> {\n  fn new(algorithm: A, keys: ThresholdKeys<C>) -> Params<C, A> {\n    Params { algorithm, keys }\n  }\n\n  fn multisig_params(&self) -> ThresholdParams {\n    self.keys.params()\n  }\n}\n\n/// Preprocess for an instance of the FROST signing protocol.\n#[derive(Clone, PartialEq, Eq)]\npub struct Preprocess<C: Curve, A: Addendum> {\n  pub(crate) commitments: Commitments<C>,\n  /// The addendum used by the algorithm.\n  pub addendum: A,\n}\n\nimpl<C: Curve, A: Addendum> Writable for Preprocess<C, A> {\n  fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {\n    self.commitments.write(writer)?;\n    
self.addendum.write(writer)\n  }\n}\n\n/// A cached preprocess.\n///\n/// A preprocess MUST only be used once. Reuse will enable third-party recovery of your private\n/// key share. Additionally, this MUST be handled with the same security as your private key share,\n/// as knowledge of it also enables recovery.\n// Directly exposes the [u8; 32] member to void needing to route through std::io interfaces.\n// Still uses Zeroizing internally so when users grab it, they have a higher likelihood of\n// appreciating how to handle it and don't immediately start copying it just by grabbing it.\n#[derive(Zeroize)]\npub struct CachedPreprocess(pub Zeroizing<[u8; 32]>);\n\n/// Trait for the initial state machine of a two-round signing protocol.\npub trait PreprocessMachine: Send {\n  /// Preprocess message for this machine.\n  type Preprocess: Clone + PartialEq + Writable;\n  /// Signature produced by this machine.\n  type Signature: Clone + PartialEq + Debug;\n  /// SignMachine this PreprocessMachine turns into.\n  type SignMachine: SignMachine<Self::Signature, Preprocess = Self::Preprocess>;\n\n  /// Perform the preprocessing round required in order to sign.\n  /// Returns a preprocess message to be broadcast to all participants, over an authenticated\n  /// channel.\n  fn preprocess<R: RngCore + CryptoRng>(self, rng: &mut R)\n    -> (Self::SignMachine, Self::Preprocess);\n}\n\n/// State machine which manages signing for an arbitrary signature algorithm.\npub struct AlgorithmMachine<C: Curve, A: Algorithm<C>> {\n  params: Params<C, A>,\n}\n\nimpl<C: Curve, A: Algorithm<C>> AlgorithmMachine<C, A> {\n  /// Creates a new machine to generate a signature with the specified keys.\n  pub fn new(algorithm: A, keys: ThresholdKeys<C>) -> AlgorithmMachine<C, A> {\n    AlgorithmMachine { params: Params::new(algorithm, keys) }\n  }\n\n  fn seeded_preprocess(\n    self,\n    seed: CachedPreprocess,\n  ) -> (AlgorithmSignMachine<C, A>, Preprocess<C, A::Addendum>) {\n    let mut params = 
self.params;\n\n    let mut rng = ChaCha20Rng::from_seed(*seed.0);\n    let (nonces, commitments) = Commitments::new::<_>(\n      &mut rng,\n      params.keys.original_secret_share(),\n      &params.algorithm.nonces(),\n    );\n    let addendum = params.algorithm.preprocess_addendum(&mut rng, &params.keys);\n\n    let preprocess = Preprocess { commitments, addendum };\n\n    // Also obtain entropy to randomly sort the included participants if we need to identify blame\n    let mut blame_entropy = [0; 32];\n    rng.fill_bytes(&mut blame_entropy);\n    (\n      AlgorithmSignMachine { params, seed, nonces, preprocess: preprocess.clone(), blame_entropy },\n      preprocess,\n    )\n  }\n\n  #[cfg(any(test, feature = \"tests\"))]\n  pub(crate) fn unsafe_override_preprocess(\n    self,\n    nonces: Vec<Nonce<C>>,\n    preprocess: Preprocess<C, A::Addendum>,\n  ) -> AlgorithmSignMachine<C, A> {\n    AlgorithmSignMachine {\n      params: self.params,\n      seed: CachedPreprocess(Zeroizing::new([0; 32])),\n\n      nonces,\n      preprocess,\n      // Uses 0s since this is just used to protect against a malicious participant from\n      // deliberately increasing the amount of time needed to identify them (and is accordingly\n      // not necessary to function)\n      blame_entropy: [0; 32],\n    }\n  }\n}\n\nimpl<C: Curve, A: Algorithm<C>> PreprocessMachine for AlgorithmMachine<C, A> {\n  type Preprocess = Preprocess<C, A::Addendum>;\n  type Signature = A::Signature;\n  type SignMachine = AlgorithmSignMachine<C, A>;\n\n  fn preprocess<R: RngCore + CryptoRng>(\n    self,\n    rng: &mut R,\n  ) -> (Self::SignMachine, Preprocess<C, A::Addendum>) {\n    let mut seed = CachedPreprocess(Zeroizing::new([0; 32]));\n    rng.fill_bytes(seed.0.as_mut());\n    self.seeded_preprocess(seed)\n  }\n}\n\n/// Share of a signature produced via FROST.\n#[derive(Clone, PartialEq, Eq)]\npub struct SignatureShare<C: Curve>(C::F);\nimpl<C: Curve> Writable for SignatureShare<C> {\n  fn write<W: 
Write>(&self, writer: &mut W) -> io::Result<()> {\n    writer.write_all(self.0.to_repr().as_ref())\n  }\n}\n#[cfg(any(test, feature = \"tests\"))]\nimpl<C: Curve> SignatureShare<C> {\n  pub(crate) fn invalidate(&mut self) {\n    self.0 += C::F::ONE;\n  }\n}\n\n/// Trait for the second machine of a two-round signing protocol.\npub trait SignMachine<S>: Send + Sync + Sized {\n  /// Params used to instantiate this machine which can be used to rebuild from a cache.\n  type Params;\n  /// Keys used for signing operations.\n  type Keys;\n  /// Preprocess message for this machine.\n  type Preprocess: Clone + PartialEq + Writable;\n  /// SignatureShare message for this machine.\n  type SignatureShare: Clone + PartialEq + Writable;\n  /// SignatureMachine this SignMachine turns into.\n  type SignatureMachine: SignatureMachine<S, SignatureShare = Self::SignatureShare>;\n\n  /// Cache this preprocess for usage later.\n  ///\n  /// This cached preprocess MUST only be used once. Reuse of it enables recovery of your private\n  /// key share. Third-party recovery of a cached preprocess also enables recovery of your private\n  /// key share, so this MUST be treated with the same security as your private key share.\n  fn cache(self) -> CachedPreprocess;\n\n  /// Create a sign machine from a cached preprocess.\n  ///\n  /// After this, the preprocess must be deleted so it's never reused. Any reuse will presumably\n  /// cause the signer to leak their secret share.\n  fn from_cache(\n    params: Self::Params,\n    keys: Self::Keys,\n    cache: CachedPreprocess,\n  ) -> (Self, Self::Preprocess);\n\n  /// Read a Preprocess message.\n  ///\n  /// Despite taking self, this does not save the preprocess. It must be externally cached and\n  /// passed into sign.\n  fn read_preprocess<R: Read>(&self, reader: &mut R) -> io::Result<Self::Preprocess>;\n\n  /// Sign a message.\n  ///\n  /// Takes in the participants' preprocess messages. 
Returns the signature share to be broadcast\n  /// to all participants, over an authenticated channel. The parties who participate here will\n  /// become the signing set for this session.\n  fn sign(\n    self,\n    commitments: HashMap<Participant, Self::Preprocess>,\n    msg: &[u8],\n  ) -> Result<(Self::SignatureMachine, Self::SignatureShare), FrostError>;\n}\n\n/// Next step of the state machine for the signing process.\n#[derive(Zeroize)]\npub struct AlgorithmSignMachine<C: Curve, A: Algorithm<C>> {\n  params: Params<C, A>,\n  seed: CachedPreprocess,\n\n  pub(crate) nonces: Vec<Nonce<C>>,\n  // Skips the preprocess due to being too large a bound to feasibly enforce on users\n  #[zeroize(skip)]\n  pub(crate) preprocess: Preprocess<C, A::Addendum>,\n  pub(crate) blame_entropy: [u8; 32],\n}\n\nimpl<C: Curve, A: Algorithm<C>> SignMachine<A::Signature> for AlgorithmSignMachine<C, A> {\n  type Params = A;\n  type Keys = ThresholdKeys<C>;\n  type Preprocess = Preprocess<C, A::Addendum>;\n  type SignatureShare = SignatureShare<C>;\n  type SignatureMachine = AlgorithmSignatureMachine<C, A>;\n\n  fn cache(self) -> CachedPreprocess {\n    self.seed\n  }\n\n  fn from_cache(\n    algorithm: A,\n    keys: ThresholdKeys<C>,\n    cache: CachedPreprocess,\n  ) -> (Self, Self::Preprocess) {\n    AlgorithmMachine::new(algorithm, keys).seeded_preprocess(cache)\n  }\n\n  fn read_preprocess<R: Read>(&self, reader: &mut R) -> io::Result<Self::Preprocess> {\n    Ok(Preprocess {\n      commitments: Commitments::read::<_>(reader, &self.params.algorithm.nonces())?,\n      addendum: self.params.algorithm.read_addendum(reader)?,\n    })\n  }\n\n  fn sign(\n    mut self,\n    mut preprocesses: HashMap<Participant, Preprocess<C, A::Addendum>>,\n    msg: &[u8],\n  ) -> Result<(Self::SignatureMachine, SignatureShare<C>), FrostError> {\n    let multisig_params = self.params.multisig_params();\n\n    let mut included = Vec::with_capacity(preprocesses.len() + 1);\n    
included.push(multisig_params.i());\n    for l in preprocesses.keys() {\n      included.push(*l);\n    }\n    included.sort_unstable();\n\n    // Included < threshold\n    if included.len() < usize::from(multisig_params.t()) {\n      Err(FrostError::InvalidSigningSet(\"not enough signers\"))?;\n    }\n    // OOB index\n    if u16::from(included[included.len() - 1]) > multisig_params.n() {\n      Err(FrostError::InvalidParticipant(multisig_params.n(), included[included.len() - 1]))?;\n    }\n    // Same signer included multiple times\n    for i in 0 .. (included.len() - 1) {\n      if included[i] == included[i + 1] {\n        Err(FrostError::DuplicatedParticipant(included[i]))?;\n      }\n    }\n\n    let view = self.params.keys.view(included.clone()).unwrap();\n    validate_map(&preprocesses, &included, multisig_params.i())?;\n\n    {\n      // Domain separate FROST\n      self.params.algorithm.transcript().domain_separate(b\"FROST\");\n    }\n\n    let nonces = self.params.algorithm.nonces();\n    #[allow(non_snake_case)]\n    let mut B = BindingFactor(HashMap::<Participant, _>::with_capacity(included.len()));\n    {\n      // Parse the preprocesses\n      for l in &included {\n        {\n          self\n            .params\n            .algorithm\n            .transcript()\n            .append_message(b\"participant\", C::F::from(u64::from(u16::from(*l))).to_repr());\n        }\n\n        if *l == self.params.keys.params().i() {\n          let commitments = self.preprocess.commitments.clone();\n          commitments.transcript(self.params.algorithm.transcript());\n\n          let addendum = self.preprocess.addendum.clone();\n          {\n            let mut buf = vec![];\n            addendum.write(&mut buf).unwrap();\n            self.params.algorithm.transcript().append_message(b\"addendum\", buf);\n          }\n\n          B.insert(*l, commitments);\n          self.params.algorithm.process_addendum(&view, *l, addendum)?;\n        } else {\n          let 
preprocess = preprocesses.remove(l).unwrap();\n          preprocess.commitments.transcript(self.params.algorithm.transcript());\n          {\n            let mut buf = vec![];\n            preprocess.addendum.write(&mut buf).unwrap();\n            self.params.algorithm.transcript().append_message(b\"addendum\", buf);\n          }\n\n          B.insert(*l, preprocess.commitments);\n          self.params.algorithm.process_addendum(&view, *l, preprocess.addendum)?;\n        }\n      }\n\n      // Re-format into the FROST-expected rho transcript\n      let mut rho_transcript = A::Transcript::new(b\"FROST_rho\");\n      rho_transcript.append_message(b\"group_key\", self.params.keys.group_key().to_bytes());\n      rho_transcript.append_message(b\"message\", C::hash_msg(msg));\n      rho_transcript.append_message(\n        b\"preprocesses\",\n        C::hash_commitments(self.params.algorithm.transcript().challenge(b\"preprocesses\").as_ref()),\n      );\n\n      // Generate the per-signer binding factors\n      B.calculate_binding_factors(&rho_transcript);\n\n      // Merge the rho transcript back into the global one to ensure its advanced, while\n      // simultaneously committing to everything\n      self\n        .params\n        .algorithm\n        .transcript()\n        .append_message(b\"rho_transcript\", rho_transcript.challenge(b\"merge\"));\n    }\n\n    #[allow(non_snake_case)]\n    let Rs = B.nonces(&nonces);\n\n    let our_binding_factors = B.binding_factors(multisig_params.i());\n    let nonces = self\n      .nonces\n      .drain(..)\n      .enumerate()\n      .map(|(n, nonces)| {\n        let [base, mut actual] = nonces.0;\n        *actual *= our_binding_factors[n];\n        *actual += base.deref();\n        actual\n      })\n      .collect::<Vec<_>>();\n\n    let share = self.params.algorithm.sign_share(&view, &Rs, nonces, msg);\n\n    Ok((\n      AlgorithmSignatureMachine {\n        params: self.params,\n        view,\n        B,\n        Rs,\n        
share,\n        blame_entropy: self.blame_entropy,\n      },\n      SignatureShare(share),\n    ))\n  }\n}\n\n/// Trait for the final machine of a two-round signing protocol.\npub trait SignatureMachine<S>: Send + Sync {\n  /// SignatureShare message for this machine.\n  type SignatureShare: Clone + PartialEq + Writable;\n\n  /// Read a Signature Share message.\n  fn read_share<R: Read>(&self, reader: &mut R) -> io::Result<Self::SignatureShare>;\n\n  /// Complete signing.\n  /// Takes in everyone elses' shares. Returns the signature.\n  fn complete(self, shares: HashMap<Participant, Self::SignatureShare>) -> Result<S, FrostError>;\n}\n\n/// Final step of the state machine for the signing process.\n///\n/// This may panic if an invalid algorithm is provided.\n#[allow(non_snake_case)]\npub struct AlgorithmSignatureMachine<C: Curve, A: Algorithm<C>> {\n  params: Params<C, A>,\n  view: ThresholdView<C>,\n  B: BindingFactor<C>,\n  Rs: Vec<Vec<C::G>>,\n  share: C::F,\n  blame_entropy: [u8; 32],\n}\n\nimpl<C: Curve, A: Algorithm<C>> SignatureMachine<A::Signature> for AlgorithmSignatureMachine<C, A> {\n  type SignatureShare = SignatureShare<C>;\n\n  fn read_share<R: Read>(&self, reader: &mut R) -> io::Result<SignatureShare<C>> {\n    Ok(SignatureShare(C::read_F(reader)?))\n  }\n\n  fn complete(\n    self,\n    mut shares: HashMap<Participant, SignatureShare<C>>,\n  ) -> Result<A::Signature, FrostError> {\n    let params = self.params.multisig_params();\n    validate_map(&shares, self.view.included(), params.i())?;\n\n    let mut responses = HashMap::new();\n    responses.insert(params.i(), self.share);\n    let mut sum = self.share;\n    for (l, share) in shares.drain() {\n      responses.insert(l, share.0);\n      sum += share.0;\n    }\n\n    // Perform signature validation instead of individual share validation\n    // For the success route, which should be much more frequent, this should be faster\n    // It also acts as an integrity check of this library's signing 
function\n    if let Some(sig) = self.params.algorithm.verify(self.view.group_key(), &self.Rs, sum) {\n      return Ok(sig);\n    }\n\n    // We could remove blame_entropy by taking in an RNG here\n    // Considering we don't need any RNG for a valid signature, and we only use the RNG here for\n    // performance reasons, it doesn't feel worthwhile to include as an argument to every\n    // implementor of the trait\n    let mut rng = ChaCha20Rng::from_seed(self.blame_entropy);\n    let mut batch = BatchVerifier::new(self.view.included().len());\n    for l in self.view.included() {\n      if let Ok(statements) = self.params.algorithm.verify_share(\n        self.view.verification_share(*l),\n        &self.B.bound(*l),\n        responses[l],\n      ) {\n        batch.queue(&mut rng, *l, statements);\n      } else {\n        Err(FrostError::InvalidShare(*l))?;\n      }\n    }\n\n    if let Err(l) = batch.verify_vartime_with_vartime_blame() {\n      Err(FrostError::InvalidShare(l))?;\n    }\n\n    // If everyone has a valid share, and there were enough participants, this should've worked\n    // The only known way to cause this, for valid parameters/algorithms, is to deserialize a\n    // semantically invalid FrostKeys\n    Err(FrostError::InternalError(\"everyone had a valid share yet the signature was still invalid\"))\n  }\n}\n"
  },
  {
    "path": "crypto/frost/src/tests/literal/dalek.rs",
    "content": "use rand_core::OsRng;\n\nuse crate::{\n  curve,\n  tests::vectors::{Vectors, test_with_vectors},\n};\n\n#[cfg(feature = \"ristretto\")]\n#[test]\nfn ristretto_vectors() {\n  test_with_vectors::<_, curve::Ristretto, curve::IetfRistrettoHram>(\n    &mut OsRng,\n    &Vectors::from(\n      serde_json::from_str::<serde_json::Value>(include_str!(\n        \"vectors/frost-ristretto255-sha512.json\"\n      ))\n      .unwrap(),\n    ),\n  );\n}\n\n#[cfg(feature = \"ed25519\")]\n#[test]\nfn ed25519_vectors() {\n  test_with_vectors::<_, curve::Ed25519, curve::IetfEd25519Hram>(\n    &mut OsRng,\n    &Vectors::from(\n      serde_json::from_str::<serde_json::Value>(include_str!(\"vectors/frost-ed25519-sha512.json\"))\n        .unwrap(),\n    ),\n  );\n}\n"
  },
  {
    "path": "crypto/frost/src/tests/literal/ed448.rs",
    "content": "use rand_core::OsRng;\n\nuse ciphersuite::Ciphersuite;\n\nuse schnorr::SchnorrSignature;\n\nuse crate::{\n  curve::{Ed448, Ietf8032Ed448Hram, IetfEd448Hram},\n  tests::vectors::{Vectors, test_with_vectors},\n};\n\n// This is a vector from RFC 8032 to sanity check the HRAM is properly implemented\n// The RFC 8032 Ed448 HRAM is much more complex than the other HRAMs, hence why it's helpful to\n// have additional testing for it\n// Additionally, FROST, despite being supposed to use the RFC 8032 HRAMs, originally applied\n// Ed25519's HRAM to both Ed25519 and Ed448\n// This test was useful when proposing the corrections to the spec to demonstrate the correctness\n// the new algorithm/vectors\n// While we could test all Ed448 vectors here, this is sufficient for sanity\n#[test]\nfn ed448_8032_vector() {\n  let context = hex::decode(\"666f6f\").unwrap();\n\n  #[allow(non_snake_case)]\n  let A = Ed448::read_G::<&[u8]>(\n    &mut hex::decode(\n      \"43ba28f430cdff456ae531545f7ecd0ac834a55d9358c0372bfa0c6c\".to_owned() +\n        \"6798c0866aea01eb00742802b8438ea4cb82169c235160627b4c3a94\" +\n        \"80\",\n    )\n    .unwrap()\n    .as_ref(),\n  )\n  .unwrap();\n\n  let msg = hex::decode(\"03\").unwrap();\n\n  let sig = hex::decode(\n    \"d4f8f6131770dd46f40867d6fd5d5055de43541f8c5e35abbcd001b3\".to_owned() +\n      \"2a89f7d2151f7647f11d8ca2ae279fb842d607217fce6e042f6815ea\" +\n      \"00\" +\n      \"0c85741de5c8da1144a6a1aba7f96de42505d7a7298524fda538fccb\" +\n      \"bb754f578c1cad10d54d0d5428407e85dcbc98a49155c13764e66c3c\" +\n      \"00\",\n  )\n  .unwrap();\n  #[allow(non_snake_case)]\n  let R = Ed448::read_G::<&[u8]>(&mut sig.as_ref()).unwrap();\n  let s = Ed448::read_F::<&[u8]>(&mut &sig[57 ..]).unwrap();\n\n  assert!(\n    SchnorrSignature::<Ed448> { R, s }.verify(A, Ietf8032Ed448Hram::hram(&context, &R, &A, &msg))\n  );\n}\n\n#[test]\nfn ed448_vectors() {\n  test_with_vectors::<_, Ed448, IetfEd448Hram>(\n    &mut OsRng,\n    
&Vectors::from(\n      serde_json::from_str::<serde_json::Value>(include_str!(\"vectors/frost-ed448-shake256.json\"))\n        .unwrap(),\n    ),\n  );\n}\n"
  },
  {
    "path": "crypto/frost/src/tests/literal/kp256.rs",
    "content": "use rand_core::OsRng;\n\nuse crate::tests::vectors::{Vectors, test_with_vectors};\n\n#[cfg(feature = \"secp256k1\")]\nuse crate::curve::{Secp256k1, IetfSecp256k1Hram};\n\n#[cfg(feature = \"p256\")]\nuse crate::curve::{P256, IetfP256Hram};\n\n#[cfg(feature = \"secp256k1\")]\n#[test]\nfn secp256k1_vectors() {\n  test_with_vectors::<_, Secp256k1, IetfSecp256k1Hram>(\n    &mut OsRng,\n    &Vectors::from(\n      serde_json::from_str::<serde_json::Value>(include_str!(\n        \"vectors/frost-secp256k1-sha256.json\"\n      ))\n      .unwrap(),\n    ),\n  );\n}\n\n#[cfg(feature = \"p256\")]\n#[test]\nfn p256_vectors() {\n  test_with_vectors::<_, P256, IetfP256Hram>(\n    &mut OsRng,\n    &Vectors::from(\n      serde_json::from_str::<serde_json::Value>(include_str!(\"vectors/frost-p256-sha256.json\"))\n        .unwrap(),\n    ),\n  );\n}\n"
  },
  {
    "path": "crypto/frost/src/tests/literal/mod.rs",
    "content": "#[cfg(any(feature = \"ristretto\", feature = \"ed25519\"))]\nmod dalek;\n#[cfg(any(feature = \"secp256k1\", feature = \"p256\"))]\nmod kp256;\n#[cfg(feature = \"ed448\")]\nmod ed448;\n"
  },
  {
    "path": "crypto/frost/src/tests/literal/vectors/frost-ed25519-sha512.json",
    "content": "{\n  \"config\": {\n    \"MAX_PARTICIPANTS\": \"3\",\n    \"NUM_PARTICIPANTS\": \"2\",\n    \"MIN_PARTICIPANTS\": \"2\",\n    \"name\": \"FROST(Ed25519, SHA-512)\",\n    \"group\": \"ed25519\",\n    \"hash\": \"SHA-512\"\n  },\n  \"inputs\": {\n    \"participant_list\": [\n      1,\n      3\n    ],\n    \"group_secret_key\": \"7b1c33d3f5291d85de664833beb1ad469f7fb6025a0ec78b3a790c6e13a98304\",\n    \"group_public_key\": \"15d21ccd7ee42959562fc8aa63224c8851fb3ec85a3faf66040d380fb9738673\",\n    \"message\": \"74657374\",\n    \"share_polynomial_coefficients\": [\n      \"178199860edd8c62f5212ee91eff1295d0d670ab4ed4506866bae57e7030b204\"\n    ],\n    \"participant_shares\": [\n      {\n        \"identifier\": 1,\n        \"participant_share\": \"929dcc590407aae7d388761cddb0c0db6f5627aea8e217f4a033f2ec83d93509\"\n      },\n      {\n        \"identifier\": 2,\n        \"participant_share\": \"a91e66e012e4364ac9aaa405fcafd370402d9859f7b6685c07eed76bf409e80d\"\n      },\n      {\n        \"identifier\": 3,\n        \"participant_share\": \"d3cb090a075eb154e82fdb4b3cb507f110040905468bb9c46da8bdea643a9a02\"\n      }\n    ]\n  },\n  \"round_one_outputs\": {\n    \"outputs\": [\n      {\n        \"identifier\": 1,\n        \"hiding_nonce_randomness\": \"0fd2e39e111cdc266f6c0f4d0fd45c947761f1f5d3cb583dfcb9bbaf8d4c9fec\",\n        \"binding_nonce_randomness\": \"69cd85f631d5f7f2721ed5e40519b1366f340a87c2f6856363dbdcda348a7501\",\n        \"hiding_nonce\": \"812d6104142944d5a55924de6d49940956206909f2acaeedecda2b726e630407\",\n        \"binding_nonce\": \"b1110165fc2334149750b28dd813a39244f315cff14d4e89e6142f262ed83301\",\n        \"hiding_nonce_commitment\": \"b5aa8ab305882a6fc69cbee9327e5a45e54c08af61ae77cb8207be3d2ce13de3\",\n        \"binding_nonce_commitment\": \"67e98ab55aa310c3120418e5050c9cf76cf387cb20ac9e4b6fdb6f82a469f932\",\n        \"binding_factor_input\": 
\"15d21ccd7ee42959562fc8aa63224c8851fb3ec85a3faf66040d380fb9738673504df914fa965023fb75c25ded4bb260f417de6d32e5c442c6ba313791cc9a4948d6273e8d3511f93348ea7a708a9b862bc73ba2a79cfdfe07729a193751cbc973af46d8ac3440e518d4ce440a0e7d4ad5f62ca8940f32de6d8dc00fc12c660b817d587d82f856d277ce6473cae6d2f5763f7da2e8b4d799a3f3e725d4522ec70100000000000000000000000000000000000000000000000000000000000000\",\n        \"binding_factor\": \"f2cb9d7dd9beff688da6fcc83fa89046b3479417f47f55600b106760eb3b5603\"\n      },\n      {\n        \"identifier\": 3,\n        \"hiding_nonce_randomness\": \"86d64a260059e495d0fb4fcc17ea3da7452391baa494d4b00321098ed2a0062f\",\n        \"binding_nonce_randomness\": \"13e6b25afb2eba51716a9a7d44130c0dbae0004a9ef8d7b5550c8a0e07c61775\",\n        \"hiding_nonce\": \"c256de65476204095ebdc01bd11dc10e57b36bc96284595b8215222374f99c0e\",\n        \"binding_nonce\": \"243d71944d929063bc51205714ae3c2218bd3451d0214dfb5aeec2a90c35180d\",\n        \"hiding_nonce_commitment\": \"cfbdb165bd8aad6eb79deb8d287bcc0ab6658ae57fdcc98ed12c0669e90aec91\",\n        \"binding_nonce_commitment\": \"7487bc41a6e712eea2f2af24681b58b1cf1da278ea11fe4e8b78398965f13552\",\n        \"binding_factor_input\": \"15d21ccd7ee42959562fc8aa63224c8851fb3ec85a3faf66040d380fb9738673504df914fa965023fb75c25ded4bb260f417de6d32e5c442c6ba313791cc9a4948d6273e8d3511f93348ea7a708a9b862bc73ba2a79cfdfe07729a193751cbc973af46d8ac3440e518d4ce440a0e7d4ad5f62ca8940f32de6d8dc00fc12c660b817d587d82f856d277ce6473cae6d2f5763f7da2e8b4d799a3f3e725d4522ec70300000000000000000000000000000000000000000000000000000000000000\",\n        \"binding_factor\": \"b087686bf35a13f3dc78e780a34b0fe8a77fef1b9938c563f5573d71d8d7890f\"\n      }\n    ]\n  },\n  \"round_two_outputs\": {\n    \"outputs\": [\n      {\n        \"identifier\": 1,\n        \"sig_share\": \"001719ab5a53ee1a12095cd088fd149702c0720ce5fd2f29dbecf24b7281b603\"\n      },\n      {\n        \"identifier\": 3,\n        \"sig_share\": 
\"bd86125de990acc5e1f13781d8e32c03a9bbd4c53539bbc106058bfd14326007\"\n      }\n    ]\n  },\n  \"final_output\": {\n    \"sig\": \"36282629c383bb820a88b71cae937d41f2f2adfcc3d02e55507e2fb9e2dd3cbebd9d2b0844e49ae0f3fa935161e1419aab7b47d21a37ebeae1f17d4987b3160b\"\n  }\n}"
  },
  {
    "path": "crypto/frost/src/tests/literal/vectors/frost-ed448-shake256.json",
    "content": "{\n  \"config\": {\n    \"MAX_PARTICIPANTS\": \"3\",\n    \"NUM_PARTICIPANTS\": \"2\",\n    \"MIN_PARTICIPANTS\": \"2\",\n    \"name\": \"FROST(Ed448, SHAKE256)\",\n    \"group\": \"ed448\",\n    \"hash\": \"SHAKE256\"\n  },\n  \"inputs\": {\n    \"participant_list\": [\n      1,\n      3\n    ],\n    \"group_secret_key\": \"6298e1eef3c379392caaed061ed8a31033c9e9e3420726f23b404158a401cd9df24632adfe6b418dc942d8a091817dd8bd70e1c72ba52f3c00\",\n    \"group_public_key\": \"3832f82fda00ff5365b0376df705675b63d2a93c24c6e81d40801ba265632be10f443f95968fadb70d10786827f30dc001c8d0f9b7c1d1b000\",\n    \"message\": \"74657374\",\n    \"share_polynomial_coefficients\": [\n      \"dbd7a514f7a731976620f0436bd135fe8dddc3fadd6e0d13dbd58a1981e587d377d48e0b7ce4e0092967c5e85884d0275a7a740b6abdcd0500\"\n    ],\n    \"participant_shares\": [\n      {\n        \"identifier\": 1,\n        \"participant_share\": \"4a2b2f5858a932ad3d3b18bd16e76ced3070d72fd79ae4402df201f525e754716a1bc1b87a502297f2a99d89ea054e0018eb55d39562fd0100\"\n      },\n      {\n        \"identifier\": 2,\n        \"participant_share\": \"2503d56c4f516444a45b080182b8a2ebbe4d9b2ab509f25308c88c0ea7ccdc44e2ef4fc4f63403a11b116372438a1e287265cadeff1fcb0700\"\n      },\n      {\n        \"identifier\": 3,\n        \"participant_share\": \"00db7a8146f995db0a7cf844ed89d8e94c2b5f259378ff66e39d172828b264185ac4decf7219e4aa4478285b9c0eef4fccdf3eea69dd980d00\"\n      }\n    ]\n  },\n  \"round_one_outputs\": {\n    \"outputs\": [\n      {\n        \"identifier\": 1,\n        \"hiding_nonce_randomness\": \"9cda90c98863ef3141b75f09375757286b4bc323dd61aeb45c07de45e4937bbd\",\n        \"binding_nonce_randomness\": \"781bf4881ffe1aa06f9341a747179f07a49745f8cd37d4696f226aa065683c0a\",\n        \"hiding_nonce\": \"f922beb51a5ac88d1e862278d89e12c05263b945147db04b9566acb2b5b0f7422ccea4f9286f4f80e6b646e72143eeaecc0e5988f8b2b93100\",\n        \"binding_nonce\": 
\"1890f16a120cdeac092df29955a29c7cf29c13f6f7be60e63d63f3824f2d37e9c3a002dfefc232972dc08658a8c37c3ec06a0c5dc146150500\",\n        \"hiding_nonce_commitment\": \"3518c2246c874569e54ab254cb1da666ca30f7879605cc43b4d2c47a521f8b5716080ab723d3a0cd04b7e41f3cc1d3031c94ccf3829b23fe80\",\n        \"binding_nonce_commitment\": \"11b3d5220c57d02057497de3c4eebab384900206592d877059b0a5f1d5250d002682f0e22dff096c46bb81b46d60fcfe7752ed47cea76c3900\",\n        \"binding_factor_input\": \"3832f82fda00ff5365b0376df705675b63d2a93c24c6e81d40801ba265632be10f443f95968fadb70d10786827f30dc001c8d0f9b7c1d1b000e9a0f30b97fe77ef751b08d4e252a3719ae9135e7f7926f7e3b7dd6656b27089ca354997fe5a633aa0946c89f022462e7e9d50fd6ef313f72d956ea4571089427daa1862f623a41625177d91e4a8f350ce9c8bd3bc7c766515dc1dd3a0eab93777526b616cccb148fe1e5992dc1ae705c8ba2f97ca8983328d41d375ed1e5fde5c9d672121c9e8f177f4a1a9b2575961531b33f054451363c8f27618382cd66ce14ad93b68dac6a09f5edcbccc813906b3fc50b8fef1cc09757b06646f38ceed1674cd6ced28a59c93851b325c6a9ef6a4b3b88860b7138ee246034561c7460db0b3fae5010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\",\n        \"binding_factor\": \"71966390dfdbed73cf9b79486f3b70e23b243e6c40638fb55998642a60109daecbfcb879eed9fe7dbbed8d9e47317715a5740f772173342e00\"\n      },\n      {\n        \"identifier\": 3,\n        \"hiding_nonce_randomness\": \"b3adf97ceea770e703ab295babf311d77e956a20d3452b4b3344aa89a828e6df\",\n        \"binding_nonce_randomness\": \"81dbe7742b0920930299197322b255734e52bbb91f50cfe8ce689f56fadbce31\",\n        \"hiding_nonce\": \"ccb5c1e82f23e0a4b966b824dbc7b0ef1cc5f56eeac2a4126e2b2143c5f3a4d890c52d27803abcf94927faf3fc405c0b2123a57a93cefa3b00\",\n        \"binding_nonce\": \"e089df9bf311cf711e2a24ea27af53e07b846d09692fe11035a1112f04d8b7462a62f34d8c01493a22b57a1cbf1f0a46c77d64d46449a90100\",\n        \"hiding_nonce_commitment\": 
\"1254546d7d104c04e4fbcf29e05747e2edd392f6787d05a6216f3713ef859efe573d180d291e48411e5e3006e9f90ee986ccc26b7a42490b80\",\n        \"binding_nonce_commitment\": \"3ef0cec20be15e56b3ddcb6f7b956fca0c8f71990f45316b537b4f64c5e8763e6629d7262ff7cd0235d0781f23be97bf8fa8817643ea19cd00\",\n        \"binding_factor_input\": \"3832f82fda00ff5365b0376df705675b63d2a93c24c6e81d40801ba265632be10f443f95968fadb70d10786827f30dc001c8d0f9b7c1d1b000e9a0f30b97fe77ef751b08d4e252a3719ae9135e7f7926f7e3b7dd6656b27089ca354997fe5a633aa0946c89f022462e7e9d50fd6ef313f72d956ea4571089427daa1862f623a41625177d91e4a8f350ce9c8bd3bc7c766515dc1dd3a0eab93777526b616cccb148fe1e5992dc1ae705c8ba2f97ca8983328d41d375ed1e5fde5c9d672121c9e8f177f4a1a9b2575961531b33f054451363c8f27618382cd66ce14ad93b68dac6a09f5edcbccc813906b3fc50b8fef1cc09757b06646f38ceed1674cd6ced28a59c93851b325c6a9ef6a4b3b88860b7138ee246034561c7460db0b3fae5030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\",\n        \"binding_factor\": \"236a6f7239ac2019334bad21323ec93bef2fead37bd55114356419f3fc1fb59f797f44079f28b1a64f51dd0a113f90f2c3a1c27d2faa4f1300\"\n      }\n    ]\n  },\n  \"round_two_outputs\": {\n    \"outputs\": [\n      {\n        \"identifier\": 1,\n        \"sig_share\": \"e1eb9bfbef792776b7103891032788406c070c5c315e3bf5d64acd46ea8855e85b53146150a09149665cbfec71626810b575e6f4dbe9ba3700\"\n      },\n      {\n        \"identifier\": 3,\n        \"sig_share\": \"815434eb0b9f9242d54b8baf2141fe28976cabe5f441ccfcd5ee7cdb4b52185b02b99e6de28e2ab086c7764068c5a01b5300986b9f084f3e00\"\n      }\n    ]\n  },\n  \"final_output\": {\n    \"sig\": \"cd642cba59c449dad8e896a78a60e8edfcbd9040df524370891ff8077d47ce721d683874483795f0d85efcbd642c4510614328605a19c6ed806ffb773b6956419537cdfdb2b2a51948733de192dcc4b82dc31580a536db6d435e0cb3ce322fbcf9ec23362dda27092c08767e607bf2093600\"\n  }\n}"
  },
  {
    "path": "crypto/frost/src/tests/literal/vectors/frost-p256-sha256.json",
    "content": "{\n  \"config\": {\n    \"MAX_PARTICIPANTS\": \"3\",\n    \"NUM_PARTICIPANTS\": \"2\",\n    \"MIN_PARTICIPANTS\": \"2\",\n    \"name\": \"FROST(P-256, SHA-256)\",\n    \"group\": \"P-256\",\n    \"hash\": \"SHA-256\"\n  },\n  \"inputs\": {\n    \"participant_list\": [\n      1,\n      3\n    ],\n    \"group_secret_key\": \"8ba9bba2e0fd8c4767154d35a0b7562244a4aaf6f36c8fb8735fa48b301bd8de\",\n    \"group_public_key\": \"023a309ad94e9fe8a7ba45dfc58f38bf091959d3c99cfbd02b4dc00585ec45ab70\",\n    \"message\": \"74657374\",\n    \"share_polynomial_coefficients\": [\n      \"80f25e6c0709353e46bfbe882a11bdbb1f8097e46340eb8673b7e14556e6c3a4\"\n    ],\n    \"participant_shares\": [\n      {\n        \"identifier\": 1,\n        \"participant_share\": \"0c9c1a0fe806c184add50bbdcac913dda73e482daf95dcb9f35dbb0d8a9f7731\"\n      },\n      {\n        \"identifier\": 2,\n        \"participant_share\": \"8d8e787bef0ff6c2f494ca45f4dad198c6bee01212d6c84067159c52e1863ad5\"\n      },\n      {\n        \"identifier\": 3,\n        \"participant_share\": \"0e80d6e8f6192c003b5488ce1eec8f5429587d48cf001541e713b2d53c09d928\"\n      }\n    ]\n  },\n  \"round_one_outputs\": {\n    \"outputs\": [\n      {\n        \"identifier\": 1,\n        \"hiding_nonce_randomness\": \"ec4c891c85fee802a9d757a67d1252e7f4e5efb8a538991ac18fbd0e06fb6fd3\",\n        \"binding_nonce_randomness\": \"9334e29d09061223f69a09421715a347e4e6deba77444c8f42b0c833f80f4ef9\",\n        \"hiding_nonce\": \"9f0542a5ba879a58f255c09f06da7102ef6a2dec6279700c656d58394d8facd4\",\n        \"binding_nonce\": \"6513dfe7429aa2fc972c69bb495b27118c45bbc6e654bb9dc9be55385b55c0d7\",\n        \"hiding_nonce_commitment\": \"0213b3e6298bf8ad46fd5e9389519a8665d63d98f4ec6a1fcca434e809d2d8070e\",\n        \"binding_nonce_commitment\": \"02188ff1390bf69374d7b272e454b1878ef10a6b6ea3ff36f114b300b4dbd5233b\",\n        \"binding_factor_input\": 
\"023a309ad94e9fe8a7ba45dfc58f38bf091959d3c99cfbd02b4dc00585ec45ab70825371853e974bc30ac5b947b216d70461919666584c70c51f9f56f117736c5d178dd0b521ad9c1abe98048419cbdec81504c85e12eb40e3bcb6ec73d3fc4afd0000000000000000000000000000000000000000000000000000000000000001\",\n        \"binding_factor\": \"7925f0d4693f204e6e59233e92227c7124664a99739d2c06b81cf64ddf90559e\"\n      },\n      {\n        \"identifier\": 3,\n        \"hiding_nonce_randomness\": \"c0451c5a0a5480d6c1f860e5db7d655233dca2669fd90ff048454b8ce983367b\",\n        \"binding_nonce_randomness\": \"2ba5f7793ae700e40e78937a82f407dd35e847e33d1e607b5c7eb6ed2a8ed799\",\n        \"hiding_nonce\": \"f73444a8972bcda9e506bbca3d2b1c083c10facdf4bb5d47fef7c2dc1d9f2a0d\",\n        \"binding_nonce\": \"44c6a29075d6e7e4f8b97796205f9e22062e7835141470afe9417fd317c1c303\",\n        \"hiding_nonce_commitment\": \"033ac9a5fe4a8b57316ba1c34e8a6de453033b750e8984924a984eb67a11e73a3f\",\n        \"binding_nonce_commitment\": \"03a7a2480ee16199262e648aea3acab628a53e9b8c1945078f2ddfbdc98b7df369\",\n        \"binding_factor_input\": \"023a309ad94e9fe8a7ba45dfc58f38bf091959d3c99cfbd02b4dc00585ec45ab70825371853e974bc30ac5b947b216d70461919666584c70c51f9f56f117736c5d178dd0b521ad9c1abe98048419cbdec81504c85e12eb40e3bcb6ec73d3fc4afd0000000000000000000000000000000000000000000000000000000000000003\",\n        \"binding_factor\": \"e10d24a8a403723bcb6f9bb4c537f316593683b472f7a89f166630dde11822c4\"\n      }\n    ]\n  },\n  \"round_two_outputs\": {\n    \"outputs\": [\n      {\n        \"identifier\": 1,\n        \"sig_share\": \"400308eaed7a2ddee02a265abe6a1cfe04d946ee8720768899619cfabe7a3aeb\"\n      },\n      {\n        \"identifier\": 3,\n        \"sig_share\": \"561da3c179edbb0502d941bb3e3ace3c37d122aaa46fb54499f15f3a3331de44\"\n      }\n    ]\n  },\n  \"final_output\": {\n    \"sig\": \"026d8d434874f87bdb7bc0dfd239b2c00639044f9dcb195e9a04426f70bfa4b70d9620acac6767e8e3e3036815fca4eb3a3caa69992b902bcd3352fc34f1ac192f\"\n  }\n}"
  },
  {
    "path": "crypto/frost/src/tests/literal/vectors/frost-ristretto255-sha512.json",
    "content": "{\n  \"config\": {\n    \"MAX_PARTICIPANTS\": \"3\",\n    \"NUM_PARTICIPANTS\": \"2\",\n    \"MIN_PARTICIPANTS\": \"2\",\n    \"name\": \"FROST(ristretto255, SHA-512)\",\n    \"group\": \"ristretto255\",\n    \"hash\": \"SHA-512\"\n  },\n  \"inputs\": {\n    \"participant_list\": [\n      1,\n      3\n    ],\n    \"group_secret_key\": \"1b25a55e463cfd15cf14a5d3acc3d15053f08da49c8afcf3ab265f2ebc4f970b\",\n    \"group_public_key\": \"e2a62f39eede11269e3bd5a7d97554f5ca384f9f6d3dd9c3c0d05083c7254f57\",\n    \"message\": \"74657374\",\n    \"share_polynomial_coefficients\": [\n      \"410f8b744b19325891d73736923525a4f596c805d060dfb9c98009d34e3fec02\"\n    ],\n    \"participant_shares\": [\n      {\n        \"identifier\": 1,\n        \"participant_share\": \"5c3430d391552f6e60ecdc093ff9f6f4488756aa6cebdbad75a768010b8f830e\"\n      },\n      {\n        \"identifier\": 2,\n        \"participant_share\": \"b06fc5eac20b4f6e1b271d9df2343d843e1e1fb03c4cbb673f2872d459ce6f01\"\n      },\n      {\n        \"identifier\": 3,\n        \"participant_share\": \"f17e505f0e2581c6acfe54d3846a622834b5e7b50cad9a2109a97ba7a80d5c04\"\n      }\n    ]\n  },\n  \"round_one_outputs\": {\n    \"outputs\": [\n      {\n        \"identifier\": 1,\n        \"hiding_nonce_randomness\": \"f595a133b4d95c6e1f79887220c8b275ce6277e7f68a6640e1e7140f9be2fb5c\",\n        \"binding_nonce_randomness\": \"34dd1001360e3513cb37bebfabe7be4a32c5bb91ba19fbd4360d039111f0fbdc\",\n        \"hiding_nonce\": \"214f2cabb86ed71427ea7ad4283b0fae26b6746c801ce824b83ceb2b99278c03\",\n        \"binding_nonce\": \"c9b8f5e16770d15603f744f8694c44e335e8faef00dad182b8d7a34a62552f0c\",\n        \"hiding_nonce_commitment\": \"965def4d0958398391fc06d8c2d72932608b1e6255226de4fb8d972dac15fd57\",\n        \"binding_nonce_commitment\": \"ec5170920660820007ae9e1d363936659ef622f99879898db86e5bf1d5bf2a14\",\n        \"binding_factor_input\": 
\"e2a62f39eede11269e3bd5a7d97554f5ca384f9f6d3dd9c3c0d05083c7254f572889dde2854e26377a16caf77dfee5f6be8fe5b4c80318da84698a4161021b033911db5ef8205362701bc9ecd983027814abee94f46d094943a2f4b79a6e4d4603e52c435d8344554942a0a472d8ad84320585b8da3ae5b9ce31cd1903f795c1af66de22af1a45f652cd05ee446b1b4091aaccc91e2471cd18a85a659cecd11f0100000000000000000000000000000000000000000000000000000000000000\",\n        \"binding_factor\": \"8967fd70fa06a58e5912603317fa94c77626395a695a0e4e4efc4476662eba0c\"\n      },\n      {\n        \"identifier\": 3,\n        \"hiding_nonce_randomness\": \"daa0cf42a32617786d390e0c7edfbf2efbd428037069357b5173ae61d6dd5d5e\",\n        \"binding_nonce_randomness\": \"b4387e72b2e4108ce4168931cc2c7fcce5f345a5297368952c18b5fc8473f050\",\n        \"hiding_nonce\": \"3f7927872b0f9051dd98dd73eb2b91494173bbe0feb65a3e7e58d3e2318fa40f\",\n        \"binding_nonce\": \"ffd79445fb8030f0a3ddd3861aa4b42b618759282bfe24f1f9304c7009728305\",\n        \"hiding_nonce_commitment\": \"480e06e3de182bf83489c45d7441879932fd7b434a26af41455756264fbd5d6e\",\n        \"binding_nonce_commitment\": \"3064746dfd3c1862ef58fc68c706da287dd925066865ceacc816b3a28c7b363b\",\n        \"binding_factor_input\": \"e2a62f39eede11269e3bd5a7d97554f5ca384f9f6d3dd9c3c0d05083c7254f572889dde2854e26377a16caf77dfee5f6be8fe5b4c80318da84698a4161021b033911db5ef8205362701bc9ecd983027814abee94f46d094943a2f4b79a6e4d4603e52c435d8344554942a0a472d8ad84320585b8da3ae5b9ce31cd1903f795c1af66de22af1a45f652cd05ee446b1b4091aaccc91e2471cd18a85a659cecd11f0300000000000000000000000000000000000000000000000000000000000000\",\n        \"binding_factor\": \"f2c1bb7c33a10511158c2f1766a4a5fadf9f86f2a92692ed333128277cc31006\"\n      }\n    ]\n  },\n  \"round_two_outputs\": {\n    \"outputs\": [\n      {\n        \"identifier\": 1,\n        \"sig_share\": \"9285f875923ce7e0c491a592e9ea1865ec1b823ead4854b48c8a46287749ee09\"\n      },\n      {\n        \"identifier\": 3,\n        \"sig_share\": 
\"7cb211fe0e3d59d25db6e36b3fb32344794139602a7b24f1ae0dc4e26ad7b908\"\n      }\n    ]\n  },\n  \"final_output\": {\n    \"sig\": \"fc45655fbc66bbffad654ea4ce5fdae253a49a64ace25d9adb62010dd9fb25552164141787162e5b4cab915b4aa45d94655dbb9ed7c378a53b980a0be220a802\"\n  }\n}"
  },
  {
    "path": "crypto/frost/src/tests/literal/vectors/frost-secp256k1-sha256.json",
    "content": "{\n  \"config\": {\n    \"MAX_PARTICIPANTS\": \"3\",\n    \"NUM_PARTICIPANTS\": \"2\",\n    \"MIN_PARTICIPANTS\": \"2\",\n    \"name\": \"FROST(secp256k1, SHA-256)\",\n    \"group\": \"secp256k1\",\n    \"hash\": \"SHA-256\"\n  },\n  \"inputs\": {\n    \"participant_list\": [\n      1,\n      3\n    ],\n    \"group_secret_key\": \"0d004150d27c3bf2a42f312683d35fac7394b1e9e318249c1bfe7f0795a83114\",\n    \"group_public_key\": \"02f37c34b66ced1fb51c34a90bdae006901f10625cc06c4f64663b0eae87d87b4f\",\n    \"message\": \"74657374\",\n    \"share_polynomial_coefficients\": [\n      \"fbf85eadae3058ea14f19148bb72b45e4399c0b16028acaf0395c9b03c823579\"\n    ],\n    \"participant_shares\": [\n      {\n        \"identifier\": 1,\n        \"participant_share\": \"08f89ffe80ac94dcb920c26f3f46140bfc7f95b493f8310f5fc1ea2b01f4254c\"\n      },\n      {\n        \"identifier\": 2,\n        \"participant_share\": \"04f0feac2edcedc6ce1253b7fab8c86b856a797f44d83d82a385554e6e401984\"\n      },\n      {\n        \"identifier\": 3,\n        \"participant_share\": \"00e95d59dd0d46b0e303e500b62b7ccb0e555d49f5b849f5e748c071da8c0dbc\"\n      }\n    ]\n  },\n  \"round_one_outputs\": {\n    \"outputs\": [\n      {\n        \"identifier\": 1,\n        \"hiding_nonce_randomness\": \"7ea5ed09af19f6ff21040c07ec2d2adbd35b759da5a401d4c99dd26b82391cb2\",\n        \"binding_nonce_randomness\": \"47acab018f116020c10cb9b9abdc7ac10aae1b48ca6e36dc15acb6ec9be5cdc5\",\n        \"hiding_nonce\": \"841d3a6450d7580b4da83c8e618414d0f024391f2aeb511d7579224420aa81f0\",\n        \"binding_nonce\": \"8d2624f532af631377f33cf44b5ac5f849067cae2eacb88680a31e77c79b5a80\",\n        \"hiding_nonce_commitment\": \"03c699af97d26bb4d3f05232ec5e1938c12f1e6ae97643c8f8f11c9820303f1904\",\n        \"binding_nonce_commitment\": \"02fa2aaccd51b948c9dc1a325d77226e98a5a3fe65fe9ba213761a60123040a45e\",\n        \"binding_factor_input\": 
\"02f37c34b66ced1fb51c34a90bdae006901f10625cc06c4f64663b0eae87d87b4fff9b5210ffbb3c07a73a7c8935be4a8c62cf015f6cf7ade6efac09a6513540fc3f5a816aaebc2114a811a415d7a55db7c5cbc1cf27183e79dd9def941b5d48010000000000000000000000000000000000000000000000000000000000000001\",\n        \"binding_factor\": \"3e08fe561e075c653cbfd46908a10e7637c70c74f0a77d5fd45d1a750c739ec6\"\n      },\n      {\n        \"identifier\": 3,\n        \"hiding_nonce_randomness\": \"e6cc56ccbd0502b3f6f831d91e2ebd01c4de0479e0191b66895a4ffd9b68d544\",\n        \"binding_nonce_randomness\": \"7203d55eb82a5ca0d7d83674541ab55f6e76f1b85391d2c13706a89a064fd5b9\",\n        \"hiding_nonce\": \"2b19b13f193f4ce83a399362a90cdc1e0ddcd83e57089a7af0bdca71d47869b2\",\n        \"binding_nonce\": \"7a443bde83dc63ef52dda354005225ba0e553243402a4705ce28ffaafe0f5b98\",\n        \"hiding_nonce_commitment\": \"03077507ba327fc074d2793955ef3410ee3f03b82b4cdc2370f71d865beb926ef6\",\n        \"binding_nonce_commitment\": \"02ad53031ddfbbacfc5fbda3d3b0c2445c8e3e99cbc4ca2db2aa283fa68525b135\",\n        \"binding_factor_input\": \"02f37c34b66ced1fb51c34a90bdae006901f10625cc06c4f64663b0eae87d87b4fff9b5210ffbb3c07a73a7c8935be4a8c62cf015f6cf7ade6efac09a6513540fc3f5a816aaebc2114a811a415d7a55db7c5cbc1cf27183e79dd9def941b5d48010000000000000000000000000000000000000000000000000000000000000003\",\n        \"binding_factor\": \"93f79041bb3fd266105be251adaeb5fd7f8b104fb554a4ba9a0becea48ddbfd7\"\n      }\n    ]\n  },\n  \"round_two_outputs\": {\n    \"outputs\": [\n      {\n        \"identifier\": 1,\n        \"sig_share\": \"c4fce1775a1e141fb579944166eab0d65eefe7b98d480a569bbbfcb14f91c197\"\n      },\n      {\n        \"identifier\": 3,\n        \"sig_share\": \"0160fd0d388932f4826d2ebcd6b9eaba734f7c71cf25b4279a4ca2581e47b18d\"\n      }\n    ]\n  },\n  \"final_output\": {\n    \"sig\": \"0205b6d04d3774c8929413e3c76024d54149c372d57aae62574ed74319b5ea14d0c65dde8492a7471437e6c2fe3da49b90d23f642b5c6dbe7e36089f096dd97324\"\n  }\n}"
  },
  {
    "path": "crypto/frost/src/tests/mod.rs",
    "content": "use std::collections::HashMap;\n\nuse rand_core::{RngCore, CryptoRng};\n\nuse ciphersuite::Ciphersuite;\npub use dkg_recovery::recover_key;\n\nuse crate::{\n  Curve, Participant, ThresholdKeys, FrostError,\n  algorithm::{Algorithm, Hram, IetfSchnorr},\n  sign::{Writable, PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine},\n};\n\n/// Tests for the nonce handling code.\npub mod nonces;\nuse nonces::test_multi_nonce;\n\n/// Vectorized test suite to ensure consistency.\npub mod vectors;\n\n// Literal test definitions to run during `cargo test`\n#[cfg(test)]\nmod literal;\n\n/// Constant amount of participants to use when testing.\npub const PARTICIPANTS: u16 = 5;\n/// Constant threshold of participants to use when signing.\npub const THRESHOLD: u16 = ((PARTICIPANTS * 2) / 3) + 1;\n\n/// Create a key, for testing purposes.\npub fn key_gen<R: RngCore + CryptoRng, C: Ciphersuite>(\n  rng: &mut R,\n) -> HashMap<Participant, ThresholdKeys<C>> {\n  let res = dkg_dealer::key_gen::<R, C>(rng, THRESHOLD, PARTICIPANTS).unwrap();\n  assert_eq!(\n    C::generator() * *recover_key(&res.values().cloned().collect::<Vec<_>>()).unwrap(),\n    res.values().next().unwrap().group_key()\n  );\n  res\n}\n\n/// Clone a map without a specific value.\npub fn clone_without<K: Clone + core::cmp::Eq + core::hash::Hash, V: Clone>(\n  map: &HashMap<K, V>,\n  without: &K,\n) -> HashMap<K, V> {\n  let mut res = map.clone();\n  res.remove(without).unwrap();\n  res\n}\n\n/// Spawn algorithm machines for a random selection of signers, each executing the given algorithm.\npub fn algorithm_machines_without_clone<R: RngCore, C: Curve, A: Algorithm<C>>(\n  rng: &mut R,\n  keys: &HashMap<Participant, ThresholdKeys<C>>,\n  machines: HashMap<Participant, AlgorithmMachine<C, A>>,\n) -> HashMap<Participant, AlgorithmMachine<C, A>> {\n  let mut included = vec![];\n  while included.len() < usize::from(keys[&Participant::new(1).unwrap()].params().t()) {\n    let n = 
Participant::new(\n      u16::try_from((rng.next_u64() % u64::try_from(keys.len()).unwrap()) + 1).unwrap(),\n    )\n    .unwrap();\n    if included.contains(&n) {\n      continue;\n    }\n    included.push(n);\n  }\n\n  machines\n    .into_iter()\n    .filter_map(|(i, machine)| if included.contains(&i) { Some((i, machine)) } else { None })\n    .collect()\n}\n\n/// Spawn algorithm machines for a random selection of signers, each executing the given algorithm.\npub fn algorithm_machines<R: RngCore, C: Curve, A: Clone + Algorithm<C>>(\n  rng: &mut R,\n  algorithm: &A,\n  keys: &HashMap<Participant, ThresholdKeys<C>>,\n) -> HashMap<Participant, AlgorithmMachine<C, A>> {\n  algorithm_machines_without_clone(\n    rng,\n    keys,\n    keys\n      .values()\n      .map(|keys| (keys.params().i(), AlgorithmMachine::new(algorithm.clone(), keys.clone())))\n      .collect(),\n  )\n}\n\n// Run the preprocess step\npub(crate) fn preprocess<\n  R: RngCore + CryptoRng,\n  M: PreprocessMachine,\n  F: FnMut(&mut R, &mut HashMap<Participant, M::SignMachine>),\n>(\n  rng: &mut R,\n  mut machines: HashMap<Participant, M>,\n  mut cache: F,\n) -> (HashMap<Participant, M::SignMachine>, HashMap<Participant, M::Preprocess>) {\n  let mut commitments = HashMap::new();\n  let mut machines = machines\n    .drain()\n    .map(|(i, machine)| {\n      let (machine, preprocess) = machine.preprocess(rng);\n      commitments.insert(i, {\n        let mut buf = vec![];\n        preprocess.write(&mut buf).unwrap();\n        machine.read_preprocess::<&[u8]>(&mut buf.as_ref()).unwrap()\n      });\n      (i, machine)\n    })\n    .collect::<HashMap<_, _>>();\n\n  cache(rng, &mut machines);\n\n  (machines, commitments)\n}\n\n// Run the preprocess and generate signature shares\n#[allow(clippy::type_complexity)]\npub(crate) fn preprocess_and_shares<\n  R: RngCore + CryptoRng,\n  M: PreprocessMachine,\n  F: FnMut(&mut R, &mut HashMap<Participant, M::SignMachine>),\n>(\n  rng: &mut R,\n  machines: 
HashMap<Participant, M>,\n  cache: F,\n  msg: &[u8],\n) -> (\n  HashMap<Participant, <M::SignMachine as SignMachine<M::Signature>>::SignatureMachine>,\n  HashMap<Participant, <M::SignMachine as SignMachine<M::Signature>>::SignatureShare>,\n) {\n  let (mut machines, commitments) = preprocess(rng, machines, cache);\n\n  let mut shares = HashMap::new();\n  let machines = machines\n    .drain()\n    .map(|(i, machine)| {\n      let (machine, share) = machine.sign(clone_without(&commitments, &i), msg).unwrap();\n      shares.insert(i, {\n        let mut buf = vec![];\n        share.write(&mut buf).unwrap();\n        machine.read_share::<&[u8]>(&mut buf.as_ref()).unwrap()\n      });\n      (i, machine)\n    })\n    .collect::<HashMap<_, _>>();\n\n  (machines, shares)\n}\n\nfn sign_internal<\n  R: RngCore + CryptoRng,\n  M: PreprocessMachine,\n  F: FnMut(&mut R, &mut HashMap<Participant, M::SignMachine>),\n>(\n  rng: &mut R,\n  machines: HashMap<Participant, M>,\n  cache: F,\n  msg: &[u8],\n) -> M::Signature {\n  let (mut machines, shares) = preprocess_and_shares(rng, machines, cache, msg);\n\n  let mut signature = None;\n  for (i, machine) in machines.drain() {\n    let sig = machine.complete(clone_without(&shares, &i)).unwrap();\n    if signature.is_none() {\n      signature = Some(sig.clone());\n    }\n    assert_eq!(&sig, signature.as_ref().unwrap());\n  }\n  signature.unwrap()\n}\n\n/// Execute the signing protocol, without caching any machines. 
This isn't as comprehensive at\n/// testing as sign, and accordingly isn't preferred, yet is usable for machines not supporting\n/// caching.\npub fn sign_without_caching<R: RngCore + CryptoRng, M: PreprocessMachine>(\n  rng: &mut R,\n  machines: HashMap<Participant, M>,\n  msg: &[u8],\n) -> M::Signature {\n  sign_internal(rng, machines, |_, _| {}, msg)\n}\n\n/// Execute the signing protocol, randomly caching various machines to ensure they can cache\n/// successfully.\npub fn sign_without_clone<R: RngCore + CryptoRng, M: PreprocessMachine>(\n  rng: &mut R,\n  mut keys: HashMap<Participant, <M::SignMachine as SignMachine<M::Signature>>::Keys>,\n  mut params: HashMap<Participant, <M::SignMachine as SignMachine<M::Signature>>::Params>,\n  machines: HashMap<Participant, M>,\n  msg: &[u8],\n) -> M::Signature {\n  sign_internal(\n    rng,\n    machines,\n    |rng, machines| {\n      // Cache and rebuild half of the machines\n      let included = machines.keys().copied().collect::<Vec<_>>();\n      for i in included {\n        if (rng.next_u64() % 2) == 0 {\n          let cache = machines.remove(&i).unwrap().cache();\n          machines.insert(\n            i,\n            M::SignMachine::from_cache(params.remove(&i).unwrap(), keys.remove(&i).unwrap(), cache)\n              .0,\n          );\n        }\n      }\n    },\n    msg,\n  )\n}\n\n/// Execute the signing protocol, randomly caching various machines to ensure they can cache\n/// successfully.\npub fn sign<\n  R: RngCore + CryptoRng,\n  M: PreprocessMachine<SignMachine: SignMachine<M::Signature, Params: Clone>>,\n>(\n  rng: &mut R,\n  params: &<M::SignMachine as SignMachine<M::Signature>>::Params,\n  keys: HashMap<Participant, <M::SignMachine as SignMachine<M::Signature>>::Keys>,\n  machines: HashMap<Participant, M>,\n  msg: &[u8],\n) -> M::Signature {\n  let params = keys.keys().map(|i| (*i, params.clone())).collect();\n  sign_without_clone(rng, keys, params, machines, msg)\n}\n\n/// Test a basic Schnorr signature 
with the provided keys.\npub fn test_schnorr_with_keys<R: RngCore + CryptoRng, C: Curve, H: Hram<C>>(\n  rng: &mut R,\n  keys: &HashMap<Participant, ThresholdKeys<C>>,\n) {\n  const MSG: &[u8] = b\"Hello, World!\";\n\n  let machines = algorithm_machines(&mut *rng, &IetfSchnorr::<C, H>::ietf(), keys);\n  let sig = sign(&mut *rng, &IetfSchnorr::<C, H>::ietf(), keys.clone(), machines, MSG);\n  let group_key = keys[&Participant::new(1).unwrap()].group_key();\n  assert!(sig.verify(group_key, H::hram(&sig.R, &group_key, MSG)));\n}\n\n/// Test a basic Schnorr signature.\npub fn test_schnorr<R: RngCore + CryptoRng, C: Curve, H: Hram<C>>(rng: &mut R) {\n  let keys = key_gen(&mut *rng);\n  test_schnorr_with_keys::<_, _, H>(&mut *rng, &keys)\n}\n\n/// Test an offset Schnorr signature.\npub fn test_offset_schnorr<R: RngCore + CryptoRng, C: Curve, H: Hram<C>>(rng: &mut R) {\n  const MSG: &[u8] = b\"Hello, World!\";\n\n  let mut keys = key_gen(&mut *rng);\n  let group_key = keys[&Participant::new(1).unwrap()].group_key();\n\n  let scalar = C::F::from(3);\n  let offset = C::F::from(5);\n  let offset_key = (group_key * scalar) + (C::generator() * offset);\n  for keys in keys.values_mut() {\n    *keys = keys.clone().scale(scalar).unwrap().offset(offset);\n    assert_eq!(keys.group_key(), offset_key);\n  }\n\n  let machines = algorithm_machines(&mut *rng, &IetfSchnorr::<C, H>::ietf(), &keys);\n  let sig = sign(&mut *rng, &IetfSchnorr::<C, H>::ietf(), keys.clone(), machines, MSG);\n  let group_key = keys[&Participant::new(1).unwrap()].group_key();\n  assert!(sig.verify(offset_key, H::hram(&sig.R, &group_key, MSG)));\n}\n\n/// Test blame for an invalid Schnorr signature share.\npub fn test_schnorr_blame<R: RngCore + CryptoRng, C: Curve, H: Hram<C>>(rng: &mut R) {\n  const MSG: &[u8] = b\"Hello, World!\";\n\n  let keys = key_gen(&mut *rng);\n  let machines = algorithm_machines(&mut *rng, &IetfSchnorr::<C, H>::ietf(), &keys);\n\n  let (mut machines, shares) = preprocess_and_shares(&mut 
*rng, machines, |_, _| {}, MSG);\n\n  for (i, machine) in machines.drain() {\n    let mut shares = clone_without(&shares, &i);\n\n    // Select a random participant to give an invalid share\n    let participants = shares.keys().collect::<Vec<_>>();\n    let faulty = *participants\n      [usize::try_from(rng.next_u64() % u64::try_from(participants.len()).unwrap()).unwrap()];\n    shares.get_mut(&faulty).unwrap().invalidate();\n\n    assert_eq!(machine.complete(shares).err(), Some(FrostError::InvalidShare(faulty)));\n  }\n}\n\n/// Run a variety of tests against a ciphersuite.\npub fn test_ciphersuite<R: RngCore + CryptoRng, C: Curve, H: Hram<C>>(rng: &mut R) {\n  test_schnorr::<R, C, H>(rng);\n  test_offset_schnorr::<R, C, H>(rng);\n  test_schnorr_blame::<R, C, H>(rng);\n\n  test_multi_nonce::<R, C>(rng);\n}\n"
  },
  {
    "path": "crypto/frost/src/tests/nonces.rs",
    "content": "use std::io::{self, Read};\n\nuse zeroize::Zeroizing;\n\nuse rand_core::{RngCore, CryptoRng, SeedableRng};\nuse rand_chacha::ChaCha20Rng;\n\nuse transcript::{Transcript, RecommendedTranscript};\n\nuse ciphersuite::group::{ff::Field, Group, GroupEncoding};\n\nuse crate::{\n  Curve, Participant, ThresholdView, ThresholdKeys, FrostError,\n  algorithm::Algorithm,\n  tests::{key_gen, algorithm_machines, sign},\n};\n\n#[derive(Clone)]\nstruct MultiNonce<C: Curve> {\n  transcript: RecommendedTranscript,\n  nonces: Option<Vec<Vec<C::G>>>,\n}\n\nimpl<C: Curve> MultiNonce<C> {\n  fn new() -> MultiNonce<C> {\n    MultiNonce {\n      transcript: RecommendedTranscript::new(b\"FROST MultiNonce Algorithm Test\"),\n      nonces: None,\n    }\n  }\n}\n\nfn nonces<C: Curve>() -> Vec<Vec<C::G>> {\n  vec![\n    vec![C::generator(), C::generator().double()],\n    vec![C::generator(), C::generator() * C::F::from(3), C::generator() * C::F::from(4)],\n  ]\n}\n\nfn verify_nonces<C: Curve>(nonces: &[Vec<C::G>]) {\n  assert_eq!(nonces.len(), 2);\n\n  // Each nonce should be a series of commitments, over some generators, which share a discrete log\n  // Since they share a discrete log, their only distinction should be the generator\n  // Above, the generators were created with a known relationship\n  // Accordingly, we can check here that relationship holds to make sure these commitments are well\n  // formed\n  assert_eq!(nonces[0].len(), 2);\n  assert_eq!(nonces[0][0].double(), nonces[0][1]);\n\n  assert_eq!(nonces[1].len(), 3);\n  assert_eq!(nonces[1][0] * C::F::from(3), nonces[1][1]);\n  assert_eq!(nonces[1][0] * C::F::from(4), nonces[1][2]);\n\n  assert!(nonces[0][0] != nonces[1][0]);\n}\n\nimpl<C: Curve> Algorithm<C> for MultiNonce<C> {\n  type Transcript = RecommendedTranscript;\n  type Addendum = ();\n  type Signature = ();\n\n  fn transcript(&mut self) -> &mut Self::Transcript {\n    &mut self.transcript\n  }\n\n  fn nonces(&self) -> Vec<Vec<C::G>> {\n    
nonces::<C>()\n  }\n\n  fn preprocess_addendum<R: RngCore + CryptoRng>(&mut self, _: &mut R, _: &ThresholdKeys<C>) {}\n\n  fn read_addendum<R: Read>(&self, _: &mut R) -> io::Result<Self::Addendum> {\n    Ok(())\n  }\n\n  fn process_addendum(\n    &mut self,\n    _: &ThresholdView<C>,\n    _: Participant,\n    (): (),\n  ) -> Result<(), FrostError> {\n    Ok(())\n  }\n\n  fn sign_share(\n    &mut self,\n    _: &ThresholdView<C>,\n    nonce_sums: &[Vec<C::G>],\n    nonces: Vec<Zeroizing<C::F>>,\n    _: &[u8],\n  ) -> C::F {\n    // Verify the nonce sums are as expected\n    verify_nonces::<C>(nonce_sums);\n\n    // Verify we actually have two nonces and that they're distinct\n    assert_eq!(nonces.len(), 2);\n    assert!(nonces[0] != nonces[1]);\n\n    // Save the nonce sums for later so we can check they're consistent with the call to verify\n    assert!(self.nonces.is_none());\n    self.nonces = Some(nonce_sums.to_vec());\n\n    // Sum the nonces so we can later check they actually have a relationship to nonce_sums\n    let mut res = C::F::ZERO;\n\n    // Weight each nonce\n    // This is probably overkill, since their unweighted forms would practically still require\n    // some level of crafting to pass a naive sum via malleability, yet this makes it more robust\n    for nonce in nonce_sums {\n      self.transcript.domain_separate(b\"nonce\");\n      for commitment in nonce {\n        self.transcript.append_message(b\"commitment\", commitment.to_bytes());\n      }\n    }\n    let mut rng = ChaCha20Rng::from_seed(self.transcript.clone().rng_seed(b\"weight\"));\n\n    for nonce in nonces {\n      res += *nonce * C::F::random(&mut rng);\n    }\n    res\n  }\n\n  #[must_use]\n  fn verify(&self, _: C::G, nonces: &[Vec<C::G>], sum: C::F) -> Option<Self::Signature> {\n    verify_nonces::<C>(nonces);\n    assert_eq!(&self.nonces.clone().unwrap(), nonces);\n\n    // Make sure the nonce sums actually relate to the nonces\n    let mut res = C::G::identity();\n    let mut 
rng = ChaCha20Rng::from_seed(self.transcript.clone().rng_seed(b\"weight\"));\n    for nonce in nonces {\n      res += nonce[0] * C::F::random(&mut rng);\n    }\n    assert_eq!(res, C::generator() * sum);\n\n    Some(())\n  }\n\n  fn verify_share(&self, _: C::G, _: &[Vec<C::G>], _: C::F) -> Result<Vec<(C::F, C::G)>, ()> {\n    panic!(\"share verification triggered\");\n  }\n}\n\n/// Test a multi-nonce, multi-generator algorithm.\n// Specifically verifies this library can:\n// 1) Generate multiple nonces\n// 2) Provide the group nonces (nonce_sums) across multiple generators, still with the same\n//    discrete log\n// 3) Provide algorithms with nonces which match the group nonces\npub fn test_multi_nonce<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {\n  let keys = key_gen::<R, C>(&mut *rng);\n  let machines = algorithm_machines(&mut *rng, &MultiNonce::<C>::new(), &keys);\n  sign(&mut *rng, &MultiNonce::<C>::new(), keys.clone(), machines, &[]);\n}\n"
  },
  {
    "path": "crypto/frost/src/tests/vectors.rs",
    "content": "use core::ops::Deref;\n\nuse std::collections::HashMap;\n#[cfg(test)]\nuse std::str::FromStr;\n\nuse zeroize::Zeroizing;\n\nuse rand_core::{RngCore, CryptoRng, SeedableRng};\nuse rand_chacha::ChaCha20Rng;\n\nuse ciphersuite::group::{ff::PrimeField, GroupEncoding};\n\nuse crate::{\n  curve::Curve,\n  Participant, ThresholdKeys,\n  algorithm::{Hram, IetfSchnorr},\n  sign::{\n    Writable, Nonce, GeneratorCommitments, NonceCommitments, Commitments, Preprocess,\n    PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine,\n  },\n  tests::{clone_without, recover_key, test_ciphersuite},\n};\n\n/// Vectors for a ciphersuite.\npub struct Vectors {\n  pub threshold: u16,\n\n  pub group_secret: String,\n  pub group_key: String,\n  pub shares: Vec<String>,\n\n  pub msg: String,\n  pub included: Vec<Participant>,\n\n  pub nonce_randomness: Vec<[String; 2]>,\n  pub nonces: Vec<[String; 2]>,\n  pub commitments: Vec<[String; 2]>,\n\n  pub sig_shares: Vec<String>,\n\n  pub sig: String,\n}\n\n// Vectors are expected to be formatted per the IETF proof of concept\n// The included vectors are directly from\n// https://github.com/cfrg/draft-irtf-cfrg-frost/tree/draft-irtf-cfrg-frost-14/poc\n#[cfg(test)]\nimpl From<serde_json::Value> for Vectors {\n  fn from(value: serde_json::Value) -> Vectors {\n    let to_str = |value: &serde_json::Value| value.as_str().unwrap().to_string();\n    Vectors {\n      threshold: u16::from_str(value[\"config\"][\"NUM_PARTICIPANTS\"].as_str().unwrap()).unwrap(),\n\n      group_secret: to_str(&value[\"inputs\"][\"group_secret_key\"]),\n      group_key: to_str(&value[\"inputs\"][\"group_public_key\"]),\n      shares: value[\"inputs\"][\"participant_shares\"]\n        .as_array()\n        .unwrap()\n        .iter()\n        .map(|share| to_str(&share[\"participant_share\"]))\n        .collect(),\n\n      msg: to_str(&value[\"inputs\"][\"message\"]),\n      included: value[\"inputs\"][\"participant_list\"]\n        .as_array()\n       
 .unwrap()\n        .iter()\n        .map(|i| Participant::new(u16::try_from(i.as_u64().unwrap()).unwrap()).unwrap())\n        .collect(),\n\n      nonce_randomness: value[\"round_one_outputs\"][\"outputs\"]\n        .as_array()\n        .unwrap()\n        .iter()\n        .map(|value| {\n          [to_str(&value[\"hiding_nonce_randomness\"]), to_str(&value[\"binding_nonce_randomness\"])]\n        })\n        .collect(),\n      nonces: value[\"round_one_outputs\"][\"outputs\"]\n        .as_array()\n        .unwrap()\n        .iter()\n        .map(|value| [to_str(&value[\"hiding_nonce\"]), to_str(&value[\"binding_nonce\"])])\n        .collect(),\n      commitments: value[\"round_one_outputs\"][\"outputs\"]\n        .as_array()\n        .unwrap()\n        .iter()\n        .map(|value| {\n          [to_str(&value[\"hiding_nonce_commitment\"]), to_str(&value[\"binding_nonce_commitment\"])]\n        })\n        .collect(),\n\n      sig_shares: value[\"round_two_outputs\"][\"outputs\"]\n        .as_array()\n        .unwrap()\n        .iter()\n        .map(|value| to_str(&value[\"sig_share\"]))\n        .collect(),\n\n      sig: to_str(&value[\"final_output\"][\"sig\"]),\n    }\n  }\n}\n\n// Load these vectors into ThresholdKeys using a custom serialization it'll deserialize\nfn vectors_to_multisig_keys<C: Curve>(vectors: &Vectors) -> HashMap<Participant, ThresholdKeys<C>> {\n  let shares = vectors\n    .shares\n    .iter()\n    .map(|secret| C::read_F::<&[u8]>(&mut hex::decode(secret).unwrap().as_ref()).unwrap())\n    .collect::<Vec<_>>();\n  let verification_shares = shares.iter().map(|secret| C::generator() * secret).collect::<Vec<_>>();\n\n  let mut keys = HashMap::new();\n  for i in 1 ..= u16::try_from(shares.len()).unwrap() {\n    // Manually re-implement the serialization for ThresholdKeys to import this data\n    let mut serialized = vec![];\n    serialized.extend(u32::try_from(C::ID.len()).unwrap().to_le_bytes());\n    serialized.extend(C::ID);\n    
serialized.extend(vectors.threshold.to_le_bytes());\n    serialized.extend(u16::try_from(shares.len()).unwrap().to_le_bytes());\n    serialized.extend(i.to_le_bytes());\n    serialized.push(1);\n    serialized.extend(shares[usize::from(i) - 1].to_repr().as_ref());\n    for share in &verification_shares {\n      serialized.extend(share.to_bytes().as_ref());\n    }\n\n    let these_keys = ThresholdKeys::<C>::read::<&[u8]>(&mut serialized.as_ref()).unwrap();\n    assert_eq!(these_keys.params().t(), vectors.threshold);\n    assert_eq!(usize::from(these_keys.params().n()), shares.len());\n    let participant = Participant::new(i).unwrap();\n    assert_eq!(these_keys.params().i(), participant);\n    assert_eq!(these_keys.original_secret_share().deref(), &shares[usize::from(i - 1)]);\n    assert_eq!(hex::encode(these_keys.group_key().to_bytes().as_ref()), vectors.group_key);\n    keys.insert(participant, these_keys);\n  }\n\n  keys\n}\n\n/// Test a Ciphersuite with its vectors.\npub fn test_with_vectors<R: RngCore + CryptoRng, C: Curve, H: Hram<C>>(\n  rng: &mut R,\n  vectors: &Vectors,\n) {\n  test_ciphersuite::<R, C, H>(rng);\n\n  // Test against the vectors\n  let keys = vectors_to_multisig_keys::<C>(vectors);\n  {\n    let group_key =\n      <C as Curve>::read_G::<&[u8]>(&mut hex::decode(&vectors.group_key).unwrap().as_ref())\n        .unwrap();\n    let secret =\n      C::read_F::<&[u8]>(&mut hex::decode(&vectors.group_secret).unwrap().as_ref()).unwrap();\n    assert_eq!(C::generator() * secret, group_key);\n    assert_eq!(*recover_key(&keys.values().cloned().collect::<Vec<_>>()).unwrap(), secret);\n\n    let mut machines = vec![];\n    for i in &vectors.included {\n      machines.push((i, AlgorithmMachine::new(IetfSchnorr::<C, H>::ietf(), keys[i].clone())));\n    }\n\n    let mut commitments = HashMap::new();\n    let machines = machines\n      .into_iter()\n      .enumerate()\n      .map(|(c, (i, machine))| {\n        let nonce = |i| {\n          Zeroizing::new(\n  
          C::read_F::<&[u8]>(&mut hex::decode(&vectors.nonces[c][i]).unwrap().as_ref()).unwrap(),\n          )\n        };\n        let nonces = [nonce(0), nonce(1)];\n        let these_commitments =\n          [C::generator() * nonces[0].deref(), C::generator() * nonces[1].deref()];\n\n        assert_eq!(\n          these_commitments[0].to_bytes().as_ref(),\n          hex::decode(&vectors.commitments[c][0]).unwrap()\n        );\n        assert_eq!(\n          these_commitments[1].to_bytes().as_ref(),\n          hex::decode(&vectors.commitments[c][1]).unwrap()\n        );\n\n        let preprocess = Preprocess {\n          commitments: Commitments {\n            nonces: vec![NonceCommitments {\n              generators: vec![GeneratorCommitments(these_commitments)],\n            }],\n          },\n          addendum: (),\n        };\n        // FROST doesn't specify how to serialize these together, yet this is sane\n        // (and the simplest option)\n        assert_eq!(\n          preprocess.serialize(),\n          hex::decode(vectors.commitments[c][0].clone() + &vectors.commitments[c][1]).unwrap()\n        );\n\n        let machine = machine.unsafe_override_preprocess(vec![Nonce(nonces)], preprocess);\n\n        commitments.insert(\n          *i,\n          machine\n            .read_preprocess::<&[u8]>(\n              &mut [\n                these_commitments[0].to_bytes().as_ref(),\n                these_commitments[1].to_bytes().as_ref(),\n              ]\n              .concat()\n              .as_ref(),\n            )\n            .unwrap(),\n        );\n        (i, machine)\n      })\n      .collect::<Vec<_>>();\n\n    let mut shares = HashMap::new();\n    let machines = machines\n      .into_iter()\n      .enumerate()\n      .map(|(c, (i, machine))| {\n        let (machine, share) = machine\n          .sign(clone_without(&commitments, i), &hex::decode(&vectors.msg).unwrap())\n          .unwrap();\n\n        let share = {\n          let mut buf = 
vec![];\n          share.write(&mut buf).unwrap();\n          buf\n        };\n        assert_eq!(share, hex::decode(&vectors.sig_shares[c]).unwrap());\n\n        shares.insert(*i, machine.read_share::<&[u8]>(&mut share.as_ref()).unwrap());\n        (i, machine)\n      })\n      .collect::<Vec<_>>();\n\n    for (i, machine) in machines {\n      let sig = machine.complete(clone_without(&shares, i)).unwrap();\n      let mut serialized = sig.R.to_bytes().as_ref().to_vec();\n      serialized.extend(sig.s.to_repr().as_ref());\n      assert_eq!(hex::encode(serialized), vectors.sig);\n    }\n  }\n\n  // The above code didn't test the nonce generation due to the infeasibility of doing so against\n  // the current codebase\n\n  // A transparent RNG which has a fixed output\n  struct TransparentRng(Vec<[u8; 32]>);\n  impl RngCore for TransparentRng {\n    fn next_u32(&mut self) -> u32 {\n      unimplemented!()\n    }\n    fn next_u64(&mut self) -> u64 {\n      unimplemented!()\n    }\n    fn fill_bytes(&mut self, dest: &mut [u8]) {\n      dest.copy_from_slice(&self.0.remove(0))\n    }\n    fn try_fill_bytes(&mut self, _: &mut [u8]) -> Result<(), rand_core::Error> {\n      unimplemented!()\n    }\n  }\n  // CryptoRng requires the output not reveal any info about any other outputs\n  // Since this only will produce one output, this is actually met, even though it'd be fine to\n  // fake it as this is a test\n  impl CryptoRng for TransparentRng {}\n\n  // Test C::random_nonce matches the expected vectors\n  for (i, l) in vectors.included.iter().enumerate() {\n    let l = usize::from(u16::from(*l));\n\n    // Shares are a zero-indexed array of all participants, hence l - 1\n    let share = Zeroizing::new(\n      C::read_F::<&[u8]>(&mut hex::decode(&vectors.shares[l - 1]).unwrap().as_ref()).unwrap(),\n    );\n\n    let randomness = vectors.nonce_randomness[i]\n      .iter()\n      .map(|randomness| hex::decode(randomness).unwrap().try_into().unwrap())\n      
.collect::<Vec<_>>();\n\n    let nonces = vectors.nonces[i]\n      .iter()\n      .map(|nonce| {\n        Zeroizing::new(C::read_F::<&[u8]>(&mut hex::decode(nonce).unwrap().as_ref()).unwrap())\n      })\n      .collect::<Vec<_>>();\n\n    for (randomness, nonce) in randomness.iter().zip(&nonces) {\n      // Nonces are only present for participating signers, hence i\n      assert_eq!(C::random_nonce(&share, &mut TransparentRng(vec![*randomness])), *nonce);\n    }\n\n    // Also test it at the Commitments level\n    let (generated_nonces, commitments) =\n      Commitments::<C>::new::<_>(&mut TransparentRng(randomness), &share, &[vec![C::generator()]]);\n\n    assert_eq!(generated_nonces.len(), 1);\n    assert_eq!(generated_nonces[0].0, [nonces[0].clone(), nonces[1].clone()]);\n\n    let mut commitments_bytes = vec![];\n    commitments.write(&mut commitments_bytes).unwrap();\n    assert_eq!(\n      commitments_bytes,\n      hex::decode(vectors.commitments[i][0].clone() + &vectors.commitments[i][1]).unwrap()\n    );\n  }\n\n  // This doesn't verify C::random_nonce is called correctly, where the code should call it with\n  // the output from a ChaCha20 stream\n  // Create a known ChaCha20 stream to verify it ends up at random_nonce properly\n\n  {\n    let mut chacha_seed = [0; 32];\n    rng.fill_bytes(&mut chacha_seed);\n    let mut ours = ChaCha20Rng::from_seed(chacha_seed);\n    let frosts = ours.clone();\n\n    // The machines should geenerate a seed, and then use that seed in a ChaCha20 RNG for nonces\n    let mut preprocess_seed = [0; 32];\n    ours.fill_bytes(&mut preprocess_seed);\n    let mut ours = ChaCha20Rng::from_seed(preprocess_seed);\n\n    // Get the randomness which will be used\n    let mut randomness = ([0; 32], [0; 32]);\n    ours.fill_bytes(&mut randomness.0);\n    ours.fill_bytes(&mut randomness.1);\n\n    // Create the machines\n    let mut machines = vec![];\n    for i in &vectors.included {\n      machines.push((i, 
AlgorithmMachine::new(IetfSchnorr::<C, H>::ietf(), keys[i].clone())));\n    }\n\n    for (i, machine) in machines {\n      let (_, preprocess) = machine.preprocess(&mut frosts.clone());\n\n      // Calculate the expected nonces\n      let mut expected = (C::generator() *\n        C::random_nonce(\n          keys[i].original_secret_share(),\n          &mut TransparentRng(vec![randomness.0]),\n        )\n        .deref())\n      .to_bytes()\n      .as_ref()\n      .to_vec();\n      expected.extend(\n        (C::generator() *\n          C::random_nonce(\n            keys[i].original_secret_share(),\n            &mut TransparentRng(vec![randomness.1]),\n          )\n          .deref())\n        .to_bytes()\n        .as_ref(),\n      );\n\n      // Ensure they match\n      assert_eq!(preprocess.serialize(), expected);\n    }\n  }\n}\n"
  },
  {
    "path": "crypto/multiexp/Cargo.toml",
    "content": "[package]\nname = \"multiexp\"\nversion = \"0.4.2\"\ndescription = \"Multiexponentiation algorithms for ff/group\"\nlicense = \"MIT\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/crypto/multiexp\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nkeywords = [\"multiexp\", \"ff\", \"group\"]\nedition = \"2021\"\nrust-version = \"1.79\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nrustversion = \"1\"\n\nstd-shims = { path = \"../../common/std-shims\", version = \"^0.1.1\", default-features = false }\n\nzeroize = { version = \"^1.5\", default-features = false, features = [\"zeroize_derive\"] }\n\nff = { version = \"0.13\", default-features = false, features = [\"bits\"] }\ngroup = { version = \"0.13\", default-features = false }\n\nrand_core = { version = \"0.6\", default-features = false, optional = true }\n\n[dev-dependencies]\nrand_core = { version = \"0.6\", features = [\"std\"] }\n\nk256 = { version = \"^0.13.1\", default-features = false, features = [\"arithmetic\", \"bits\"] }\ndalek-ff-group = { path = \"../dalek-ff-group\" }\n\n[features]\nstd = [\"std-shims/std\", \"zeroize/std\", \"ff/std\", \"rand_core?/std\"]\n\nbatch = [\"rand_core\"]\n\ndefault = [\"std\"]\n"
  },
  {
    "path": "crypto/multiexp/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2022-2023 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "crypto/multiexp/README.md",
    "content": "# Multiexp\n\nA multiexp implementation for ff/group implementing Straus and Pippenger. A\nbatch verification API is also available via the \"batch\" feature, which enables\nsecure multiexponentiation batch verification given a series of values which\nshould sum to the identity, identifying which doesn't via binary search if they\ndon't.\n\nThis library was\n[audited by Cypher Stack in March 2023](https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf),\nculminating in commit\n[669d2dbffc1dafb82a09d9419ea182667115df06](https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06).\nAny subsequent changes have not undergone auditing.\n\nThis library is usable under no_std, via alloc, when the default features are\ndisabled.\n"
  },
  {
    "path": "crypto/multiexp/src/batch.rs",
    "content": "use std_shims::vec::Vec;\n\nuse rand_core::{RngCore, CryptoRng};\n\nuse zeroize::{Zeroize, Zeroizing};\n\nuse ff::{Field, PrimeFieldBits};\nuse group::Group;\n\nuse crate::{multiexp, multiexp_vartime};\n\n// Flatten the contained statements to a single Vec.\n// Wrapped in Zeroizing in case any of the included statements contain private values.\n#[allow(clippy::type_complexity)]\nfn flat<Id: Copy + Zeroize, G: Zeroize + Group<Scalar: Zeroize + PrimeFieldBits>>(\n  slice: &[(Id, Vec<(G::Scalar, G)>)],\n) -> Zeroizing<Vec<(G::Scalar, G)>> {\n  Zeroizing::new(slice.iter().flat_map(|pairs| pairs.1.iter()).copied().collect::<Vec<_>>())\n}\n\n/// A batch verifier intended to verify a series of statements are each equivalent to zero.\n#[allow(clippy::type_complexity)]\n#[derive(Clone, Zeroize)]\npub struct BatchVerifier<Id: Copy + Zeroize, G: Zeroize + Group<Scalar: Zeroize + PrimeFieldBits>>(\n  Zeroizing<Vec<(Id, Vec<(G::Scalar, G)>)>>,\n);\n\nimpl<Id: Copy + Zeroize, G: Zeroize + Group<Scalar: Zeroize + PrimeFieldBits>>\n  BatchVerifier<Id, G>\n{\n  /// Create a new batch verifier, expected to verify the following amount of statements.\n  ///\n  /// `capacity` is a size hint and is not required to be accurate.\n  pub fn new(capacity: usize) -> BatchVerifier<Id, G> {\n    BatchVerifier(Zeroizing::new(Vec::with_capacity(capacity)))\n  }\n\n  /// Queue a statement for batch verification.\n  pub fn queue<R: RngCore + CryptoRng, I: IntoIterator<Item = (G::Scalar, G)>>(\n    &mut self,\n    rng: &mut R,\n    id: Id,\n    pairs: I,\n  ) {\n    // Define a unique scalar factor for this set of variables so individual items can't overlap\n    let u = if self.0.is_empty() {\n      G::Scalar::ONE\n    } else {\n      let mut weight;\n      while {\n        // Generate a random scalar\n        weight = G::Scalar::random(&mut *rng);\n\n        // Clears half the bits, maintaining security, to minimize scalar additions\n        // Is not practically faster for whatever 
reason\n        /*\n        // Generate a random scalar\n        let mut repr = G::Scalar::random(&mut *rng).to_repr();\n\n        // Calculate the amount of bytes to clear. We want to clear less than half\n        let repr_len = repr.as_ref().len();\n        let unused_bits = (repr_len * 8) - usize::try_from(G::Scalar::CAPACITY).unwrap();\n        // Don't clear any partial bytes\n        let to_clear = (repr_len / 2) - ((unused_bits + 7) / 8);\n\n        // Clear a safe amount of bytes\n        for b in &mut repr.as_mut()[.. to_clear] {\n          *b = 0;\n        }\n\n        // Ensure these bits are used as the low bits so low scalars multiplied by this don't\n        // become large scalars\n        weight = G::Scalar::from_repr(repr).unwrap();\n        // Tests if any bit we supposedly just cleared is set, and if so, reverses it\n        // Not a security issue if this fails, just a minor performance hit at ~2^-120 odds\n        if weight.to_le_bits().iter().take(to_clear * 8).any(|bit| *bit) {\n          repr.as_mut().reverse();\n          weight = G::Scalar::from_repr(repr).unwrap();\n        }\n        */\n\n        // Ensure it's non-zero, as a zero scalar would cause this item to pass no matter what\n        weight.is_zero().into()\n      } {}\n      weight\n    };\n\n    self.0.push((id, pairs.into_iter().map(|(scalar, point)| (scalar * u, point)).collect()));\n  }\n\n  /// Perform batch verification, returning a boolean of if the statements equaled zero.\n  #[must_use]\n  pub fn verify(&self) -> bool {\n    multiexp(&flat(&self.0)).is_identity().into()\n  }\n\n  /// Perform batch verification in variable time.\n  #[must_use]\n  pub fn verify_vartime(&self) -> bool {\n    multiexp_vartime(&flat(&self.0)).is_identity().into()\n  }\n\n  /// Perform a binary search to identify which statement does not equal 0, returning None if all\n  /// statements do.\n  ///\n  /// This function will only return the ID of one invalid statement, even if multiple are 
invalid.\n  // A constant time variant may be beneficial for robust protocols\n  pub fn blame_vartime(&self) -> Option<Id> {\n    let mut slice = self.0.as_slice();\n    while slice.len() > 1 {\n      let split = slice.len() / 2;\n      if multiexp_vartime(&flat(&slice[.. split])).is_identity().into() {\n        slice = &slice[split ..];\n      } else {\n        slice = &slice[.. split];\n      }\n    }\n\n    slice\n      .first()\n      .filter(|(_, value)| !bool::from(multiexp_vartime(value).is_identity()))\n      .map(|(id, _)| *id)\n  }\n\n  /// Perform constant time batch verification, and if verification fails, identify one faulty\n  /// statement in variable time.\n  pub fn verify_with_vartime_blame(&self) -> Result<(), Id> {\n    if self.verify() {\n      Ok(())\n    } else {\n      Err(self.blame_vartime().unwrap())\n    }\n  }\n\n  /// Perform variable time batch verification, and if verification fails, identify one faulty\n  /// statement in variable time.\n  pub fn verify_vartime_with_vartime_blame(&self) -> Result<(), Id> {\n    if self.verify_vartime() {\n      Ok(())\n    } else {\n      Err(self.blame_vartime().unwrap())\n    }\n  }\n}\n"
  },
  {
    "path": "crypto/multiexp/src/lib.rs",
    "content": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![doc = include_str!(\"../README.md\")]\n#![cfg_attr(not(feature = \"std\"), no_std)]\n\n#[cfg(not(feature = \"std\"))]\n#[macro_use]\nextern crate alloc;\n#[allow(unused_imports)]\nuse std_shims::prelude::*;\nuse std_shims::vec::Vec;\n\nuse zeroize::Zeroize;\n\nuse ff::PrimeFieldBits;\nuse group::Group;\n\nmod straus;\nuse straus::*;\n\nmod pippenger;\nuse pippenger::*;\n\n#[cfg(feature = \"batch\")]\nmod batch;\n#[cfg(feature = \"batch\")]\npub use batch::BatchVerifier;\n\n#[cfg(test)]\nmod tests;\n\n// Use black_box when possible\n#[rustversion::since(1.66)]\nuse core::hint::black_box;\n#[rustversion::before(1.66)]\nfn black_box<T>(val: T) -> T {\n  val\n}\n\nfn u8_from_bool(bit_ref: &mut bool) -> u8 {\n  let bit_ref = black_box(bit_ref);\n\n  let mut bit = black_box(*bit_ref);\n  #[allow(clippy::cast_lossless)]\n  let res = black_box(bit as u8);\n  bit.zeroize();\n  debug_assert!((res | 1) == 1);\n\n  bit_ref.zeroize();\n  res\n}\n\n// Convert scalars to `window`-sized bit groups, as needed to index a table\n// This algorithm works for `window <= 8`\npub(crate) fn prep_bits<G: Group<Scalar: PrimeFieldBits>>(\n  pairs: &[(G::Scalar, G)],\n  window: u8,\n) -> Vec<Vec<u8>> {\n  let w_usize = usize::from(window);\n\n  let mut groupings = vec![];\n  for pair in pairs {\n    let p = groupings.len();\n    let mut bits = pair.0.to_le_bits();\n    groupings.push(vec![0; bits.len().div_ceil(w_usize)]);\n\n    for (i, mut bit) in bits.iter_mut().enumerate() {\n      let mut bit = u8_from_bool(&mut bit);\n      groupings[p][i / w_usize] |= bit << (i % w_usize);\n      bit.zeroize();\n    }\n  }\n\n  groupings\n}\n\n#[derive(Clone, Copy, PartialEq, Eq, Debug)]\nenum Algorithm {\n  Null,\n  Single,\n  Straus(u8),\n  Pippenger(u8),\n}\n\n/*\nRelease (with runs 20, so all of these are off by 20x):\n\nk256\nStraus 3 is more efficient at 5 with 678µs per\nStraus 4 is more efficient at 10 with 530µs per\nStraus 5 is more 
efficient at 35 with 467µs per\n\nPippenger 5 is more efficient at 125 with 431µs per\nPippenger 6 is more efficient at 275 with 349µs per\nPippenger 7 is more efficient at 375 with 360µs per\n\ndalek\nStraus 3 is more efficient at 5 with 519µs per\nStraus 4 is more efficient at 10 with 376µs per\nStraus 5 is more efficient at 170 with 330µs per\n\nPippenger 5 is more efficient at 125 with 305µs per\nPippenger 6 is more efficient at 275 with 250µs per\nPippenger 7 is more efficient at 450 with 205µs per\nPippenger 8 is more efficient at 800 with 213µs per\n\nDebug (with runs 5, so...):\n\nk256\nStraus 3 is more efficient at 5 with 2532µs per\nStraus 4 is more efficient at 10 with 1930µs per\nStraus 5 is more efficient at 80 with 1632µs per\n\nPippenger 5 is more efficient at 150 with 1441µs per\nPippenger 6 is more efficient at 300 with 1235µs per\nPippenger 7 is more efficient at 475 with 1182µs per\nPippenger 8 is more efficient at 625 with 1170µs per\n\ndalek:\nStraus 3 is more efficient at 5 with 971µs per\nStraus 4 is more efficient at 10 with 782µs per\nStraus 5 is more efficient at 75 with 778µs per\nStraus 6 is more efficient at 165 with 867µs per\n\nPippenger 5 is more efficient at 125 with 677µs per\nPippenger 6 is more efficient at 250 with 655µs per\nPippenger 7 is more efficient at 475 with 500µs per\nPippenger 8 is more efficient at 875 with 499µs per\n*/\nfn algorithm(len: usize) -> Algorithm {\n  #[cfg(not(debug_assertions))]\n  if len == 0 {\n    Algorithm::Null\n  } else if len == 1 {\n    Algorithm::Single\n  } else if len < 10 {\n    // Straus 2 never showed a performance benefit, even with just 2 elements\n    Algorithm::Straus(3)\n  } else if len < 20 {\n    Algorithm::Straus(4)\n  } else if len < 50 {\n    Algorithm::Straus(5)\n  } else if len < 100 {\n    Algorithm::Pippenger(4)\n  } else if len < 125 {\n    Algorithm::Pippenger(5)\n  } else if len < 275 {\n    Algorithm::Pippenger(6)\n  } else if len < 400 {\n    Algorithm::Pippenger(7)\n  
} else {\n    Algorithm::Pippenger(8)\n  }\n\n  #[cfg(debug_assertions)]\n  if len == 0 {\n    Algorithm::Null\n  } else if len == 1 {\n    Algorithm::Single\n  } else if len < 10 {\n    Algorithm::Straus(3)\n  } else if len < 80 {\n    Algorithm::Straus(4)\n  } else if len < 100 {\n    Algorithm::Straus(5)\n  } else if len < 125 {\n    Algorithm::Pippenger(4)\n  } else if len < 275 {\n    Algorithm::Pippenger(5)\n  } else if len < 475 {\n    Algorithm::Pippenger(6)\n  } else if len < 750 {\n    Algorithm::Pippenger(7)\n  } else {\n    Algorithm::Pippenger(8)\n  }\n}\n\n/// Performs a multiexponentiation, automatically selecting the optimal algorithm based on the\n/// amount of pairs.\npub fn multiexp<G: Zeroize + Group<Scalar: Zeroize + PrimeFieldBits>>(\n  pairs: &[(G::Scalar, G)],\n) -> G {\n  match algorithm(pairs.len()) {\n    Algorithm::Null => Group::identity(),\n    Algorithm::Single => pairs[0].1 * pairs[0].0,\n    // These functions panic if called without any pairs\n    Algorithm::Straus(window) => straus(pairs, window),\n    Algorithm::Pippenger(window) => pippenger(pairs, window),\n  }\n}\n\n/// Performs a multiexponentiation in variable time, automatically selecting the optimal algorithm\n/// based on the amount of pairs.\npub fn multiexp_vartime<G: Group<Scalar: PrimeFieldBits>>(pairs: &[(G::Scalar, G)]) -> G {\n  match algorithm(pairs.len()) {\n    Algorithm::Null => Group::identity(),\n    Algorithm::Single => pairs[0].1 * pairs[0].0,\n    Algorithm::Straus(window) => straus_vartime(pairs, window),\n    Algorithm::Pippenger(window) => pippenger_vartime(pairs, window),\n  }\n}\n"
  },
  {
    "path": "crypto/multiexp/src/pippenger.rs",
    "content": "use zeroize::Zeroize;\n\nuse ff::PrimeFieldBits;\nuse group::Group;\n\nuse crate::prep_bits;\n\n// Pippenger's algorithm for multiexponentiation, as published in the SIAM Journal on Computing\n// DOI: 10.1137/0209022\npub(crate) fn pippenger<G: Zeroize + Group<Scalar: PrimeFieldBits>>(\n  pairs: &[(G::Scalar, G)],\n  window: u8,\n) -> G {\n  let mut bits = prep_bits(pairs, window);\n\n  let mut res = G::identity();\n  for n in (0 .. bits[0].len()).rev() {\n    if n != (bits[0].len() - 1) {\n      for _ in 0 .. window {\n        res = res.double();\n      }\n    }\n\n    let mut buckets = vec![G::identity(); 2_usize.pow(window.into())];\n    for p in 0 .. bits.len() {\n      buckets[usize::from(bits[p][n])] += pairs[p].1;\n    }\n\n    let mut intermediate_sum = G::identity();\n    for b in (1 .. buckets.len()).rev() {\n      intermediate_sum += buckets[b];\n      res += intermediate_sum;\n    }\n\n    buckets.zeroize();\n  }\n\n  bits.zeroize();\n  res\n}\n\npub(crate) fn pippenger_vartime<G: Group<Scalar: PrimeFieldBits>>(\n  pairs: &[(G::Scalar, G)],\n  window: u8,\n) -> G {\n  let bits = prep_bits(pairs, window);\n\n  let mut res = G::identity();\n  for n in (0 .. bits[0].len()).rev() {\n    if n != (bits[0].len() - 1) {\n      for _ in 0 .. window {\n        res = res.double();\n      }\n    }\n\n    // Use None to represent identity since is_none is likely faster than is_identity\n    let mut buckets = vec![None; 2_usize.pow(window.into())];\n    for p in 0 .. bits.len() {\n      let nibble = usize::from(bits[p][n]);\n      if nibble != 0 {\n        if let Some(bucket) = buckets[nibble].as_mut() {\n          *bucket += pairs[p].1;\n        } else {\n          buckets[nibble] = Some(pairs[p].1);\n        }\n      }\n    }\n\n    let mut intermediate_sum = None;\n    for b in (1 .. 
buckets.len()).rev() {\n      if let Some(bucket) = buckets[b].as_ref() {\n        if let Some(intermediate_sum) = intermediate_sum.as_mut() {\n          *intermediate_sum += bucket;\n        } else {\n          intermediate_sum = Some(*bucket);\n        }\n      }\n\n      if let Some(intermediate_sum) = intermediate_sum.as_ref() {\n        res += intermediate_sum;\n      }\n    }\n  }\n\n  res\n}\n"
  },
  {
    "path": "crypto/multiexp/src/straus.rs",
    "content": "use std_shims::vec::Vec;\n\nuse zeroize::Zeroize;\n\nuse ff::PrimeFieldBits;\nuse group::Group;\n\nuse crate::prep_bits;\n\n// Create tables for every included point of size 2^window\nfn prep_tables<G: Group>(pairs: &[(G::Scalar, G)], window: u8) -> Vec<Vec<G>> {\n  let mut tables = Vec::with_capacity(pairs.len());\n  for pair in pairs {\n    let p = tables.len();\n    tables.push(vec![G::identity(); 2_usize.pow(window.into())]);\n    let mut accum = G::identity();\n    for i in 1 .. tables[p].len() {\n      accum += pair.1;\n      tables[p][i] = accum;\n    }\n  }\n  tables\n}\n\n// Straus's algorithm for multiexponentiation, as published in The American Mathematical Monthly\n// DOI: 10.2307/2310929\npub(crate) fn straus<G: Zeroize + Group<Scalar: PrimeFieldBits>>(\n  pairs: &[(G::Scalar, G)],\n  window: u8,\n) -> G {\n  let mut groupings = prep_bits(pairs, window);\n  let mut tables = prep_tables(pairs, window);\n\n  let mut res = G::identity();\n  for b in (0 .. groupings[0].len()).rev() {\n    if b != (groupings[0].len() - 1) {\n      for _ in 0 .. window {\n        res = res.double();\n      }\n    }\n\n    for s in 0 .. tables.len() {\n      res += tables[s][usize::from(groupings[s][b])];\n    }\n  }\n\n  groupings.zeroize();\n  tables.zeroize();\n  res\n}\n\npub(crate) fn straus_vartime<G: Group<Scalar: PrimeFieldBits>>(\n  pairs: &[(G::Scalar, G)],\n  window: u8,\n) -> G {\n  let groupings = prep_bits(pairs, window);\n  let tables = prep_tables(pairs, window);\n\n  let mut res: Option<G> = None;\n  for b in (0 .. groupings[0].len()).rev() {\n    if b != (groupings[0].len() - 1) {\n      for _ in 0 .. window {\n        res = res.map(|res| res.double());\n      }\n    }\n\n    for s in 0 .. 
tables.len() {\n      if groupings[s][b] != 0 {\n        if let Some(res) = res.as_mut() {\n          *res += tables[s][usize::from(groupings[s][b])];\n        } else {\n          res = Some(tables[s][usize::from(groupings[s][b])]);\n        }\n      }\n    }\n  }\n\n  res.unwrap_or_else(G::identity)\n}\n"
  },
  {
    "path": "crypto/multiexp/src/tests/batch.rs",
    "content": "use rand_core::OsRng;\n\nuse zeroize::Zeroize;\n\nuse rand_core::RngCore;\n\nuse ff::{Field, PrimeFieldBits};\nuse group::Group;\n\nuse crate::BatchVerifier;\n\npub(crate) fn test_batch<G: Zeroize + Group<Scalar: Zeroize + PrimeFieldBits>>() {\n  let valid = |batch: BatchVerifier<_, G>| {\n    assert!(batch.verify());\n    assert!(batch.verify_vartime());\n    assert_eq!(batch.blame_vartime(), None);\n    assert_eq!(batch.verify_with_vartime_blame(), Ok(()));\n    assert_eq!(batch.verify_vartime_with_vartime_blame(), Ok(()));\n  };\n\n  let invalid = |batch: BatchVerifier<_, G>, id| {\n    assert!(!batch.verify());\n    assert!(!batch.verify_vartime());\n    assert_eq!(batch.blame_vartime(), Some(id));\n    assert_eq!(batch.verify_with_vartime_blame(), Err(id));\n    assert_eq!(batch.verify_vartime_with_vartime_blame(), Err(id));\n  };\n\n  // Test an empty batch\n  let batch = BatchVerifier::new(0);\n  valid(batch);\n\n  // Test a batch with one set of statements\n  let valid_statements = vec![(-G::Scalar::ONE, G::generator()), (G::Scalar::ONE, G::generator())];\n  let mut batch = BatchVerifier::new(1);\n  batch.queue(&mut OsRng, 0, valid_statements.clone());\n  valid(batch);\n\n  // Test a batch with an invalid set of statements fails properly\n  let invalid_statements = vec![(-G::Scalar::ONE, G::generator())];\n  let mut batch = BatchVerifier::new(1);\n  batch.queue(&mut OsRng, 0, invalid_statements.clone());\n  invalid(batch, 0);\n\n  // Test blame can properly identify faulty participants\n  // Run with 17 statements, rotating which one is faulty\n  for i in 0 .. 17 {\n    let mut batch = BatchVerifier::new(17);\n    for j in 0 .. 17 {\n      batch.queue(\n        &mut OsRng,\n        j,\n        if i == j { invalid_statements.clone() } else { valid_statements.clone() },\n      );\n    }\n    invalid(batch, i);\n  }\n\n  // Test blame always identifies the left-most invalid statement\n  for i in 1 .. 32 {\n    for j in 1 .. 
i {\n      let mut batch = BatchVerifier::new(j);\n      let mut leftmost = None;\n\n      // Create j statements\n      for k in 0 .. j {\n        batch.queue(\n          &mut OsRng,\n          k,\n          // The usage of i / 10 makes this less likely to add invalid elements, and increases\n          // the space between them\n          // For high i values, yet low j values, this will make it likely that random elements\n          // are at/near the end\n          if ((OsRng.next_u64() % u64::try_from(1 + (i / 4)).unwrap()) == 0) ||\n            (leftmost.is_none() && (k == (j - 1)))\n          {\n            if leftmost.is_none() {\n              leftmost = Some(k);\n            }\n            invalid_statements.clone()\n          } else {\n            valid_statements.clone()\n          },\n        );\n      }\n\n      invalid(batch, leftmost.unwrap());\n    }\n  }\n}\n"
  },
  {
    "path": "crypto/multiexp/src/tests/mod.rs",
    "content": "use std::time::Instant;\n\nuse rand_core::OsRng;\n\nuse zeroize::Zeroize;\n\nuse ff::{Field, PrimeFieldBits};\nuse group::Group;\n\nuse k256::ProjectivePoint;\nuse dalek_ff_group::EdwardsPoint;\n\nuse crate::{straus, straus_vartime, pippenger, pippenger_vartime, multiexp, multiexp_vartime};\n\n#[cfg(feature = \"batch\")]\nmod batch;\n#[cfg(feature = \"batch\")]\nuse batch::test_batch;\n\n#[allow(dead_code)]\nfn benchmark_internal<G: Zeroize + Group<Scalar: Zeroize + PrimeFieldBits>>(straus_bool: bool) {\n  let runs: usize = 20;\n\n  let mut start = 0;\n  let mut increment: usize = 5;\n  let mut total: usize = 250;\n  let mut current = 2;\n\n  if !straus_bool {\n    start = 100;\n    increment = 25;\n    total = 1000;\n    current = 4;\n  };\n\n  let mut pairs = Vec::with_capacity(total);\n  let mut sum = G::identity();\n\n  for _ in 0 .. start {\n    pairs.push((G::Scalar::random(&mut OsRng), G::generator() * G::Scalar::random(&mut OsRng)));\n    sum += pairs[pairs.len() - 1].1 * pairs[pairs.len() - 1].0;\n  }\n\n  for _ in 0 .. (total / increment) {\n    for _ in 0 .. increment {\n      pairs.push((G::Scalar::random(&mut OsRng), G::generator() * G::Scalar::random(&mut OsRng)));\n      sum += pairs[pairs.len() - 1].1 * pairs[pairs.len() - 1].0;\n    }\n\n    let now = Instant::now();\n    for _ in 0 .. runs {\n      if straus_bool {\n        assert_eq!(straus(&pairs, current), sum);\n      } else {\n        assert_eq!(pippenger(&pairs, current), sum);\n      }\n    }\n    let current_per = now.elapsed().as_micros() / u128::try_from(pairs.len()).unwrap();\n\n    let now = Instant::now();\n    for _ in 0 .. 
runs {\n      if straus_bool {\n        assert_eq!(straus(&pairs, current + 1), sum);\n      } else {\n        assert_eq!(pippenger(&pairs, current + 1), sum);\n      }\n    }\n    let next_per = now.elapsed().as_micros() / u128::try_from(pairs.len()).unwrap();\n\n    if next_per < current_per {\n      current += 1;\n      println!(\n        \"{} {} is more efficient at {} with {}µs per\",\n        if straus_bool { \"Straus\" } else { \"Pippenger\" },\n        current,\n        pairs.len(),\n        next_per\n      );\n      if current >= 8 {\n        return;\n      }\n    }\n  }\n}\n\nfn test_multiexp<G: Zeroize + Group<Scalar: Zeroize + PrimeFieldBits>>() {\n  let test = |pairs: &[_], sum| {\n    // These should automatically determine the best algorithm\n    assert_eq!(multiexp(pairs), sum);\n    assert_eq!(multiexp_vartime(pairs), sum);\n\n    // Also explicitly test straus/pippenger for each bit size\n    if !pairs.is_empty() {\n      for window in 1 .. 8 {\n        assert_eq!(straus(pairs, window), sum);\n        assert_eq!(straus_vartime(pairs, window), sum);\n        assert_eq!(pippenger(pairs, window), sum);\n        assert_eq!(pippenger_vartime(pairs, window), sum);\n      }\n    }\n  };\n\n  // Test an empty multiexp is identity\n  test(&[], G::identity());\n\n  // Test an multiexp of identity/zero elements is identity\n  test(&[(G::Scalar::ZERO, G::generator())], G::identity());\n  test(&[(G::Scalar::ONE, G::identity())], G::identity());\n\n  // Test a variety of multiexp sizes\n  let mut pairs = Vec::with_capacity(1000);\n  let mut sum = G::identity();\n  for _ in 0 .. 10 {\n    // Test a multiexp of a single item\n    // On successive loop iterations, this will test a multiexp with an odd number of pairs\n    pairs.push((G::Scalar::random(&mut OsRng), G::generator() * G::Scalar::random(&mut OsRng)));\n    sum += pairs[pairs.len() - 1].1 * pairs[pairs.len() - 1].0;\n    test(&pairs, sum);\n\n    for _ in 0 .. 
100 {\n      pairs.push((G::Scalar::random(&mut OsRng), G::generator() * G::Scalar::random(&mut OsRng)));\n      sum += pairs[pairs.len() - 1].1 * pairs[pairs.len() - 1].0;\n    }\n    test(&pairs, sum);\n  }\n}\n\n#[test]\nfn test_secp256k1() {\n  test_multiexp::<ProjectivePoint>();\n  #[cfg(feature = \"batch\")]\n  test_batch::<ProjectivePoint>();\n}\n\n#[test]\nfn test_ed25519() {\n  test_multiexp::<EdwardsPoint>();\n  #[cfg(feature = \"batch\")]\n  test_batch::<EdwardsPoint>();\n}\n\n#[ignore]\n#[test]\nfn benchmark() {\n  // Activate the processor's boost clock\n  for _ in 0 .. 30 {\n    test_multiexp::<ProjectivePoint>();\n  }\n\n  benchmark_internal::<ProjectivePoint>(true);\n  benchmark_internal::<ProjectivePoint>(false);\n\n  benchmark_internal::<EdwardsPoint>(true);\n  benchmark_internal::<EdwardsPoint>(false);\n}\n"
  },
  {
    "path": "crypto/schnorr/Cargo.toml",
    "content": "[package]\nname = \"schnorr-signatures\"\nversion = \"0.5.2\"\ndescription = \"Minimal Schnorr signatures crate hosting common code\"\nlicense = \"MIT\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/crypto/schnorr\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nkeywords = [\"schnorr\", \"ff\", \"group\"]\nedition = \"2021\"\nrust-version = \"1.79\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nstd-shims = { path = \"../../common/std-shims\", version = \"^0.1.1\", default-features = false }\n\nrand_core = { version = \"0.6\", default-features = false }\n\nzeroize = { version = \"^1.5\", default-features = false, features = [\"zeroize_derive\"] }\n\ntranscript = { package = \"flexible-transcript\", path = \"../transcript\", version = \"^0.3.2\", default-features = false, optional = true }\n\nciphersuite = { path = \"../ciphersuite\", version = \"^0.4.1\", default-features = false, features = [\"alloc\"] }\nmultiexp = { path = \"../multiexp\", version = \"0.4\", default-features = false, features = [\"batch\"] }\n\n[dev-dependencies]\nhex = \"0.4\"\n\nrand_core = { version = \"0.6\", features = [\"std\"] }\n\nsha2 = \"0.10\"\n\ndalek-ff-group = { path =  \"../dalek-ff-group\" }\nciphersuite = { path = \"../ciphersuite\" }\n\n[features]\naggregate = [\"transcript\"]\nstd = [\"std-shims/std\", \"rand_core/std\", \"zeroize/std\", \"transcript?/std\", \"ciphersuite/std\", \"multiexp/std\"]\ndefault = [\"std\"]\n"
  },
  {
    "path": "crypto/schnorr/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2021-2023 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "crypto/schnorr/README.md",
    "content": "# Schnorr Signatures\n\nA challenge (and therefore HRAm) agnostic Schnorr signature library. This is\nintended to be used as a primitive by a variety of crates relying on Schnorr\nsignatures, voiding the need to constantly define a Schnorr signature struct\nwith associated functions.\n\nThis library provides signatures of the `R, s` form. Batch verification is\nsupported via the multiexp crate. Half-aggregation, as defined in\n<https://eprint.iacr.org/2021/350>, is also supported.\n\nThis library was\n[audited by Cypher Stack in March 2023](https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf),\nculminating in commit\n[669d2dbffc1dafb82a09d9419ea182667115df06](https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06).\nAny subsequent changes have not undergone auditing.\n\nThis library is usable under no_std, via alloc, when the default features are\ndisabled.\n"
  },
  {
    "path": "crypto/schnorr/src/aggregate.rs",
    "content": "use std_shims::{\n  vec::Vec,\n  io::{self, Read, Write},\n};\n\nuse zeroize::Zeroize;\n\nuse transcript::{Transcript, SecureDigest, DigestTranscript};\n\nuse ciphersuite::{\n  group::{\n    ff::{Field, PrimeField},\n    Group, GroupEncoding,\n  },\n  Ciphersuite,\n};\nuse multiexp::multiexp_vartime;\n\nuse crate::SchnorrSignature;\n\n// Returns a unbiased scalar weight to use on a signature in order to prevent malleability\nfn weight<D: Send + Clone + SecureDigest, F: PrimeField>(digest: &mut DigestTranscript<D>) -> F {\n  let mut bytes = digest.challenge(b\"aggregation_weight\");\n  debug_assert_eq!(bytes.len() % 8, 0);\n  // This should be guaranteed thanks to SecureDigest\n  debug_assert!(bytes.len() >= 32);\n\n  let mut res = F::ZERO;\n  let mut i = 0;\n\n  // Derive a scalar from enough bits of entropy that bias is < 2^128\n  // This can't be const due to its usage of a generic\n  // Also due to the usize::try_from, yet that could be replaced with an `as`\n  #[allow(non_snake_case)]\n  let BYTES: usize = usize::try_from((F::NUM_BITS + 128).div_ceil(8)).unwrap();\n\n  let mut remaining = BYTES;\n\n  // We load bits in as u64s\n  const WORD_LEN_IN_BITS: usize = 64;\n  const WORD_LEN_IN_BYTES: usize = WORD_LEN_IN_BITS / 8;\n\n  let mut first = true;\n  while i < remaining {\n    // Shift over the already loaded bits\n    if !first {\n      for _ in 0 .. WORD_LEN_IN_BITS {\n        res += res;\n      }\n    }\n    first = false;\n\n    // Add the next 64 bits\n    res += F::from(u64::from_be_bytes(bytes[i .. 
(i + WORD_LEN_IN_BYTES)].try_into().unwrap()));\n    i += WORD_LEN_IN_BYTES;\n\n    // If we've exhausted this challenge, get another\n    if i == bytes.len() {\n      bytes = digest.challenge(b\"aggregation_weight_continued\");\n      remaining -= i;\n      i = 0;\n    }\n  }\n  res\n}\n\n/// Aggregate Schnorr signature as defined in <https://eprint.iacr.org/2021/350>.\n#[allow(non_snake_case)]\n#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]\npub struct SchnorrAggregate<C: Ciphersuite> {\n  Rs: Vec<C::G>,\n  s: C::F,\n}\n\nimpl<C: Ciphersuite> SchnorrAggregate<C> {\n  /// Read a SchnorrAggregate from something implementing Read.\n  pub fn read<R: Read>(reader: &mut R) -> io::Result<Self> {\n    let mut len = [0; 4];\n    reader.read_exact(&mut len)?;\n\n    #[allow(non_snake_case)]\n    let mut Rs = vec![];\n    for _ in 0 .. u32::from_le_bytes(len) {\n      Rs.push(C::read_G(reader)?);\n    }\n\n    Ok(SchnorrAggregate { Rs, s: C::read_F(reader)? })\n  }\n\n  /// Write a SchnorrAggregate to something implementing Write.\n  ///\n  /// This will panic if more than 4 billion signatures were aggregated.\n  pub fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {\n    writer.write_all(\n      &u32::try_from(self.Rs.len())\n        .expect(\"more than 4 billion signatures in aggregate\")\n        .to_le_bytes(),\n    )?;\n    #[allow(non_snake_case)]\n    for R in &self.Rs {\n      writer.write_all(R.to_bytes().as_ref())?;\n    }\n    writer.write_all(self.s.to_repr().as_ref())\n  }\n\n  /// Serialize a SchnorrAggregate, returning a `Vec<u8>`.\n  pub fn serialize(&self) -> Vec<u8> {\n    let mut buf = vec![];\n    self.write(&mut buf).unwrap();\n    buf\n  }\n\n  #[allow(non_snake_case)]\n  pub fn Rs(&self) -> &[C::G] {\n    self.Rs.as_slice()\n  }\n\n  /// Perform signature verification.\n  ///\n  /// Challenges must be properly crafted, which means being binding to the public key, nonce, and\n  /// any message. 
Failure to do so will let a malicious adversary to forge signatures for\n  /// different keys/messages.\n  ///\n  /// The DST used here must prevent a collision with whatever hash function produced the\n  /// challenges.\n  #[must_use]\n  pub fn verify(&self, dst: &'static [u8], keys_and_challenges: &[(C::G, C::F)]) -> bool {\n    if self.Rs.len() != keys_and_challenges.len() {\n      return false;\n    }\n\n    let mut digest = DigestTranscript::<C::H>::new(dst);\n    digest.domain_separate(b\"signatures\");\n    for (_, challenge) in keys_and_challenges {\n      digest.append_message(b\"challenge\", challenge.to_repr());\n    }\n\n    let mut pairs = Vec::with_capacity((2 * keys_and_challenges.len()) + 1);\n    for (i, (key, challenge)) in keys_and_challenges.iter().enumerate() {\n      let z = weight(&mut digest);\n      pairs.push((z, self.Rs[i]));\n      pairs.push((z * challenge, *key));\n    }\n    pairs.push((-self.s, C::generator()));\n    multiexp_vartime(&pairs).is_identity().into()\n  }\n}\n\n/// A signature aggregator capable of consuming signatures in order to produce an aggregate.\n#[allow(non_snake_case)]\n#[derive(Clone, Debug, Zeroize)]\npub struct SchnorrAggregator<C: Ciphersuite> {\n  digest: DigestTranscript<C::H>,\n  sigs: Vec<SchnorrSignature<C>>,\n}\n\nimpl<C: Ciphersuite> SchnorrAggregator<C> {\n  /// Create a new aggregator.\n  ///\n  /// The DST used here must prevent a collision with whatever hash function produced the\n  /// challenges.\n  pub fn new(dst: &'static [u8]) -> Self {\n    let mut res = Self { digest: DigestTranscript::<C::H>::new(dst), sigs: vec![] };\n    res.digest.domain_separate(b\"signatures\");\n    res\n  }\n\n  /// Aggregate a signature.\n  pub fn aggregate(&mut self, challenge: C::F, sig: SchnorrSignature<C>) {\n    self.digest.append_message(b\"challenge\", challenge.to_repr());\n    self.sigs.push(sig);\n  }\n\n  /// Complete aggregation, returning None if none were aggregated.\n  pub fn complete(mut self) -> 
Option<SchnorrAggregate<C>> {\n    if self.sigs.is_empty() {\n      return None;\n    }\n\n    let mut aggregate = SchnorrAggregate { Rs: Vec::with_capacity(self.sigs.len()), s: C::F::ZERO };\n    for i in 0 .. self.sigs.len() {\n      aggregate.Rs.push(self.sigs[i].R);\n      aggregate.s += self.sigs[i].s * weight::<_, C::F>(&mut self.digest);\n    }\n    Some(aggregate)\n  }\n}\n"
  },
  {
    "path": "crypto/schnorr/src/lib.rs",
    "content": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![doc = include_str!(\"../README.md\")]\n#![cfg_attr(not(feature = \"std\"), no_std)]\n\nuse core::ops::Deref;\n#[cfg(not(feature = \"std\"))]\n#[macro_use]\nextern crate alloc;\nuse std_shims::{\n  vec::Vec,\n  io::{self, Read, Write},\n};\n\nuse rand_core::{RngCore, CryptoRng};\n\nuse zeroize::{Zeroize, Zeroizing};\n\nuse ciphersuite::{\n  group::{\n    ff::{Field, PrimeField},\n    Group, GroupEncoding,\n  },\n  Ciphersuite,\n};\nuse multiexp::{multiexp_vartime, BatchVerifier};\n\n/// Half-aggregation from <https://eprint.iacr.org/2021/350>.\n#[cfg(feature = \"aggregate\")]\npub mod aggregate;\n\n#[cfg(test)]\nmod tests;\n\n/// A Schnorr signature of the form (R, s) where s = r + cx.\n///\n/// These are intended to be strict. It is generic over Ciphersuite which is for PrimeGroups,\n/// and mandates canonical encodings in its read function.\n///\n/// RFC 8032 has an alternative verification formula, 8R = 8s - 8cX, which is intended to handle\n/// torsioned nonces/public keys. Due to this library's strict requirements, such signatures will\n/// not be verifiable with this library.\n#[allow(non_snake_case)]\n#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]\npub struct SchnorrSignature<C: Ciphersuite> {\n  pub R: C::G,\n  pub s: C::F,\n}\n\nimpl<C: Ciphersuite> SchnorrSignature<C> {\n  /// Read a SchnorrSignature from something implementing Read.\n  pub fn read<R: Read>(reader: &mut R) -> io::Result<Self> {\n    Ok(SchnorrSignature { R: C::read_G(reader)?, s: C::read_F(reader)? 
})\n  }\n\n  /// Write a SchnorrSignature to something implementing Read.\n  pub fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {\n    writer.write_all(self.R.to_bytes().as_ref())?;\n    writer.write_all(self.s.to_repr().as_ref())\n  }\n\n  /// Serialize a SchnorrSignature, returning a `Vec<u8>`.\n  pub fn serialize(&self) -> Vec<u8> {\n    let mut buf = vec![];\n    self.write(&mut buf).unwrap();\n    buf\n  }\n\n  /// Sign a Schnorr signature with the given nonce for the specified challenge.\n  ///\n  /// This challenge must be properly crafted, which means being binding to the public key, nonce,\n  /// and any message. Failure to do so will let a malicious adversary to forge signatures for\n  /// different keys/messages.\n  #[allow(clippy::needless_pass_by_value)] // Prevents further-use of this single-use value\n  pub fn sign(\n    private_key: &Zeroizing<C::F>,\n    nonce: Zeroizing<C::F>,\n    challenge: C::F,\n  ) -> SchnorrSignature<C> {\n    SchnorrSignature {\n      // Uses deref instead of * as * returns C::F yet deref returns &C::F, preventing a copy\n      R: C::generator() * nonce.deref(),\n      s: (challenge * private_key.deref()) + nonce.deref(),\n    }\n  }\n\n  /// Return the series of pairs whose products sum to zero for a valid signature.\n  /// This is intended to be used with a multiexp.\n  pub fn batch_statements(&self, public_key: C::G, challenge: C::F) -> [(C::F, C::G); 3] {\n    // s = r + ca\n    // sG == R + cA\n    // R + cA - sG == 0\n    [\n      // R\n      (C::F::ONE, self.R),\n      // cA\n      (challenge, public_key),\n      // -sG\n      (-self.s, C::generator()),\n    ]\n  }\n\n  /// Verify a Schnorr signature for the given key with the specified challenge.\n  ///\n  /// This challenge must be properly crafted, which means being binding to the public key, nonce,\n  /// and any message. 
Failure to do so will let a malicious adversary to forge signatures for\n  /// different keys/messages.\n  #[must_use]\n  pub fn verify(&self, public_key: C::G, challenge: C::F) -> bool {\n    multiexp_vartime(&self.batch_statements(public_key, challenge)).is_identity().into()\n  }\n\n  /// Queue a signature for batch verification.\n  ///\n  /// This challenge must be properly crafted, which means being binding to the public key, nonce,\n  /// and any message. Failure to do so will let a malicious adversary to forge signatures for\n  /// different keys/messages.\n  pub fn batch_verify<R: RngCore + CryptoRng, I: Copy + Zeroize>(\n    &self,\n    rng: &mut R,\n    batch: &mut BatchVerifier<I, C::G>,\n    id: I,\n    public_key: C::G,\n    challenge: C::F,\n  ) {\n    batch.queue(rng, id, self.batch_statements(public_key, challenge));\n  }\n}\n"
  },
  {
    "path": "crypto/schnorr/src/tests/mod.rs",
    "content": "use core::ops::Deref;\n\nuse zeroize::Zeroizing;\nuse rand_core::OsRng;\n\nuse dalek_ff_group::Ed25519;\nuse ciphersuite::{\n  group::{ff::Field, Group},\n  Ciphersuite,\n};\nuse multiexp::BatchVerifier;\n\nuse crate::SchnorrSignature;\n#[cfg(feature = \"aggregate\")]\nuse crate::aggregate::{SchnorrAggregator, SchnorrAggregate};\n\nmod rfc8032;\n\npub(crate) fn sign<C: Ciphersuite>() {\n  let private_key = Zeroizing::new(C::random_nonzero_F(&mut OsRng));\n  let nonce = Zeroizing::new(C::random_nonzero_F(&mut OsRng));\n  let challenge = C::random_nonzero_F(&mut OsRng); // Doesn't bother to craft an HRAm\n  assert!(SchnorrSignature::<C>::sign(&private_key, nonce, challenge)\n    .verify(C::generator() * private_key.deref(), challenge));\n}\n\n// The above sign function verifies signing works\n// This verifies invalid signatures don't pass, using zero signatures, which should effectively be\n// random\npub(crate) fn verify<C: Ciphersuite>() {\n  assert!(!SchnorrSignature::<C> { R: C::G::identity(), s: C::F::ZERO }\n    .verify(C::generator() * C::random_nonzero_F(&mut OsRng), C::random_nonzero_F(&mut OsRng)));\n}\n\npub(crate) fn batch_verify<C: Ciphersuite>() {\n  // Create 5 signatures\n  let mut keys = vec![];\n  let mut challenges = vec![];\n  let mut sigs = vec![];\n  for i in 0 .. 
5 {\n    keys.push(Zeroizing::new(C::random_nonzero_F(&mut OsRng)));\n    challenges.push(C::random_nonzero_F(&mut OsRng));\n    sigs.push(SchnorrSignature::<C>::sign(\n      &keys[i],\n      Zeroizing::new(C::random_nonzero_F(&mut OsRng)),\n      challenges[i],\n    ));\n  }\n\n  // Batch verify\n  {\n    let mut batch = BatchVerifier::new(5);\n    for (i, sig) in sigs.iter().enumerate() {\n      sig.batch_verify(&mut OsRng, &mut batch, i, C::generator() * keys[i].deref(), challenges[i]);\n    }\n    batch.verify_vartime_with_vartime_blame().unwrap();\n  }\n\n  // Shift 1 from s from one to another and verify it fails\n  // This test will fail if unique factors aren't used per-signature, hence its inclusion\n  {\n    let mut batch = BatchVerifier::new(5);\n    for (i, mut sig) in sigs.clone().drain(..).enumerate() {\n      if i == 1 {\n        sig.s += C::F::ONE;\n      }\n      if i == 2 {\n        sig.s -= C::F::ONE;\n      }\n      sig.batch_verify(&mut OsRng, &mut batch, i, C::generator() * keys[i].deref(), challenges[i]);\n    }\n    if let Err(blame) = batch.verify_vartime_with_vartime_blame() {\n      assert!((blame == 1) || (blame == 2));\n    } else {\n      panic!(\"Batch verification considered malleated signatures valid\");\n    }\n  }\n}\n\n#[cfg(feature = \"aggregate\")]\npub(crate) fn aggregate<C: Ciphersuite>() {\n  const DST: &[u8] = b\"Schnorr Aggregator Test\";\n\n  // Create 5 signatures\n  let mut keys = vec![];\n  let mut challenges = vec![];\n  let mut aggregator = SchnorrAggregator::<C>::new(DST);\n  for i in 0 .. 
5 {\n    keys.push(Zeroizing::new(C::random_nonzero_F(&mut OsRng)));\n    // In practice, this MUST be a secure challenge binding to the nonce, key, and any message\n    challenges.push(C::random_nonzero_F(&mut OsRng));\n    aggregator.aggregate(\n      challenges[i],\n      SchnorrSignature::<C>::sign(\n        &keys[i],\n        Zeroizing::new(C::random_nonzero_F(&mut OsRng)),\n        challenges[i],\n      ),\n    );\n  }\n\n  let aggregate = aggregator.complete().unwrap();\n  let aggregate =\n    SchnorrAggregate::<C>::read::<&[u8]>(&mut aggregate.serialize().as_ref()).unwrap();\n  assert!(aggregate.verify(\n    DST,\n    keys\n      .iter()\n      .map(|key| C::generator() * key.deref())\n      .zip(challenges.iter().copied())\n      .collect::<Vec<_>>()\n      .as_ref(),\n  ));\n}\n\n#[test]\nfn test() {\n  sign::<Ed25519>();\n  verify::<Ed25519>();\n  batch_verify::<Ed25519>();\n  #[cfg(feature = \"aggregate\")]\n  aggregate::<Ed25519>();\n}\n"
  },
  {
    "path": "crypto/schnorr/src/tests/rfc8032.rs",
    "content": "// RFC 8032 Ed25519 test vectors\n// The s = r + cx format modernly used for Schnorr signatures was popularized by EdDSA\n// While not all RFC 8032 signatures will work with this library, any canonical ones will, and\n// these vectors are canonical\n\nuse sha2::{Digest, Sha512};\n\nuse dalek_ff_group::{Scalar, Ed25519};\nuse ciphersuite::{group::GroupEncoding, Ciphersuite};\n\nuse crate::SchnorrSignature;\n\n// Public key, message, signature\n#[rustfmt::skip]\nconst VECTORS: [(&str, &str, &str); 5] = [\n  (\n    \"d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a\",\n    \"\",\n    \"e5564300c360ac729086e2cc806e828a84877f1eb8e5d974d873e065224901555fb8821590a33bacc61e39701cf9b46bd25bf5f0595bbe24655141438e7a100b\"\n  ),\n\n  (\n    \"3d4017c3e843895a92b70aa74d1b7ebc9c982ccf2ec4968cc0cd55f12af4660c\",\n    \"72\",\n    \"92a009a9f0d4cab8720e820b5f642540a2b27b5416503f8fb3762223ebdb69da085ac1e43e15996e458f3613d0f11d8c387b2eaeb4302aeeb00d291612bb0c00\"\n  ),\n\n  (\n    \"fc51cd8e6218a1a38da47ed00230f0580816ed13ba3303ac5deb911548908025\",\n    \"af82\",\n    \"6291d657deec24024827e69c3abe01a30ce548a284743a445e3680d7db5ac3ac18ff9b538d16f290ae67f760984dc6594a7c15e9716ed28dc027beceea1ec40a\"\n  ),\n\n  (\n    \"278117fc144c72340f67d0f2316e8386ceffbf2b2428c9c51fef7c597f1d426e\",\n    
\"08b8b2b733424243760fe426a4b54908632110a66c2f6591eabd3345e3e4eb98fa6e264bf09efe12ee50f8f54e9f77b1e355f6c50544e23fb1433ddf73be84d879de7c0046dc4996d9e773f4bc9efe5738829adb26c81b37c93a1b270b20329d658675fc6ea534e0810a4432826bf58c941efb65d57a338bbd2e26640f89ffbc1a858efcb8550ee3a5e1998bd177e93a7363c344fe6b199ee5d02e82d522c4feba15452f80288a821a579116ec6dad2b3b310da903401aa62100ab5d1a36553e06203b33890cc9b832f79ef80560ccb9a39ce767967ed628c6ad573cb116dbefefd75499da96bd68a8a97b928a8bbc103b6621fcde2beca1231d206be6cd9ec7aff6f6c94fcd7204ed3455c68c83f4a41da4af2b74ef5c53f1d8ac70bdcb7ed185ce81bd84359d44254d95629e9855a94a7c1958d1f8ada5d0532ed8a5aa3fb2d17ba70eb6248e594e1a2297acbbb39d502f1a8c6eb6f1ce22b3de1a1f40cc24554119a831a9aad6079cad88425de6bde1a9187ebb6092cf67bf2b13fd65f27088d78b7e883c8759d2c4f5c65adb7553878ad575f9fad878e80a0c9ba63bcbcc2732e69485bbc9c90bfbd62481d9089beccf80cfe2df16a2cf65bd92dd597b0707e0917af48bbb75fed413d238f5555a7a569d80c3414a8d0859dc65a46128bab27af87a71314f318c782b23ebfe808b82b0ce26401d2e22f04d83d1255dc51addd3b75a2b1ae0784504df543af8969be3ea7082ff7fc9888c144da2af58429ec96031dbcad3dad9af0dcbaaaf268cb8fcffead94f3c7ca495e056a9b47acdb751fb73e666c6c655ade8297297d07ad1ba5e43f1bca32301651339e22904cc8c42f58c30c04aafdb038dda0847dd988dcda6f3bfd15c4b4c4525004aa06eeff8ca61783aacec57fb3d1f92b0fe2fd1a85f6724517b65e614ad6808d6f6ee34dff7310fdc82aebfd904b01e1dc54b2927094b2db68d6f903b68401adebf5a7e08d78ff4ef5d63653a65040cf9bfd4aca7984a74d37145986780fc0b16ac451649de6188a7dbdf191f64b5fc5e2ab47b57f7f7276cd419c17a3ca8e1b939ae49e488acba6b965610b5480109c8b17b80e1b7b750dfc7598d5d5011fd2dcc5600a32ef5b52a1ecc820e308aa342721aac0943bf6686b64b2579376504ccc493d97e6aed3fb0f9cd71a43dd497f01f17c0e2cb3797aa2a2f256656168e6c496afc5fb93246f6b1116398a346f1a641f3b041e989f7914f90cc2c7fff357876e506b50d334ba77c225bc307ba537152f3f1610e4eafe595f6d9d90d11faa933a15ef1369546868a7f3a45a96768d40fd9d03412c091c6315cf4fde7cb68606937380db2eaaa707b4c4185c32eddcdd306705e4dc1ffc872eeee475a64dfac86aba41c0618983f8741c5
ef68d3a101e8a3b8cac60c905c15fc910840b94c00a0b9d0\",\n    \"0aab4c900501b3e24d7cdf4663326a3a87df5e4843b2cbdb67cbf6e460fec350aa5371b1508f9f4528ecea23c436d94b5e8fcd4f681e30a6ac00a9704a188a03\"\n  ),\n\n  (\n    \"ec172b93ad5e563bf4932c70e1245034c35467ef2efd4d64ebf819683467e2bf\",\n    \"ddaf35a193617abacc417349ae20413112e6fa4e89a97ea20a9eeee64b55d39a2192992a274fc1a836ba3c23a3feebbd454d4423643ce80e2a9ac94fa54ca49f\",\n    \"dc2a4459e7369633a52b1bf277839a00201009a3efbf3ecb69bea2186c26b58909351fc9ac90b3ecfdfbc7c66431e0303dca179c138ac17ad9bef1177331a704\"\n  ),\n];\n\n#[test]\nfn test_rfc8032() {\n  for vector in VECTORS {\n    let key = Ed25519::read_G::<&[u8]>(&mut hex::decode(vector.0).unwrap().as_ref()).unwrap();\n    let sig =\n      SchnorrSignature::<Ed25519>::read::<&[u8]>(&mut hex::decode(vector.2).unwrap().as_ref())\n        .unwrap();\n    let hram = Sha512::new_with_prefix(\n      [sig.R.to_bytes().as_ref(), &key.to_bytes(), &hex::decode(vector.1).unwrap()].concat(),\n    );\n    assert!(sig.verify(key, Scalar::from_hash(hram)));\n  }\n}\n"
  },
  {
    "path": "crypto/schnorrkel/Cargo.toml",
    "content": "[package]\nname = \"frost-schnorrkel\"\nversion = \"0.2.0\"\ndescription = \"modular-frost Algorithm compatible with Schnorrkel\"\nlicense = \"MIT\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/crypto/schnorrkel\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nkeywords = [\"frost\", \"multisig\", \"threshold\", \"schnorrkel\"]\nedition = \"2021\"\nrust-version = \"1.80\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nrand_core = \"0.6\"\nzeroize = \"^1.5\"\n\ntranscript = { package = \"flexible-transcript\", path = \"../transcript\", version = \"^0.3.2\", features = [\"merlin\"] }\n\ngroup = \"0.13\"\n\ndalek-ff-group = { path = \"../dalek-ff-group\" }\nciphersuite = { path = \"../ciphersuite\", version = \"^0.4.1\", features = [\"std\"] }\nschnorr = { package = \"schnorr-signatures\", path = \"../schnorr\", version = \"^0.5.1\" }\nfrost = { path = \"../frost\", package = \"modular-frost\", version = \"^0.10.0\", features = [\"ristretto\"] }\n\nschnorrkel = { version = \"0.11\" }\n\n[dev-dependencies]\nfrost = { path = \"../frost\", package = \"modular-frost\", features = [\"tests\"] }\n"
  },
  {
    "path": "crypto/schnorrkel/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2023 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "crypto/schnorrkel/README.md",
    "content": "# FROST Schnorrkel\n\nA Schnorrkel algorithm for [modular-frost](https://docs.rs/modular-frost).\n\nWhile the Schnorrkel algorithm has not been audited, the underlying FROST\nimplementation was\n[audited by Cypher Stack in March 2023](https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf),\nculminating in commit\n[669d2dbffc1dafb82a09d9419ea182667115df06](https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06).\nAny subsequent changes have not undergone auditing.\n"
  },
  {
    "path": "crypto/schnorrkel/src/lib.rs",
    "content": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![doc = include_str!(\"../README.md\")]\n\nuse std::io::{self, Read};\n\nuse rand_core::{RngCore, CryptoRng};\n\nuse zeroize::Zeroizing;\n\nuse transcript::{Transcript, MerlinTranscript};\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::{\n  group::{ff::PrimeField, GroupEncoding},\n  Ciphersuite,\n};\nuse schnorr::SchnorrSignature;\n\nuse ::frost::{\n  Participant, ThresholdKeys, ThresholdView, FrostError,\n  algorithm::{Hram, Algorithm, Schnorr},\n};\n\n/// The [modular-frost](https://docs.rs/modular-frost) library.\npub mod frost {\n  pub use ::frost::*;\n}\n\nuse schnorrkel::{PublicKey, Signature, context::SigningTranscript, signing_context};\n\ntype RistrettoPoint = <Ristretto as Ciphersuite>::G;\ntype Scalar = <Ristretto as Ciphersuite>::F;\n\n#[cfg(test)]\nmod tests;\n\n#[derive(Clone)]\nstruct SchnorrkelHram;\nimpl Hram<Ristretto> for SchnorrkelHram {\n  #[allow(non_snake_case)]\n  fn hram(R: &RistrettoPoint, A: &RistrettoPoint, m: &[u8]) -> Scalar {\n    let ctx_len =\n      usize::try_from(u32::from_le_bytes(m[0 .. 4].try_into().expect(\"malformed message\")))\n        .unwrap();\n\n    let mut t = signing_context(&m[4 .. 
(4 + ctx_len)]).bytes(&m[(4 + ctx_len) ..]);\n    t.proto_name(b\"Schnorr-sig\");\n    let convert =\n      |point: &RistrettoPoint| PublicKey::from_bytes(&point.to_bytes()).unwrap().into_compressed();\n    t.commit_point(b\"sign:pk\", &convert(A));\n    t.commit_point(b\"sign:R\", &convert(R));\n    Scalar::from_repr(t.challenge_scalar(b\"sign:c\").to_bytes()).unwrap()\n  }\n}\n\n/// FROST Schnorrkel algorithm.\n#[derive(Clone)]\npub struct Schnorrkel {\n  context: &'static [u8],\n  schnorr: Schnorr<Ristretto, MerlinTranscript, SchnorrkelHram>,\n  msg: Option<Vec<u8>>,\n}\n\nimpl Schnorrkel {\n  /// Create a new algorithm with the specified context.\n  ///\n  /// If the context is greater than or equal to 4 GB in size, this will panic.\n  pub fn new(context: &'static [u8]) -> Schnorrkel {\n    Schnorrkel {\n      context,\n      schnorr: Schnorr::new(MerlinTranscript::new(b\"FROST Schnorrkel\")),\n      msg: None,\n    }\n  }\n}\n\nimpl Algorithm<Ristretto> for Schnorrkel {\n  type Transcript = MerlinTranscript;\n  type Addendum = ();\n  type Signature = Signature;\n\n  fn transcript(&mut self) -> &mut Self::Transcript {\n    self.schnorr.transcript()\n  }\n\n  fn nonces(&self) -> Vec<Vec<<Ristretto as Ciphersuite>::G>> {\n    self.schnorr.nonces()\n  }\n\n  fn preprocess_addendum<R: RngCore + CryptoRng>(\n    &mut self,\n    _: &mut R,\n    _: &ThresholdKeys<Ristretto>,\n  ) {\n  }\n\n  fn read_addendum<R: Read>(&self, _: &mut R) -> io::Result<Self::Addendum> {\n    Ok(())\n  }\n\n  fn process_addendum(\n    &mut self,\n    _: &ThresholdView<Ristretto>,\n    _: Participant,\n    (): (),\n  ) -> Result<(), FrostError> {\n    Ok(())\n  }\n\n  fn sign_share(\n    &mut self,\n    params: &ThresholdView<Ristretto>,\n    nonce_sums: &[Vec<RistrettoPoint>],\n    nonces: Vec<Zeroizing<Scalar>>,\n    msg: &[u8],\n  ) -> Scalar {\n    self.msg = Some(msg.to_vec());\n    self.schnorr.sign_share(\n      params,\n      nonce_sums,\n      nonces,\n      &[\n        
&u32::try_from(self.context.len()).expect(\"context exceeded 2^32 bytes\").to_le_bytes(),\n        self.context,\n        msg,\n      ]\n      .concat(),\n    )\n  }\n\n  #[must_use]\n  fn verify(\n    &self,\n    group_key: RistrettoPoint,\n    nonces: &[Vec<RistrettoPoint>],\n    sum: Scalar,\n  ) -> Option<Self::Signature> {\n    let mut sig = (SchnorrSignature::<Ristretto> { R: nonces[0][0], s: sum }).serialize();\n    sig[63] |= 1 << 7;\n    Some(Signature::from_bytes(&sig).unwrap()).filter(|sig| {\n      PublicKey::from_bytes(&group_key.to_bytes())\n        .unwrap()\n        .verify(&mut signing_context(self.context).bytes(self.msg.as_ref().unwrap()), sig)\n        .is_ok()\n    })\n  }\n\n  fn verify_share(\n    &self,\n    verification_share: RistrettoPoint,\n    nonces: &[Vec<RistrettoPoint>],\n    share: Scalar,\n  ) -> Result<Vec<(Scalar, RistrettoPoint)>, ()> {\n    self.schnorr.verify_share(verification_share, nonces, share)\n  }\n}\n"
  },
  {
    "path": "crypto/schnorrkel/src/tests.rs",
    "content": "use rand_core::OsRng;\n\nuse group::GroupEncoding;\nuse frost::{\n  Participant,\n  tests::{key_gen, algorithm_machines, sign},\n};\n\nuse schnorrkel::{keys::PublicKey, context::SigningContext};\n\nuse crate::Schnorrkel;\n\n#[test]\nfn test() {\n  const CONTEXT: &[u8] = b\"FROST Schnorrkel Test\";\n  const MSG: &[u8] = b\"Hello, World!\";\n\n  let keys = key_gen(&mut OsRng);\n  let key = keys[&Participant::new(1).unwrap()].group_key();\n  let algorithm = Schnorrkel::new(CONTEXT);\n  let machines = algorithm_machines(&mut OsRng, &algorithm, &keys);\n  let signature = sign(&mut OsRng, &algorithm, keys, machines, MSG);\n\n  let key = PublicKey::from_bytes(key.to_bytes().as_ref()).unwrap();\n  key.verify(&mut SigningContext::new(CONTEXT).bytes(MSG), &signature).unwrap()\n}\n"
  },
  {
    "path": "crypto/transcript/Cargo.toml",
    "content": "[package]\nname = \"flexible-transcript\"\nversion = \"0.3.4\"\ndescription = \"A simple transcript trait definition, along with viable options\"\nlicense = \"MIT\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/crypto/transcript\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nkeywords = [\"transcript\"]\nedition = \"2021\"\nrust-version = \"1.66\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nstd-shims = { path = \"../../common/std-shims\", version = \"0.1.4\", default-features = false }\n\nzeroize = { version = \"^1.5\", default-features = false }\n\ndigest = { version = \"0.10\", default-features = false, features = [\"core-api\"] }\n\nblake2 = { version = \"0.10\", default-features = false, optional = true }\nmerlin = { version = \"3\", default-features = false, optional = true }\n\n[dev-dependencies]\nsha2 = { version = \"0.10\", default-features = false }\nblake2 = { version = \"0.10\", default-features = false }\n\n[features]\nstd = [\"std-shims/std\", \"zeroize/std\", \"digest/std\", \"blake2?/std\", \"merlin?/std\"]\nrecommended = [\"blake2\"]\ntests = []\ndefault = [\"std\"]\n"
  },
  {
    "path": "crypto/transcript/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2022-2023 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "crypto/transcript/README.md",
    "content": "# Flexible Transcript\n\nFlexible Transcript is a crate offering:\n- `Transcript`, a trait offering functions transcripts should implement.\n- `DigestTranscript`, a competent transcript format instantiated against a\n  provided hash function.\n- `MerlinTranscript`, a wrapper of `merlin` into the trait (available via the\n  `merlin` feature).\n- `RecommendedTranscript`, a transcript recommended for usage in applications.\n  Currently, this is `DigestTranscript<Blake2b512>` (available via the\n  `recommended` feature).\n\nThe trait was created while working on an IETF draft which defined an incredibly\nsimple transcript format. Extensions of the protocol would quickly require a\nmore competent format, yet implementing the one specified was mandatory to meet\nthe specification. Accordingly, the library implementing the draft defined an\n`IetfTranscript`, dropping labels and not allowing successive challenges, yet\nthanks to the trait, allowed protocols building on top to provide their own\ntranscript format as needed.\n\n`DigestTranscript` takes in any hash function implementing `Digest`, offering a\nsecure transcript format around it. All items are prefixed by a flag, denoting\ntheir type, and their length.\n\n`MerlinTranscript` was used to justify the API, and if any issues existed with\n`DigestTranscript`, enable a fallback. It was also meant as a way to be\ncompatible with existing Rust projects using `merlin`.\n\nThis library was\n[audited by Cypher Stack in March 2023](https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf),\nculminating in commit\n[669d2dbffc1dafb82a09d9419ea182667115df06](https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06).\nAny subsequent changes have not undergone auditing.\n\nThis library is usable under no_std.\n"
  },
  {
    "path": "crypto/transcript/src/lib.rs",
    "content": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![doc = include_str!(\"../README.md\")]\n#![no_std]\n\n#[allow(unused_imports)]\nuse std_shims::prelude::*;\n\nuse zeroize::Zeroize;\n\nuse digest::{\n  typenum::{\n    consts::U32, marker_traits::NonZero, type_operators::IsGreaterOrEqual, operator_aliases::GrEq,\n  },\n  core_api::BlockSizeUser,\n  Digest, Output, HashMarker,\n};\n\n#[cfg(feature = \"merlin\")]\nmod merlin;\n#[cfg(feature = \"merlin\")]\npub use crate::merlin::MerlinTranscript;\n\n/// Tests for a transcript.\n#[cfg(any(test, feature = \"tests\"))]\npub mod tests;\n\n/// A transcript trait valid over a variety of transcript formats.\npub trait Transcript: Send + Clone {\n  type Challenge: Send + Sync + Clone + AsRef<[u8]>;\n\n  /// Create a new transcript with the specified name.\n  fn new(name: &'static [u8]) -> Self;\n\n  /// Apply a domain separator to the transcript.\n  fn domain_separate(&mut self, label: &'static [u8]);\n\n  /// Append a message to the transcript.\n  fn append_message<M: AsRef<[u8]>>(&mut self, label: &'static [u8], message: M);\n\n  /// Produce a challenge.\n  ///\n  /// Implementors MUST update the transcript as it does so, preventing the same challenge from\n  /// being generated multiple times.\n  fn challenge(&mut self, label: &'static [u8]) -> Self::Challenge;\n\n  /// Produce a RNG seed.\n  ///\n  /// Helper function for parties needing to generate random data from an agreed upon state.\n  ///\n  /// Implementors MAY internally call the challenge function for the needed bytes, and accordingly\n  /// produce a transcript conflict between two transcripts, one which called challenge(label) and\n  /// one which called rng_seed(label) at the same point.\n  fn rng_seed(&mut self, label: &'static [u8]) -> [u8; 32];\n}\n\n#[derive(Clone, Copy)]\nenum DigestTranscriptMember {\n  Name,\n  Domain,\n  Label,\n  Value,\n  Challenge,\n  Continued,\n  Challenged,\n}\n\nimpl DigestTranscriptMember {\n  fn as_u8(&self) -> u8 {\n  
  match self {\n      DigestTranscriptMember::Name => 0,\n      DigestTranscriptMember::Domain => 1,\n      DigestTranscriptMember::Label => 2,\n      DigestTranscriptMember::Value => 3,\n      DigestTranscriptMember::Challenge => 4,\n      DigestTranscriptMember::Continued => 5,\n      DigestTranscriptMember::Challenged => 6,\n    }\n  }\n}\n\n/// A trait defining cryptographic Digests with at least a 256-bit output size, assuming at least a\n/// 128-bit level of security accordingly.\npub trait SecureDigest: Digest + HashMarker {}\nimpl<D: Digest + HashMarker> SecureDigest for D\nwhere\n  // This just lets us perform the comparison\n  D::OutputSize: IsGreaterOrEqual<U32>,\n  // Perform the comparison and make sure it's true (not zero), meaning D::OutputSize is >= U32\n  // This should be U32 as it's length in bytes, not bits\n  GrEq<D::OutputSize, U32>: NonZero,\n{\n}\n\n/// A simple transcript format constructed around the specified hash algorithm.\n#[derive(Clone, Debug)]\npub struct DigestTranscript<D: Send + Clone + SecureDigest>(D);\n\nimpl<D: Send + Clone + SecureDigest> DigestTranscript<D> {\n  fn append(&mut self, kind: DigestTranscriptMember, value: &[u8]) {\n    self.0.update([kind.as_u8()]);\n    // Assumes messages don't exceed 16 exabytes\n    self.0.update(u64::try_from(value.len()).unwrap().to_le_bytes());\n    self.0.update(value);\n  }\n}\n\nimpl<D: Send + Clone + SecureDigest> Transcript for DigestTranscript<D> {\n  type Challenge = Output<D>;\n\n  fn new(name: &'static [u8]) -> Self {\n    let mut res = DigestTranscript(D::new());\n    res.append(DigestTranscriptMember::Name, name);\n    res\n  }\n\n  fn domain_separate(&mut self, label: &'static [u8]) {\n    self.append(DigestTranscriptMember::Domain, label);\n  }\n\n  fn append_message<M: AsRef<[u8]>>(&mut self, label: &'static [u8], message: M) {\n    self.append(DigestTranscriptMember::Label, label);\n    self.append(DigestTranscriptMember::Value, message.as_ref());\n  }\n\n  fn 
challenge(&mut self, label: &'static [u8]) -> Self::Challenge {\n    self.append(DigestTranscriptMember::Challenge, label);\n    let mut cloned = self.0.clone();\n\n    // Explicitly fork these transcripts to prevent length extension attacks from being possible\n    // (at least, without the additional ability to remove a byte from a finalized hash)\n    self.0.update([DigestTranscriptMember::Continued.as_u8()]);\n    cloned.update([DigestTranscriptMember::Challenged.as_u8()]);\n    cloned.finalize()\n  }\n\n  fn rng_seed(&mut self, label: &'static [u8]) -> [u8; 32] {\n    let mut seed = [0; 32];\n    seed.copy_from_slice(&self.challenge(label)[.. 32]);\n    seed\n  }\n}\n\n// Digest doesn't implement Zeroize\n// Implement Zeroize for DigestTranscript by writing twice the block size to the digest in an\n// attempt to overwrite the internal hash state/any leftover bytes\nimpl<D: Send + Clone + SecureDigest> Zeroize for DigestTranscript<D>\nwhere\n  D: BlockSizeUser,\n{\n  fn zeroize(&mut self) {\n    // Update in 4-byte chunks to reduce call quantity and enable word-level update optimizations\n    const WORD_SIZE: usize = 4;\n\n    // block_size returns the block_size in bytes\n    // Use a ceil div in case the block size isn't evenly divisible by our word size\n    let words = D::block_size().div_ceil(WORD_SIZE);\n    for _ in 0 .. 
(2 * words) {\n      self.0.update([255; WORD_SIZE]);\n    }\n\n    // Hopefully, the hash state is now overwritten to the point no data is recoverable\n    // These writes may be optimized out if they're never read\n    // Attempt to get them marked as read\n\n    fn mark_read<D: Send + Clone + SecureDigest>(transcript: &DigestTranscript<D>) {\n      // Just get a challenge from the state\n      let mut challenge = core::hint::black_box(transcript.0.clone().finalize());\n      challenge.as_mut().zeroize();\n    }\n\n    mark_read(self)\n  }\n}\n\n/// The recommended transcript, guaranteed to be secure against length-extension attacks.\n#[cfg(feature = \"recommended\")]\npub type RecommendedTranscript = DigestTranscript<blake2::Blake2b512>;\n"
  },
  {
    "path": "crypto/transcript/src/merlin.rs",
    "content": "use core::fmt::{Debug, Formatter};\n\nuse crate::Transcript;\n\n/// A wrapper around a Merlin transcript which satisfies the Transcript API.\n///\n/// Challenges are fixed to 64 bytes, despite Merlin supporting variable length challenges.\n///\n/// This implementation is intended to remain in the spirit of Merlin more than it's intended to be\n/// in the spirit of the provided DigestTranscript. While DigestTranscript uses flags for each of\n/// its different field types, the domain_separate function simply appends a message with a label\n/// of \"dom-sep\", Merlin's preferred domain separation label. Since this could introduce transcript\n/// conflicts between a domain separation and a message with a label of \"dom-sep\", the\n/// append_message function uses an assertion to prevent such labels.\n#[derive(Clone)]\npub struct MerlinTranscript(merlin::Transcript);\n// Merlin doesn't implement Debug so provide a stub which won't panic\nimpl Debug for MerlinTranscript {\n  fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), core::fmt::Error> {\n    fmt.debug_struct(\"MerlinTranscript\").finish_non_exhaustive()\n  }\n}\n\nimpl Transcript for MerlinTranscript {\n  // Uses a challenge length of 64 bytes to support wide reduction on commonly used EC scalars\n  // From a security level standpoint (Merlin targets 128-bits), this should just be 32 bytes\n  // From a Merlin standpoint, this should be variable per call\n  // From a practical standpoint, this should be practical\n  type Challenge = [u8; 64];\n\n  fn new(name: &'static [u8]) -> Self {\n    MerlinTranscript(merlin::Transcript::new(name))\n  }\n\n  fn domain_separate(&mut self, label: &'static [u8]) {\n    self.0.append_message(b\"dom-sep\", label);\n  }\n\n  fn append_message<M: AsRef<[u8]>>(&mut self, label: &'static [u8], message: M) {\n    assert!(\n      label != \"dom-sep\".as_bytes(),\n      \"\\\"dom-sep\\\" is reserved for the domain_separate function\",\n    );\n    
self.0.append_message(label, message.as_ref());\n  }\n\n  fn challenge(&mut self, label: &'static [u8]) -> Self::Challenge {\n    let mut challenge = [0; 64];\n    self.0.challenge_bytes(label, &mut challenge);\n    challenge\n  }\n\n  fn rng_seed(&mut self, label: &'static [u8]) -> [u8; 32] {\n    let mut seed = [0; 32];\n    seed.copy_from_slice(&self.challenge(label)[.. 32]);\n    seed\n  }\n}\n"
  },
  {
    "path": "crypto/transcript/src/tests.rs",
    "content": "use crate::Transcript;\n\n/// Test the sanity of a transcript.\n///\n/// This will panic if sanity checks fail.\npub fn test_transcript<T: Transcript<Challenge: PartialEq>>() {\n  // Ensure distinct names cause distinct challenges\n  {\n    let mut t1 = T::new(b\"1\");\n    let mut t2 = T::new(b\"2\");\n    assert!(t1.challenge(b\"c\") != t2.challenge(b\"c\"));\n  }\n\n  // Ensure names can't lead into labels\n  {\n    let mut t1 = T::new(b\"12\");\n    let c1 = t1.challenge(b\"c\");\n    let mut t2 = T::new(b\"1\");\n    let c2 = t2.challenge(b\"2c\");\n    assert!(c1 != c2);\n  }\n\n  let t = || T::new(b\"name\");\n  let c = |mut t: T| t.challenge(b\"c\");\n\n  // Ensure domain separators do something\n  {\n    let mut t1 = t();\n    t1.domain_separate(b\"d\");\n    assert!(c(t1) != c(t()));\n  }\n\n  // Ensure distinct domain separators create distinct challenges\n  {\n    let mut t1 = t();\n    let mut t2 = t();\n    t1.domain_separate(b\"d1\");\n    t2.domain_separate(b\"d2\");\n    assert!(c(t1) != c(t2));\n  }\n\n  // Ensure distinct messages create distinct challenges\n  {\n    // By label\n    {\n      let mut t1 = t();\n      let mut t2 = t();\n      t1.append_message(b\"msg\", b\"a\");\n      t2.append_message(b\"msg\", b\"b\");\n      assert!(c(t1) != c(t2));\n    }\n\n    // By value\n    {\n      let mut t1 = t();\n      let mut t2 = t();\n      t1.append_message(b\"a\", b\"val\");\n      t2.append_message(b\"b\", b\"val\");\n      assert!(c(t1) != c(t2));\n    }\n  }\n\n  // Ensure challenges advance the transcript\n  {\n    let mut t = t();\n    let c1 = t.challenge(b\"c\");\n    let c2 = t.challenge(b\"c\");\n    assert!(c1 != c2);\n  }\n\n  // Ensure distinct challenge labels produce distinct challenges\n  assert!(t().challenge(b\"a\") != t().challenge(b\"b\"));\n\n  // Ensure RNG seed calls advance the transcript\n  {\n    let mut t = t();\n    let s1 = t.rng_seed(b\"s\");\n    let s2 = t.rng_seed(b\"s\");\n    assert!(s1 != 
s2);\n  }\n\n  // Ensure distinct RNG seed labels produce distinct seeds\n  assert!(t().rng_seed(b\"a\") != t().rng_seed(b\"b\"));\n}\n\n#[test]\nfn test_digest() {\n  test_transcript::<crate::DigestTranscript<sha2::Sha256>>();\n  test_transcript::<crate::DigestTranscript<blake2::Blake2b512>>();\n}\n\n#[cfg(feature = \"recommended\")]\n#[test]\nfn test_recommended() {\n  test_transcript::<crate::RecommendedTranscript>();\n}\n\n#[cfg(feature = \"merlin\")]\n#[test]\nfn test_merlin() {\n  test_transcript::<crate::MerlinTranscript>();\n}\n"
  },
  {
    "path": "deny.toml",
    "content": "[advisories]\nversion = 2\n\ndb-path = \"~/.cargo/advisory-db\"\ndb-urls = [\"https://github.com/rustsec/advisory-db\"]\n\nyanked = \"deny\"\n\nignore = [\n  \"RUSTSEC-2022-0061\", # https://github.com/serai-dex/serai/issues/227\n  \"RUSTSEC-2024-0370\", # proc-macro-error is unmaintained\n  \"RUSTSEC-2024-0436\", # paste is unmaintained\n  \"RUSTSEC-2024-0384\", # instant is unmaintained, fixed on `next`\n  \"RUSTSEC-2025-0057\", # fxhash is unmaintained, fixed with bytecodealliance/wasmtime/pull/11634\n]\n\n[licenses]\nversion = 2\n\nallow = [\n  # Effective public domain\n  \"CC0-1.0\",\n  \"Unlicense\",\n\n  # Attribution required\n  \"MIT\",\n  \"MITNFA\",\n  \"BSD-2-Clause\",\n  \"BSD-3-Clause\",\n  \"ISC\",\n  \"Zlib\",\n  \"Unicode-3.0\",\n  \"CDLA-Permissive-2.0\",\n\n  # Non-invasive copyleft\n  # \"MPL-2.0\", # Commented as it's not currently in-use within the Serai tree\n  \"Apache-2.0\",\n  \"Apache-2.0 WITH LLVM-exception\",\n  \"GPL-3.0-or-later WITH Classpath-exception-2.0\",\n]\n\nexceptions = [\n  { allow = [\"AGPL-3.0-only\"], name = \"serai-env\" },\n\n  { allow = [\"AGPL-3.0-only\"], name = \"ethereum-serai\" },\n  { allow = [\"AGPL-3.0-only\"], name = \"serai-ethereum-relayer\" },\n\n  { allow = [\"AGPL-3.0-only\"], name = \"serai-message-queue\" },\n\n  { allow = [\"AGPL-3.0-only\"], name = \"serai-processor-messages\" },\n  { allow = [\"AGPL-3.0-only\"], name = \"serai-processor\" },\n\n  { allow = [\"AGPL-3.0-only\"], name = \"tributary-chain\" },\n  { allow = [\"AGPL-3.0-only\"], name = \"serai-coordinator\" },\n\n  { allow = [\"AGPL-3.0-only\"], name = \"pallet-session\" },\n\n  { allow = [\"AGPL-3.0-only\"], name = \"serai-coins-pallet\" },\n  { allow = [\"AGPL-3.0-only\"], name = \"serai-dex-pallet\" },\n\n  { allow = [\"AGPL-3.0-only\"], name = \"serai-genesis-liquidity-pallet\" },\n  { allow = [\"AGPL-3.0-only\"], name = \"serai-emissions-pallet\" },\n\n  { allow = [\"AGPL-3.0-only\"], name = 
\"serai-economic-security-pallet\" },\n\n  { allow = [\"AGPL-3.0-only\"], name = \"serai-in-instructions-pallet\" },\n\n  { allow = [\"AGPL-3.0-only\"], name = \"serai-validator-sets-pallet\" },\n\n  { allow = [\"AGPL-3.0-only\"], name = \"serai-signals-pallet\" },\n\n  { allow = [\"AGPL-3.0-only\"], name = \"serai-runtime\" },\n  { allow = [\"AGPL-3.0-only\"], name = \"serai-node\" },\n\n  { allow = [\"AGPL-3.0-only\"], name = \"serai-orchestrator\" },\n\n  { allow = [\"AGPL-3.0-only\"], name = \"mini-serai\" },\n\n  { allow = [\"AGPL-3.0-only\"], name = \"serai-docker-tests\" },\n  { allow = [\"AGPL-3.0-only\"], name = \"serai-message-queue-tests\" },\n  { allow = [\"AGPL-3.0-only\"], name = \"serai-processor-tests\" },\n  { allow = [\"AGPL-3.0-only\"], name = \"serai-coordinator-tests\" },\n  { allow = [\"AGPL-3.0-only\"], name = \"serai-full-stack-tests\" },\n  { allow = [\"AGPL-3.0-only\"], name = \"serai-reproducible-runtime-tests\" },\n]\n\n[[licenses.clarify]]\nname = \"ring\"\nversion = \"*\"\nexpression = \"MIT AND ISC AND OpenSSL\"\nlicense-files = [\n  { path = \"LICENSE\", hash = 0xbd0eed23 }\n]\n\n[bans]\nmultiple-versions = \"warn\"\nwildcards = \"warn\"\nhighlight = \"all\"\ndeny = [\n  { name = \"serde_derive\", version = \">=1.0.172, <1.0.185\" },\n  { name = \"hashbrown\", version = \"=0.15.0\" },\n  # Legacy which _no one_ should use anymore\n  { name = \"is-terminal\", version = \"*\" },\n  # Stop introduction into the tree without realizing it\n  { name = \"once_cell_polyfill\", version = \"*\" },\n]\n\n[sources]\nunknown-registry = \"deny\"\nunknown-git = \"deny\"\nallow-registry = [\"https://github.com/rust-lang/crates.io-index\"]\nallow-git = [\n  \"https://github.com/rust-lang-nursery/lazy-static.rs\",\n  \"https://github.com/monero-oxide/monero-oxide\",\n  \"https://github.com/serai-dex/patch-polkadot-sdk\",\n]\n"
  },
  {
    "path": "docs/.gitignore",
    "content": "_site/\n.sass-cache/\n.jekyll-cache/\n.jekyll-metadata\n\n.bundle/\nvendor/\n"
  },
  {
    "path": "docs/.ruby-version",
    "content": "3.3.4\n"
  },
  {
    "path": "docs/Gemfile",
    "content": "source 'https://rubygems.org'\n\ngem \"jekyll\", \"~> 4.3.3\"\ngem \"just-the-docs\", \"0.8.2\"\n"
  },
  {
    "path": "docs/_config.yml",
    "content": "title: Serai Documentation\ndescription: Documentation for the Serai protocol.\ntheme: just-the-docs\n\nurl: https://docs.serai.exchange\n\ncallouts:\n  warning:\n    title: Warning\n    color: red\n\n  definition:\n    title: Definition\n    color: blue\n"
  },
  {
    "path": "docs/amm/index.md",
    "content": "---\ntitle: Automatic Market Makers\nlayout: default\nnav_order: 2\n---\n\n# Automatic Market Makers\n\n*text on how AMMs work*\n\nSerai uses a symmetric liquidity pool with the `xy=k` formula.\n\nConcentrated liquidity would presumably offer less slippage on swaps, and there are\n[discussions to evolve to a concentrated liquidity/order book environment](https://github.com/serai-dex/serai/issues/420).\nUnfortunately, it effectively requires active management of provided liquidity.\nThis disenfranchises small liquidity providers who may not have the knowledge\nand resources necessary to perform such management. Since Serai is expected to\nhave a community-bootstrapped start, starting with concentrated liquidity would\naccordingly be contradictory.\n"
  },
  {
    "path": "docs/cross_chain/index.md",
    "content": "---\ntitle: Cross-Chain Architecture\nlayout: default\nnav_order: 3\n---\n\n# Cross-Chain Architecture\n"
  },
  {
    "path": "docs/economics/genesis.md",
    "content": "---\ntitle: Genesis\nlayout: default\nnav_order: 1\nparent: Economics\n---\n"
  },
  {
    "path": "docs/economics/index.md",
    "content": "---\ntitle: Economics\nlayout: default\nnav_order: 4\nhas_children: true\n---\n\n# Economics\n\nSerai's economics change depending on which of three eras is currently\noccurring.\n\n## Genesis Era\n\nThe network starts with the \"Genesis\" era, where the goal of the network is to\nattract the liquidity necessary to facilitate swaps. This period will last for\n30 days and will let anyone add liquidity to the protocol. Only with its\nconclusion will SRI start being distributed.\n\nAfter the Genesis era, the network enters the \"Pre-Economic Security\" era.\n\n## Pre-Economic Security\n\n{: .definition-title }\n> Definition: Economic Security\n>\n> Economic security is derived from it being unprofitable to misbehave.\n> This is by the economic penalty which is presumed to occur upon misbehavior\n> exceeding the value which would presumably be gained.\n> Accordingly, rational actors would behave properly, causing the protocol to\n> maintain its integrity.\n>\n> For Serai specifically, the stake required to produce unintended signatures\n> must exceed the value accessible via producing unintended signatures.\n\nWith liquidity provided, and swaps enabled, the goal is to have validators stake\nsufficiently for economic security to be achieved. This is primarily via\noffering freshly minted, staked SRI to would-be validators who decide to swap\nexternal coins for their stake.\n\n## Post-Economic Security\n\nHaving achieved economic security, the protocol changes its economics one last\ntime (barring future upgrades to the protocol) to a 'normal' state of\noperations.\n"
  },
  {
    "path": "docs/economics/post.md",
    "content": "---\ntitle: Post-Economic Security\nlayout: default\nnav_order: 3\nparent: Economics\n---\n"
  },
  {
    "path": "docs/economics/pre.md",
    "content": "---\ntitle: Pre-Economic Security\nlayout: default\nnav_order: 2\nparent: Economics\n---\n"
  },
  {
    "path": "docs/index.md",
    "content": "---\ntitle: Home\nlayout: home\nnav_order: 1\n---\n\n{: .warning }\nThis documentation site is still under active development and may have missing\nsections, errors, and typos. Even once this documentation site is 'complete', it\nmay become out-of-date (as Serai is an evolving protocol yet to release) or have\nminor errors.\n\n# Serai\n\nSerai is a fairly launched cross-chain decentralized exchange, integrating\nBitcoin (BTC), Ethereum (ETH, DAI), and Monero (XMR).\n\nThe Serai mainnet has yet to launch, and until then, all details are subject to\nchange.\n\nPrior to the Serai mainnet launching, SRI, Serai's native coin, will not\nexist. As a fairly launched project, SRI will have no ICO, no IEO, no presale,\nno developers' tax/fund, and no airdrop for out-of-mainnet activity.\n\nOut-of-mainnet activity includes:\n\n- Being a community member (such as on Discord or on Twitter)\n- Participating in testnets\n- Contributing to the GitHub\n\nNone of these will be awarded any airdrop. All distributions of SRI will happen\non-chain per the protocol's defined rules, based on on-chain activity.\n"
  },
  {
    "path": "docs/infrastructure/coordinator.md",
    "content": "---\ntitle: Coordinator\nlayout: default\nnav_order: 3\nparent: Infrastructure\n---\n\n# Coordinator\n\nThe coordinator is a local service which communicates with other validators'\ncoordinators. It provides a verifiable broadcast layer for various consensus\nmessages, such as agreement on external blockchains, key generation and signing\nprotocols, and the latest Serai block.\n\nThe verifiable broadcast layer is implemented via a blockchain, referred to as a\nTributary, which is agreed upon using Tendermint consensus. This consensus is\nnot that offered by Tendermint Core/CometBFT, as used in the Cosmos SDK\n(historically/presently), but by our own implementation designed to be used as a\nlibrary and not as another daemon. Tributaries are ephemeral, only used by the\ncurrent validators, and deleted upon the next epoch. All of the results from it\nare verifiable via the external network and the Serai blockchain alone.\n"
  },
  {
    "path": "docs/infrastructure/index.md",
    "content": "---\ntitle: Infrastructure\nlayout: default\nnav_order: 6\nhas_children: true\n---\n"
  },
  {
    "path": "docs/infrastructure/message_queue.md",
    "content": "---\ntitle: Message Queue\nlayout: default\nnav_order: 1\nparent: Infrastructure\n---\n\n# Message Queue\n\nThe Message Queue is a microservice to authenticate and relay messages between\nservices. It offers just three functions:\n\n1) Queue a message.\n\n2) Receive the next message.\n\n3) Acknowledge a message, removing it from the queue.\n\nThis ensures messages are delivered between services, with their order\npreserved. This also ensures that if a service reboots while handling a message,\nit'll still handle the message once rebooted (and the message will not be lost).\n\nThe Message Queue also aims to offer increased liveness and performance.\nIf services directly communicated, the rate at which one service could operate\nwould always be bottlenecked by the service it communicates with. If the\nreceiving service ever went offline, the sending service wouldn't be able to\ndeliver messages until the receiver came back online, halting its own work. By\ndefining a dedicated microservice, with a lack of complex logic, it's much less\nlikely to go offline or suffer from degraded performance.\n"
  },
  {
    "path": "docs/infrastructure/processor.md",
    "content": "---\ntitle: Processor\nlayout: default\nnav_order: 2\nparent: Infrastructure\n---\n\n# Processor\n\nThe processor performs several important tasks with regards to the external\nnetwork. Each of them are documented in the following sections.\n\n## Key Generation\n\n## Scanning\n\n## Signing Batches\n\n## Planning Transactions\n\n## Cosigning\n"
  },
  {
    "path": "docs/infrastructure/serai.md",
    "content": "---\ntitle: Serai\nlayout: default\nnav_order: 4\nparent: Infrastructure\n---\n"
  },
  {
    "path": "docs/integrating/index.md",
    "content": "---\ntitle: Integrating with Serai\nlayout: default\nnav_order: 7\nhas_children: true\n---\n"
  },
  {
    "path": "docs/protocol_changes/index.md",
    "content": "---\ntitle: Protocol Changes\nlayout: default\nnav_order: 5\n---\n\n# Protocol Changes\n\nThe protocol has no central authority nor organization nor actors (such as\nliquidity providers/validators) who can compel new protocol rules. The Serai\nprotocol is as-written with all granted functionality and declared rules\npresent.\n\nValidators are explicitly granted the ability to signal for two things to occur:\n\n### 1) Halt another validator set.\n\nThis will presumably occur if another validator set turns malicious and is the\nexpected incident response in order to apply an economic penalty of ideally\ngreater value than damage wreaked. Halting a validator set prevents further\npublication of `Batch`s, preventing improper actions on the Serai blockchain,\nand preventing validators from unstaking (as unstaking only occurs once future\nvalidator sets have accepted responsibility, and accepting responsibility\nrequires `Batch` publication). This effectively burns the malicious validators'\nstake.\n\n### 2) Retire the protocol.\n\nA supermajority of validators may favor a signal (an opaque 32-byte ID). A\ncommon signal gaining sufficient favor will cause the protocol to stop producing\nblocks in two weeks.\n\nNodes will presumably, as individual entities, hard fork to new consensus rules.\nThese rules presumably will remove the rule to stop producing blocks in two\nweeks, they may declare new validators, and they may declare new functionality\nentirely.\n\nWhile nodes individually hard fork, across every hard fork the state of the\nvarious `sriXYZ` coins (such as `sriBTC`, `sriETH`, `sriDAI`, and `sriXMR`)\nremains intact (unless the new rules modify such state). These coins can still\nbe burned with instructions (unless the new rules prevent that) and if a\nvalidator set doesn't send `XYZ` as expected, they can be halted (effectively\nburning their `SRI` stake). 
Accordingly, every node decides if and how to participate going\nforward, with the abilities and powers they declare themselves to have.\n"
  },
  {
    "path": "docs/validator/index.md",
    "content": "---\ntitle: Running a Validator\nlayout: default\nnav_order: 8\nhas_children: true\n---\n"
  },
  {
    "path": "message-queue/Cargo.toml",
    "content": "[package]\nname = \"serai-message-queue\"\nversion = \"0.1.0\"\ndescription = \"A message queue for Serai focused on consistency\"\nlicense = \"AGPL-3.0-only\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/message-queue\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nkeywords = []\nedition = \"2021\"\npublish = false\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\n# Macros\nonce_cell = { version = \"1\", default-features = false }\n\n# Encoders\nhex = { version = \"0.4\", default-features = false, features = [\"std\"] }\nborsh = { version = \"1\", default-features = false, features = [\"std\", \"derive\", \"de_strict_order\"] }\n\n# Libs\nzeroize = { version = \"1\", default-features = false, features = [\"std\"] }\nrand_core = { version = \"0.6\", default-features = false, features = [\"std\"] }\n\n# Cryptography\ntranscript = { package = \"flexible-transcript\", path = \"../crypto/transcript\", default-features = false, features = [\"std\", \"recommended\"] }\ndalek-ff-group = { path = \"../crypto/dalek-ff-group\", default-features = false, features = [\"std\"] }\nciphersuite = { path = \"../crypto/ciphersuite\", default-features = false, features = [\"std\"] }\nschnorr-signatures = { path = \"../crypto/schnorr\", default-features = false, features = [\"std\"] }\n\n# Application\nlog = { version = \"0.4\", default-features = false, features = [\"std\"] }\nenv_logger = { version = \"0.10\", default-features = false, features = [\"humantime\"] }\n\n# Uses a single threaded runtime since this shouldn't ever be CPU-bound\ntokio = { version = \"1\", default-features = false, features = [\"rt\", \"time\", \"io-util\", \"net\", \"macros\"] }\n\nzalloc = { path = \"../common/zalloc\" }\nserai-db = { path = \"../common/db\", optional = true }\n\nserai-env = { path = \"../common/env\" }\n\nserai-primitives = { path = 
\"../substrate/primitives\", features = [\"borsh\"] }\n\n[features]\nparity-db = [\"serai-db/parity-db\"]\nrocksdb = [\"serai-db/rocksdb\"]\n"
  },
  {
    "path": "message-queue/LICENSE",
    "content": "AGPL-3.0-only license\n\nCopyright (c) 2023 Luke Parker\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License Version 3 as\npublished by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n"
  },
  {
    "path": "message-queue/README.md",
    "content": "# Message Log\n\nA message log for various services to communicate over.\n\nEach message is checked to be of the claimed origin. Then, it's added to the\nrecipient's message queue. This queue is sequentially handled, FIFO, only\ndropping a message once the recipient acknowledges it's been handled.\n\nA client which publishes an event specifies its own ID for the publication. If\nmultiple publications with the same ID occur, they are assumed repeats and\ndropped.\n\nThis library always panics as its error-cases should be unreachable, given its\nintranet status.\n"
  },
  {
    "path": "message-queue/src/client.rs",
    "content": "use core::ops::Deref;\n\nuse zeroize::{Zeroize, Zeroizing};\nuse rand_core::OsRng;\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::{\n  group::ff::{Field, PrimeField},\n  Ciphersuite,\n};\nuse schnorr_signatures::SchnorrSignature;\n\nuse tokio::{\n  io::{AsyncReadExt, AsyncWriteExt},\n  net::TcpStream,\n};\n\nuse serai_env as env;\n\n#[rustfmt::skip]\nuse crate::{Service, Metadata, QueuedMessage, MessageQueueRequest, message_challenge, ack_challenge};\n\npub struct MessageQueue {\n  pub service: Service,\n  priv_key: Zeroizing<<Ristretto as Ciphersuite>::F>,\n  pub_key: <Ristretto as Ciphersuite>::G,\n  url: String,\n}\n\nimpl MessageQueue {\n  pub fn new(\n    service: Service,\n    mut url: String,\n    priv_key: Zeroizing<<Ristretto as Ciphersuite>::F>,\n  ) -> MessageQueue {\n    // Allow MESSAGE_QUEUE_RPC to either be a full URL or just a hostname\n    // While we could stitch together multiple variables, our control over this service makes this\n    // fine\n    if !url.contains(':') {\n      url += \":2287\";\n    }\n\n    MessageQueue { service, pub_key: Ristretto::generator() * priv_key.deref(), priv_key, url }\n  }\n\n  pub fn from_env(service: Service) -> MessageQueue {\n    let url = env::var(\"MESSAGE_QUEUE_RPC\").expect(\"message-queue RPC wasn't specified\");\n\n    let priv_key: Zeroizing<<Ristretto as Ciphersuite>::F> = {\n      let key_str =\n        Zeroizing::new(env::var(\"MESSAGE_QUEUE_KEY\").expect(\"message-queue key wasn't specified\"));\n      let key_bytes = Zeroizing::new(\n        hex::decode(&key_str).expect(\"invalid message-queue key specified (wasn't hex)\"),\n      );\n      let mut bytes = <<Ristretto as Ciphersuite>::F as PrimeField>::Repr::default();\n      bytes.copy_from_slice(&key_bytes);\n      let key = Zeroizing::new(\n        Option::from(<<Ristretto as Ciphersuite>::F as PrimeField>::from_repr(bytes))\n          .expect(\"invalid message-queue key specified\"),\n      );\n      bytes.zeroize();\n      
key\n    };\n\n    Self::new(service, url, priv_key)\n  }\n\n  #[must_use]\n  async fn send(socket: &mut TcpStream, msg: MessageQueueRequest) -> bool {\n    let msg = borsh::to_vec(&msg).unwrap();\n    let Ok(()) = socket.write_all(&u32::try_from(msg.len()).unwrap().to_le_bytes()).await else {\n      log::warn!(\"couldn't send the message len\");\n      return false;\n    };\n    let Ok(()) = socket.write_all(&msg).await else {\n      log::warn!(\"couldn't write the message\");\n      return false;\n    };\n    true\n  }\n\n  pub async fn queue(&self, metadata: Metadata, msg: Vec<u8>) {\n    // TODO: Should this use OsRng? Deterministic or deterministic + random may be better.\n    let nonce = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));\n    let nonce_pub = Ristretto::generator() * nonce.deref();\n    let sig = SchnorrSignature::<Ristretto>::sign(\n      &self.priv_key,\n      nonce,\n      message_challenge(\n        metadata.from,\n        self.pub_key,\n        metadata.to,\n        &metadata.intent,\n        &msg,\n        nonce_pub,\n      ),\n    )\n    .serialize();\n\n    let msg = MessageQueueRequest::Queue { meta: metadata, msg, sig };\n    let mut first = true;\n    loop {\n      // Sleep, so we don't hammer re-attempts\n      if !first {\n        tokio::time::sleep(core::time::Duration::from_secs(5)).await;\n      }\n      first = false;\n\n      let Ok(mut socket) = TcpStream::connect(&self.url).await else { continue };\n      if !Self::send(&mut socket, msg.clone()).await {\n        continue;\n      }\n      if socket.read_u8().await.ok() != Some(1) {\n        continue;\n      }\n      break;\n    }\n  }\n\n  pub async fn next(&self, from: Service) -> QueuedMessage {\n    let msg = MessageQueueRequest::Next { from, to: self.service };\n    let mut first = true;\n    'outer: loop {\n      if !first {\n        tokio::time::sleep(core::time::Duration::from_secs(5)).await;\n      }\n      first = false;\n\n      
log::trace!(\"opening socket to message-queue for next\");\n      let mut socket = match TcpStream::connect(&self.url).await {\n        Ok(socket) => socket,\n        Err(e) => {\n          log::warn!(\"couldn't connect to message-queue server: {e:?}\");\n          continue;\n        }\n      };\n      log::trace!(\"opened socket for next\");\n\n      loop {\n        if !Self::send(&mut socket, msg.clone()).await {\n          continue 'outer;\n        }\n        let status = match socket.read_u8().await {\n          Ok(status) => status,\n          Err(e) => {\n            log::warn!(\"couldn't read status u8: {e:?}\");\n            continue 'outer;\n          }\n        };\n        // If there wasn't a message, check again in 1s\n        // TODO: Use a notification system here\n        if status == 0 {\n          tokio::time::sleep(core::time::Duration::from_secs(1)).await;\n          continue;\n        }\n        assert_eq!(status, 1);\n        break;\n      }\n\n      // Timeout after 5 seconds in case there's an issue with the length handling\n      let Ok(msg) = tokio::time::timeout(core::time::Duration::from_secs(5), async {\n        // Read the message length\n        let len = match socket.read_u32_le().await {\n          Ok(len) => len,\n          Err(e) => {\n            log::warn!(\"couldn't read len: {e:?}\");\n            return vec![];\n          }\n        };\n        let mut buf = vec![0; usize::try_from(len).unwrap()];\n        // Read the message\n        let Ok(_) = socket.read_exact(&mut buf).await else {\n          log::warn!(\"couldn't read the message\");\n          return vec![];\n        };\n        buf\n      })\n      .await\n      else {\n        continue;\n      };\n      if msg.is_empty() {\n        continue;\n      }\n\n      let msg: QueuedMessage = borsh::from_slice(msg.as_slice()).unwrap();\n\n      // Verify the message\n      // Verify the sender is sane\n      if matches!(self.service, Service::Processor(_)) {\n        
assert_eq!(\n          msg.from,\n          Service::Coordinator,\n          \"non-coordinator sent us (a processor) a message\"\n        );\n      } else {\n        assert!(\n          matches!(msg.from, Service::Processor(_)),\n          \"non-processor sent us (coordinator) a message\"\n        );\n      }\n      // TODO: Verify the sender's signature\n\n      return msg;\n    }\n  }\n\n  pub async fn ack(&self, from: Service, id: u64) {\n    // TODO: Should this use OsRng? Deterministic or deterministic + random may be better.\n    let nonce = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));\n    let nonce_pub = Ristretto::generator() * nonce.deref();\n    let sig = SchnorrSignature::<Ristretto>::sign(\n      &self.priv_key,\n      nonce,\n      ack_challenge(self.service, self.pub_key, from, id, nonce_pub),\n    )\n    .serialize();\n\n    let msg = MessageQueueRequest::Ack { from, to: self.service, id, sig };\n    let mut first = true;\n    loop {\n      if !first {\n        tokio::time::sleep(core::time::Duration::from_secs(5)).await;\n      }\n      first = false;\n\n      let Ok(mut socket) = TcpStream::connect(&self.url).await else { continue };\n      if !Self::send(&mut socket, msg.clone()).await {\n        continue;\n      }\n      if socket.read_u8().await.ok() != Some(1) {\n        continue;\n      }\n      break;\n    }\n  }\n}\n"
  },
  {
    "path": "message-queue/src/lib.rs",
    "content": "mod messages;\npub use messages::*;\n\npub mod client;\n"
  },
  {
    "path": "message-queue/src/main.rs",
    "content": "pub(crate) use std::{\n  sync::{Arc, RwLock},\n  collections::HashMap,\n};\n\nuse dalek_ff_group::Ristretto;\npub(crate) use ciphersuite::{group::GroupEncoding, Ciphersuite};\npub(crate) use schnorr_signatures::SchnorrSignature;\n\npub(crate) use serai_primitives::ExternalNetworkId;\n\npub(crate) use tokio::{\n  io::{AsyncReadExt, AsyncWriteExt},\n  net::TcpListener,\n};\n\nuse serai_db::{Get, DbTxn, Db as DbTrait};\n\npub(crate) use crate::messages::*;\n\npub(crate) use crate::queue::Queue;\n\n#[cfg(all(feature = \"parity-db\", not(feature = \"rocksdb\")))]\npub(crate) type Db = Arc<serai_db::ParityDb>;\n#[cfg(feature = \"rocksdb\")]\npub(crate) type Db = serai_db::RocksDB;\n\n#[allow(clippy::type_complexity)]\nmod clippy {\n  use super::*;\n  use once_cell::sync::Lazy;\n  pub(crate) static KEYS: Lazy<Arc<RwLock<HashMap<Service, <Ristretto as Ciphersuite>::G>>>> =\n    Lazy::new(|| Arc::new(RwLock::new(HashMap::new())));\n  pub(crate) static QUEUES: Lazy<Arc<RwLock<HashMap<(Service, Service), RwLock<Queue<Db>>>>>> =\n    Lazy::new(|| Arc::new(RwLock::new(HashMap::new())));\n}\npub(crate) use self::clippy::*;\n\nmod messages;\nmod queue;\n\n#[global_allocator]\nstatic ALLOCATOR: zalloc::ZeroizingAlloc<std::alloc::System> =\n  zalloc::ZeroizingAlloc(std::alloc::System);\n\n// queue RPC method\n/*\n  Queues a message to be delivered from a processor to a coordinator, or vice versa.\n\n  Messages are authenticated to be coming from the claimed service. Recipient services SHOULD\n  independently verify signatures.\n\n  The metadata specifies an intent. 
Only one message, for a specified intent, will be delivered.\n  This allows services to safely send messages multiple times without them being delivered\n  multiple times.\n\n  The message will be ordered by this service, with the order having no guarantees other than\n  successful ordering by the time this call returns.\n*/\npub(crate) fn queue_message(\n  db: &mut Db,\n  meta: &Metadata,\n  msg: Vec<u8>,\n  sig: SchnorrSignature<Ristretto>,\n) {\n  {\n    let from = KEYS.read().unwrap()[&meta.from];\n    assert!(\n      sig.verify(from, message_challenge(meta.from, from, meta.to, &meta.intent, &msg, sig.R))\n    );\n  }\n\n  // Assert one, and only one of these, is the coordinator\n  assert!(matches!(meta.from, Service::Coordinator) ^ matches!(meta.to, Service::Coordinator));\n\n  // Verify (from, to, intent) hasn't been prior seen\n  fn key(domain: &'static [u8], key: impl AsRef<[u8]>) -> Vec<u8> {\n    [&[u8::try_from(domain.len()).unwrap()], domain, key.as_ref()].concat()\n  }\n  fn intent_key(from: Service, to: Service, intent: &[u8]) -> Vec<u8> {\n    key(b\"intent_seen\", borsh::to_vec(&(from, to, intent)).unwrap())\n  }\n  let mut txn = db.txn();\n  let intent_key = intent_key(meta.from, meta.to, &meta.intent);\n  if Get::get(&txn, &intent_key).is_some() {\n    log::warn!(\n      \"Prior queued message attempted to be queued again. From: {:?} To: {:?} Intent: {}\",\n      meta.from,\n      meta.to,\n      hex::encode(&meta.intent)\n    );\n    return;\n  }\n  DbTxn::put(&mut txn, intent_key, []);\n\n  // Queue it\n  let id = QUEUES.read().unwrap()[&(meta.from, meta.to)].write().unwrap().queue_message(\n    &mut txn,\n    QueuedMessage {\n      from: meta.from,\n      // Temporary value which queue_message will override\n      id: u64::MAX,\n      msg,\n      sig: sig.serialize(),\n    },\n  );\n\n  log::info!(\"Queued message. 
From: {:?} To: {:?} ID: {id}\", meta.from, meta.to);\n  DbTxn::commit(txn);\n}\n\n// next RPC method\n/*\n  Gets the next message in queue for the named services.\n\n  This is not authenticated due to the fact every nonce would have to be saved to prevent\n  replays, or a challenge-response protocol implemented. Neither are worth doing when there\n  should be no sensitive data on this server.\n*/\npub(crate) fn get_next_message(from: Service, to: Service) -> Option<QueuedMessage> {\n  let queue_outer = QUEUES.read().unwrap();\n  let queue = queue_outer[&(from, to)].read().unwrap();\n  let next = queue.last_acknowledged().map_or(0, |i| i + 1);\n  queue.get_message(next)\n}\n\n// ack RPC method\n/*\n  Acknowledges a message as received and handled, meaning it'll no longer be returned as the next\n  message.\n*/\npub(crate) fn ack_message(from: Service, to: Service, id: u64, sig: SchnorrSignature<Ristretto>) {\n  {\n    let to_key = KEYS.read().unwrap()[&to];\n    assert!(sig.verify(to_key, ack_challenge(to, to_key, from, id, sig.R)));\n  }\n\n  // Is it:\n  // The acknowledged message should be > last acknowledged OR\n  // The acknowledged message should be >=\n  // It's the first if we save messages as acknowledged before acknowledging them\n  // It's the second if we acknowledge messages before saving them as acknowledged\n  // TODO: Check only a proper message is being acked\n\n  log::info!(\"Acknowledging From: {:?} To: {:?} ID: {}\", from, to, id);\n\n  QUEUES.read().unwrap()[&(from, to)].write().unwrap().ack_message(id)\n}\n\n#[tokio::main(flavor = \"current_thread\")]\nasync fn main() {\n  // Override the panic handler with one which will panic if any tokio task panics\n  {\n    let existing = std::panic::take_hook();\n    std::panic::set_hook(Box::new(move |panic| {\n      existing(panic);\n      const MSG: &str = \"exiting the process due to a task panicking\";\n      println!(\"{MSG}\");\n      log::error!(\"{MSG}\");\n      std::process::exit(1);\n    
}));\n  }\n\n  if std::env::var(\"RUST_LOG\").is_err() {\n    std::env::set_var(\"RUST_LOG\", serai_env::var(\"RUST_LOG\").unwrap_or_else(|| \"info\".to_string()));\n  }\n  env_logger::init();\n\n  log::info!(\"Starting message-queue service...\");\n\n  // Open the DB\n  #[allow(unused_variables, unreachable_code)]\n  let db = {\n    #[cfg(all(feature = \"parity-db\", feature = \"rocksdb\"))]\n    panic!(\"built with parity-db and rocksdb\");\n    #[cfg(all(feature = \"parity-db\", not(feature = \"rocksdb\")))]\n    let db =\n      serai_db::new_parity_db(&serai_env::var(\"DB_PATH\").expect(\"path to DB wasn't specified\"));\n    #[cfg(feature = \"rocksdb\")]\n    let db =\n      serai_db::new_rocksdb(&serai_env::var(\"DB_PATH\").expect(\"path to DB wasn't specified\"));\n    db\n  };\n\n  let read_key = |str| {\n    let key = serai_env::var(str)?;\n\n    let mut repr = <<Ristretto as Ciphersuite>::G as GroupEncoding>::Repr::default();\n    repr.as_mut().copy_from_slice(&hex::decode(key).unwrap());\n    Some(<Ristretto as Ciphersuite>::G::from_bytes(&repr).unwrap())\n  };\n\n  let register_service = |service, key| {\n    KEYS.write().unwrap().insert(service, key);\n    let mut queues = QUEUES.write().unwrap();\n    if service == Service::Coordinator {\n      for network in serai_primitives::EXTERNAL_NETWORKS {\n        queues.insert(\n          (service, Service::Processor(network)),\n          RwLock::new(Queue(db.clone(), service, Service::Processor(network))),\n        );\n      }\n    } else {\n      queues.insert(\n        (service, Service::Coordinator),\n        RwLock::new(Queue(db.clone(), service, Service::Coordinator)),\n      );\n    }\n  };\n\n  // Make queues for each ExternalNetworkId\n  for network in serai_primitives::EXTERNAL_NETWORKS {\n    // Use a match so we error if the list of NetworkIds changes\n    let Some(key) = read_key(match network {\n      ExternalNetworkId::Bitcoin => \"BITCOIN_KEY\",\n      ExternalNetworkId::Ethereum => 
\"ETHEREUM_KEY\",\n      ExternalNetworkId::Monero => \"MONERO_KEY\",\n    }) else {\n      continue;\n    };\n\n    register_service(Service::Processor(network), key);\n  }\n\n  // And the coordinator's\n  register_service(Service::Coordinator, read_key(\"COORDINATOR_KEY\").unwrap());\n\n  // Start server\n  // 5132 ^ ((b'M' << 8) | b'Q')\n  let server = TcpListener::bind(\"0.0.0.0:2287\").await.unwrap();\n\n  loop {\n    let (mut socket, _) = server.accept().await.unwrap();\n    // TODO: Add a magic value with a key at the start of the connection to make this authed\n    let mut db = db.clone();\n    tokio::spawn(async move {\n      while let Ok(msg_len) = socket.read_u32_le().await {\n        let mut buf = vec![0; usize::try_from(msg_len).unwrap()];\n        let Ok(_) = socket.read_exact(&mut buf).await else { break };\n        let msg = borsh::from_slice(&buf).unwrap();\n\n        match msg {\n          MessageQueueRequest::Queue { meta, msg, sig } => {\n            queue_message(\n              &mut db,\n              &meta,\n              msg,\n              SchnorrSignature::<Ristretto>::read(&mut sig.as_slice()).unwrap(),\n            );\n            let Ok(()) = socket.write_all(&[1]).await else { break };\n          }\n          MessageQueueRequest::Next { from, to } => match get_next_message(from, to) {\n            Some(msg) => {\n              let Ok(()) = socket.write_all(&[1]).await else { break };\n              let msg = borsh::to_vec(&msg).unwrap();\n              let len = u32::try_from(msg.len()).unwrap();\n              let Ok(()) = socket.write_all(&len.to_le_bytes()).await else { break };\n              let Ok(()) = socket.write_all(&msg).await else { break };\n            }\n            None => {\n              let Ok(()) = socket.write_all(&[0]).await else { break };\n            }\n          },\n          MessageQueueRequest::Ack { from, to, id, sig } => {\n            ack_message(\n              from,\n              to,\n              
id,\n              SchnorrSignature::<Ristretto>::read(&mut sig.as_slice()).unwrap(),\n            );\n            let Ok(()) = socket.write_all(&[1]).await else { break };\n          }\n        }\n      }\n    });\n  }\n}\n"
  },
  {
    "path": "message-queue/src/messages.rs",
    "content": "use transcript::{Transcript, RecommendedTranscript};\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::{group::GroupEncoding, Ciphersuite};\n\nuse borsh::{BorshSerialize, BorshDeserialize};\n\nuse serai_primitives::ExternalNetworkId;\n\n#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, BorshSerialize, BorshDeserialize)]\npub enum Service {\n  Processor(ExternalNetworkId),\n  Coordinator,\n}\n\n#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]\npub struct QueuedMessage {\n  pub from: Service,\n  pub id: u64,\n  pub msg: Vec<u8>,\n  pub sig: Vec<u8>,\n}\n\n#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]\npub struct Metadata {\n  pub from: Service,\n  pub to: Service,\n  pub intent: Vec<u8>,\n}\n\n#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]\npub enum MessageQueueRequest {\n  Queue { meta: Metadata, msg: Vec<u8>, sig: Vec<u8> },\n  Next { from: Service, to: Service },\n  Ack { from: Service, to: Service, id: u64, sig: Vec<u8> },\n}\n\npub fn message_challenge(\n  from: Service,\n  from_key: <Ristretto as Ciphersuite>::G,\n  to: Service,\n  intent: &[u8],\n  msg: &[u8],\n  nonce: <Ristretto as Ciphersuite>::G,\n) -> <Ristretto as Ciphersuite>::F {\n  let mut transcript = RecommendedTranscript::new(b\"Serai Message Queue v0.1 Message\");\n  transcript.domain_separate(b\"metadata\");\n  transcript.append_message(b\"from\", borsh::to_vec(&from).unwrap());\n  transcript.append_message(b\"from_key\", from_key.to_bytes());\n  transcript.append_message(b\"to\", borsh::to_vec(&to).unwrap());\n  transcript.append_message(b\"intent\", intent);\n  transcript.domain_separate(b\"message\");\n  transcript.append_message(b\"msg\", msg);\n  transcript.domain_separate(b\"signature\");\n  transcript.append_message(b\"nonce\", nonce.to_bytes());\n  <Ristretto as Ciphersuite>::hash_to_F(b\"message_challenge\", &transcript.challenge(b\"challenge\"))\n}\n\npub fn ack_challenge(\n  to: Service,\n  
to_key: <Ristretto as Ciphersuite>::G,\n  from: Service,\n  id: u64,\n  nonce: <Ristretto as Ciphersuite>::G,\n) -> <Ristretto as Ciphersuite>::F {\n  let mut transcript = RecommendedTranscript::new(b\"Serai Message Queue v0.1 Acknowledgement\");\n  transcript.domain_separate(b\"metadata\");\n  transcript.append_message(b\"to\", borsh::to_vec(&to).unwrap());\n  transcript.append_message(b\"to_key\", to_key.to_bytes());\n  transcript.append_message(b\"from\", borsh::to_vec(&from).unwrap());\n  transcript.domain_separate(b\"message\");\n  transcript.append_message(b\"id\", id.to_le_bytes());\n  transcript.domain_separate(b\"signature\");\n  transcript.append_message(b\"nonce\", nonce.to_bytes());\n  <Ristretto as Ciphersuite>::hash_to_F(b\"ack_challenge\", &transcript.challenge(b\"challenge\"))\n}\n"
  },
  {
    "path": "message-queue/src/queue.rs",
    "content": "use serai_db::{DbTxn, Db};\n\nuse crate::messages::*;\n\n#[derive(Clone, Debug)]\npub(crate) struct Queue<D: Db>(pub(crate) D, pub(crate) Service, pub(crate) Service);\nimpl<D: Db> Queue<D> {\n  fn key(domain: &'static [u8], key: impl AsRef<[u8]>) -> Vec<u8> {\n    [&[u8::try_from(domain.len()).unwrap()], domain, key.as_ref()].concat()\n  }\n\n  fn message_count_key(&self) -> Vec<u8> {\n    Self::key(b\"message_count\", borsh::to_vec(&(self.1, self.2)).unwrap())\n  }\n  pub(crate) fn message_count(&self) -> u64 {\n    self\n      .0\n      .get(self.message_count_key())\n      .map_or(0, |bytes| u64::from_le_bytes(bytes.try_into().unwrap()))\n  }\n\n  fn last_acknowledged_key(&self) -> Vec<u8> {\n    Self::key(b\"last_acknowledged\", borsh::to_vec(&(self.1, self.2)).unwrap())\n  }\n  pub(crate) fn last_acknowledged(&self) -> Option<u64> {\n    self\n      .0\n      .get(self.last_acknowledged_key())\n      .map(|bytes| u64::from_le_bytes(bytes.try_into().unwrap()))\n  }\n\n  fn message_key(&self, id: u64) -> Vec<u8> {\n    Self::key(b\"message\", borsh::to_vec(&(self.1, self.2, id)).unwrap())\n  }\n  // TODO: This is fine as-used, yet gets from the DB while having a txn. 
It should get from the\n  // txn\n  pub(crate) fn queue_message(\n    &mut self,\n    txn: &mut D::Transaction<'_>,\n    mut msg: QueuedMessage,\n  ) -> u64 {\n    let id = self.message_count();\n    msg.id = id;\n    let msg_key = self.message_key(id);\n    let msg_count_key = self.message_count_key();\n\n    txn.put(msg_key, borsh::to_vec(&msg).unwrap());\n    txn.put(msg_count_key, (id + 1).to_le_bytes());\n\n    id\n  }\n\n  pub(crate) fn get_message(&self, id: u64) -> Option<QueuedMessage> {\n    let msg: Option<QueuedMessage> =\n      self.0.get(self.message_key(id)).map(|bytes| borsh::from_slice(&bytes).unwrap());\n    if let Some(msg) = msg.as_ref() {\n      assert_eq!(msg.id, id, \"message stored at {id} has ID {}\", msg.id);\n    }\n    msg\n  }\n\n  pub(crate) fn ack_message(&mut self, id: u64) {\n    let ack_key = self.last_acknowledged_key();\n    let mut txn = self.0.txn();\n    txn.put(ack_key, id.to_le_bytes());\n    txn.commit();\n  }\n}\n"
  },
  {
    "path": "mini/Cargo.toml",
    "content": "[package]\nname = \"mini-serai\"\nversion = \"0.1.0\"\ndescription = \"A miniature version of Serai used to test for race conditions\"\nlicense = \"AGPL-3.0-only\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/mini\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nkeywords = []\nedition = \"2021\"\npublish = false\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nloom = \"0.7\"\n"
  },
  {
    "path": "mini/LICENSE",
    "content": "AGPL-3.0-only license\n\nCopyright (c) 2023 Luke Parker\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License Version 3 as\npublished by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n"
  },
  {
    "path": "mini/README.md",
    "content": "# Mini Serai\n\nA miniature version of the Serai stack, intended to demonstrate a lack of\nsystem-wide race conditions in the officially stated flows.\n\n### Why\n\nWhen working on multiple multisigs, a race condition was noted. Originally, the\ndocumentation stated that the activation block of the new multisig would be the\nblock after the next `Batch`'s block. This introduced a race condition, where\nsince multiple `Batch`s can be signed at the same time, multiple `Batch`s can\nexist in the mempool at the same time. This could cause `Batch`s [1, 2] to\nexist in the mempool, 1 to be published (causing 2 to be the activation block of\nthe new multisig), yet then the already signed 2 to be published (despite\nno longer being accurate as it only had events for a subset of keys).\n\nThis effort initially modeled and tested this single race condition, yet aims to\ngrow to the entire system. Then we just have to prove the actual Serai stack's\nflow reduces to the miniature flow modeled here. While further efforts are\nneeded to prove Serai's implementation of the flow is itself free of race\nconditions, this is a layer of defense over the theory.\n\n### How\n\n[loom](https://docs.rs/loom) is a library which will execute a block of code\nwith every possible combination of orders in order to test results aren't\ninvalidated by order of execution.\n"
  },
  {
    "path": "mini/src/lib.rs",
    "content": "use std::sync::{Arc as StdArc, RwLock as StdRwLock};\n\nuse loom::{\n  thread::{self, JoinHandle},\n  sync::{Arc, RwLock, mpsc},\n};\n\n#[cfg(test)]\nmod tests;\n\n#[derive(Clone, PartialEq, Eq, Hash, Debug)]\npub struct Batch {\n  block: u64,\n  keys: Vec<u64>,\n}\n\n#[derive(Clone, PartialEq, Eq, Hash, Debug)]\npub enum Event {\n  IncludedBatch(Batch),\n  // Allows if let else on this without clippy believing it's redundant\n  __Ignore,\n}\n\n// The amount of blocks to scan after we publish a batch, before confirming the batch was\n// included.\n// Prevents race conditions on rotation regarding when the new keys activate.\nconst BATCH_FTL: u64 = 3;\n\n#[derive(Debug)]\npub struct Serai {\n  handle: JoinHandle<()>,\n  remaining_ticks: Arc<RwLock<usize>>,\n  // Activation block, ID\n  pub active_keys: Arc<RwLock<Vec<(u64, u64)>>>,\n  pub mempool_batches: Arc<RwLock<Vec<Batch>>>,\n  pub events: mpsc::Receiver<Event>,\n  all_events_unsafe: StdArc<StdRwLock<Vec<Event>>>,\n}\n\nimpl Serai {\n  #[allow(clippy::new_without_default)]\n  pub fn new(ticks: usize, mut queued_key: bool) -> Serai {\n    let remaining_ticks = Arc::new(RwLock::new(ticks));\n\n    let active_keys = Arc::new(RwLock::new(vec![(0, 0)]));\n    let mempool_batches = Arc::new(RwLock::new(vec![]));\n    let (events_sender, events_receiver) = mpsc::channel();\n    let all_events_unsafe = StdArc::new(StdRwLock::new(vec![]));\n\n    let handle = thread::spawn({\n      let remaining_ticks = remaining_ticks.clone();\n\n      let active_keys = active_keys.clone();\n      let mempool_batches = mempool_batches.clone();\n      let all_events_unsafe = all_events_unsafe.clone();\n\n      move || {\n        while {\n          let mut remaining_ticks = remaining_ticks.write().unwrap();\n          let ticking = *remaining_ticks != 0;\n          *remaining_ticks = remaining_ticks.saturating_sub(1);\n          ticking\n        } {\n          let mut batches = mempool_batches.write().unwrap();\n          
if !batches.is_empty() {\n            let batch: Batch = batches.remove(0);\n\n            // Activate keys after the FTL\n            if queued_key {\n              let mut active_keys = active_keys.write().unwrap();\n              let len = active_keys.len().try_into().unwrap();\n              // TODO: active_keys is under Serai, yet the processor is the one actually with the\n              // context on when it activates\n              // This should be re-modeled as an event\n              active_keys.push((batch.block + BATCH_FTL, len));\n            }\n            queued_key = false;\n\n            let event = Event::IncludedBatch(batch);\n            events_sender.send(event.clone()).unwrap();\n            all_events_unsafe.write().unwrap().push(event);\n          }\n        }\n      }\n    });\n\n    Serai {\n      handle,\n      remaining_ticks,\n      mempool_batches,\n      active_keys,\n      events: events_receiver,\n      all_events_unsafe,\n    }\n  }\n\n  pub fn exhausted(&self) -> bool {\n    *self.remaining_ticks.read().unwrap() == 0\n  }\n\n  pub fn join(self) -> Vec<Event> {\n    self.handle.join().unwrap();\n\n    self.all_events_unsafe.read().unwrap().clone()\n  }\n}\n\n#[derive(Debug)]\npub struct Processor {\n  handle: JoinHandle<Serai>,\n}\n\nimpl Processor {\n  pub fn new(serai: Serai, blocks: u64) -> Processor {\n    let handle = thread::spawn(move || {\n      let mut last_finalized_block = 0;\n      for b in 0 .. 
blocks {\n        // If this block is too far ahead of Serai's last block, wait for Serai to process\n        // Note this wait only has to occur if we have a Batch which has yet to be included\n        // mini just publishes a Batch for every Block at this point in time, meaning it always has\n        // to wait\n        while b >= (last_finalized_block + BATCH_FTL) {\n          if serai.exhausted() {\n            return serai;\n          }\n          let Ok(event) = serai.events.recv() else { return serai };\n          if let Event::IncludedBatch(Batch { block, .. }) = event {\n            last_finalized_block = block;\n          }\n        }\n        serai.mempool_batches.write().unwrap().push(Batch {\n          block: b,\n          keys: serai\n            .active_keys\n            .read()\n            .unwrap()\n            .iter()\n            .filter_map(|(activation_block, id)| Some(*id).filter(|_| b >= *activation_block))\n            .collect(),\n        });\n      }\n      serai\n    });\n    Processor { handle }\n  }\n\n  pub fn join(self) -> Serai {\n    self.handle.join().unwrap()\n  }\n}\n"
  },
  {
    "path": "mini/src/tests/activation_race/mod.rs",
    "content": "use std::{\n  collections::HashSet,\n  sync::{Arc as StdArc, RwLock as StdRwLock},\n};\n\nuse crate::*;\n\n#[test]\nfn activation_race() {\n  #[derive(Debug)]\n  struct EagerProcessor {\n    handle: JoinHandle<Serai>,\n  }\n\n  impl EagerProcessor {\n    fn new(serai: Serai, batches: u64) -> EagerProcessor {\n      let handle = thread::spawn(move || {\n        for b in 0 .. batches {\n          serai.mempool_batches.write().unwrap().push(Batch {\n            block: b,\n            keys: serai\n              .active_keys\n              .read()\n              .unwrap()\n              .iter()\n              .filter_map(|(activation_block, id)| Some(*id).filter(|_| b >= *activation_block))\n              .collect(),\n          });\n        }\n        serai\n      });\n      EagerProcessor { handle }\n    }\n\n    fn join(self) -> Serai {\n      self.handle.join().unwrap()\n    }\n  }\n\n  let results = StdArc::new(StdRwLock::new(HashSet::new()));\n\n  loom::model({\n    let results = results.clone();\n    move || {\n      let serai = Serai::new(4, true);\n      let processor = EagerProcessor::new(serai, 4);\n      let serai = processor.join();\n      let events = serai.join();\n\n      results.write().unwrap().insert(events);\n    }\n  });\n\n  let results: HashSet<_> = results.read().unwrap().clone();\n  assert_eq!(results.len(), 6);\n  for result in results {\n    for (b, batch) in result.into_iter().enumerate() {\n      if b < 3 {\n        assert_eq!(\n          batch,\n          Event::IncludedBatch(Batch { block: b.try_into().unwrap(), keys: vec![0] })\n        );\n      } else {\n        let Event::IncludedBatch(batch) = batch else { panic!(\"unexpected event\") };\n        assert_eq!(batch.block, b.try_into().unwrap());\n        assert!((batch.keys == vec![0]) || (batch.keys == vec![0, 1]));\n      }\n    }\n  }\n}\n\n#[test]\nfn sequential_solves_activation_race() {\n  #[derive(Debug)]\n  struct DelayedProcessor {\n    handle: 
JoinHandle<Serai>,\n  }\n\n  impl DelayedProcessor {\n    fn new(serai: Serai, batches: u64) -> DelayedProcessor {\n      let handle = thread::spawn(move || {\n        for b in 0 .. batches {\n          let batch = {\n            let mut batches = serai.mempool_batches.write().unwrap();\n            let batch = Batch {\n              block: b,\n              keys: serai\n                .active_keys\n                .read()\n                .unwrap()\n                .iter()\n                .filter_map(|(activation_block, id)| Some(*id).filter(|_| b >= *activation_block))\n                .collect(),\n            };\n            batches.push(batch.clone());\n            batch\n          };\n\n          while (!serai.exhausted()) &&\n            (serai.events.recv().unwrap() != Event::IncludedBatch(batch.clone()))\n          {\n            loom::thread::yield_now();\n          }\n        }\n        serai\n      });\n      DelayedProcessor { handle }\n    }\n\n    fn join(self) -> Serai {\n      self.handle.join().unwrap()\n    }\n  }\n\n  let results = StdArc::new(StdRwLock::new(HashSet::new()));\n\n  loom::model({\n    let results = results.clone();\n    move || {\n      let serai = Serai::new(4, true);\n      let processor = DelayedProcessor::new(serai, 4);\n      let serai = processor.join();\n      let events = serai.join();\n\n      results.write().unwrap().insert(events);\n    }\n  });\n\n  let results: HashSet<_> = results.read().unwrap().clone();\n  assert_eq!(results.len(), 5);\n  for result in results {\n    for (b, batch) in result.into_iter().enumerate() {\n      assert_eq!(\n        batch,\n        Event::IncludedBatch(Batch {\n          block: b.try_into().unwrap(),\n          keys: if b < 3 { vec![0] } else { vec![0, 1] }\n        }),\n      );\n    }\n  }\n}\n\n#[test]\nfn ftl_solves_activation_race() {\n  let results = StdArc::new(StdRwLock::new(HashSet::new()));\n\n  loom::model({\n    let results = results.clone();\n    move || {\n      let serai 
= Serai::new(4, true);\n      // Uses Processor since this Processor has this algorithm implemented\n      let processor = Processor::new(serai, 4);\n      let serai = processor.join();\n      let events = serai.join();\n\n      results.write().unwrap().insert(events);\n    }\n  });\n\n  let results: HashSet<_> = results.read().unwrap().clone();\n  assert_eq!(results.len(), 5);\n  for result in results {\n    for (b, batch) in result.into_iter().enumerate() {\n      assert_eq!(\n        batch,\n        Event::IncludedBatch(Batch {\n          block: b.try_into().unwrap(),\n          keys: if b < 3 { vec![0] } else { vec![0, 1] }\n        }),\n      );\n    }\n  }\n}\n"
  },
  {
    "path": "mini/src/tests/mod.rs",
    "content": "mod activation_race;\n"
  },
  {
    "path": "networks/bitcoin/Cargo.toml",
    "content": "[package]\nname = \"bitcoin-serai\"\nversion = \"0.4.0\"\ndescription = \"A Bitcoin library for FROST-signing transactions\"\nlicense = \"MIT\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/networks/bitcoin\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\", \"Vrx <vrx00@proton.me>\"]\nedition = \"2021\"\nrust-version = \"1.80\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nstd-shims = { version = \"0.1.1\", path = \"../../common/std-shims\", default-features = false }\n\nthiserror = { version = \"1\", default-features = false, optional = true }\n\nsubtle = { version = \"2\", default-features = false }\nzeroize = { version = \"^1.5\", default-features = false }\nrand_core = { version = \"0.6\", default-features = false }\n\nbitcoin = { version = \"0.32\", default-features = false }\n\nk256 = { version = \"^0.13.1\", default-features = false, features = [\"arithmetic\", \"bits\"] }\nfrost = { package = \"modular-frost\", path = \"../../crypto/frost\", version = \"0.10\", default-features = false, features = [\"secp256k1\"], optional = true }\n\nhex = { version = \"0.4\", default-features = false, optional = true }\nserde = { version = \"1\", default-features = false, features = [\"derive\"], optional = true }\nserde_json = { version = \"1\", default-features = false, optional = true }\nsimple-request = { path = \"../../common/request\", version = \"0.1\", default-features = false, features = [\"tls\", \"basic-auth\"], optional = true }\n\n[dev-dependencies]\nsecp256k1 = { version = \"0.29\", default-features = false, features = [\"std\"] }\n\nfrost = { package = \"modular-frost\", path = \"../../crypto/frost\", features = [\"tests\"] }\n\ntokio = { version = \"1\", features = [\"macros\"] }\n\n[features]\nstd = [\n  \"std-shims/std\",\n\n  \"thiserror\",\n\n  \"subtle/std\",\n  \"zeroize/std\",\n  \"rand_core/std\",\n\n  
\"bitcoin/std\",\n  \"bitcoin/serde\",\n\n  \"k256/std\",\n  \"frost\",\n\n  \"hex/std\",\n  \"serde/std\",\n  \"serde_json/std\",\n  \"simple-request\",\n]\nhazmat = []\ndefault = [\"std\"]\n"
  },
  {
    "path": "networks/bitcoin/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2022-2023 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "networks/bitcoin/README.md",
    "content": "# bitcoin-serai\n\nAn application of [modular-frost](https://docs.rs/modular-frost) to Bitcoin\ntransactions, enabling extremely-efficient multisigs.\n"
  },
  {
    "path": "networks/bitcoin/src/crypto.rs",
    "content": "use subtle::{Choice, ConstantTimeEq, ConditionallySelectable};\n\nuse k256::{\n  elliptic_curve::sec1::{Tag, ToEncodedPoint},\n  ProjectivePoint,\n};\n\nuse bitcoin::key::XOnlyPublicKey;\n\n/// Get the x coordinate of a non-infinity point.\n///\n/// Panics on invalid input.\nfn x(key: &ProjectivePoint) -> [u8; 32] {\n  let encoded = key.to_encoded_point(true);\n  (*encoded.x().expect(\"point at infinity\")).into()\n}\n\n/// Convert a non-infinity point to a XOnlyPublicKey (dropping its sign).\n///\n/// Panics on invalid input.\npub(crate) fn x_only(key: &ProjectivePoint) -> XOnlyPublicKey {\n  XOnlyPublicKey::from_slice(&x(key)).expect(\"x_only was passed a point which was infinity or odd\")\n}\n\n/// Return if a point must be negated to have an even Y coordinate and be eligible for use.\npub(crate) fn needs_negation(key: &ProjectivePoint) -> Choice {\n  u8::from(key.to_encoded_point(true).tag()).ct_eq(&u8::from(Tag::CompressedOddY))\n}\n\n#[cfg(feature = \"std\")]\nmod frost_crypto {\n  use core::fmt::Debug;\n  use std_shims::{vec::Vec, io};\n\n  use zeroize::Zeroizing;\n  use rand_core::{RngCore, CryptoRng};\n\n  use bitcoin::hashes::{HashEngine, Hash, sha256::Hash as Sha256};\n\n  use k256::{elliptic_curve::ops::Reduce, U256, Scalar};\n\n  use frost::{\n    curve::{Ciphersuite, Secp256k1},\n    Participant, ThresholdKeys, ThresholdView, FrostError,\n    algorithm::{Hram as HramTrait, Algorithm, IetfSchnorr as FrostSchnorr},\n  };\n\n  use super::*;\n\n  /// A BIP-340 compatible HRAm for use with the modular-frost Schnorr Algorithm.\n  ///\n  /// If passed an odd nonce, the challenge will be negated.\n  ///\n  /// If either `R` or `A` is the point at infinity, this will panic.\n  #[derive(Clone, Copy, Debug)]\n  pub struct Hram;\n  #[allow(non_snake_case)]\n  impl HramTrait<Secp256k1> for Hram {\n    fn hram(R: &ProjectivePoint, A: &ProjectivePoint, m: &[u8]) -> Scalar {\n      const TAG_HASH: Sha256 = 
Sha256::const_hash(b\"BIP0340/challenge\");\n\n      let mut data = Sha256::engine();\n      data.input(TAG_HASH.as_ref());\n      data.input(TAG_HASH.as_ref());\n      data.input(&x(R));\n      data.input(&x(A));\n      data.input(m);\n\n      let c = Scalar::reduce(U256::from_be_slice(Sha256::from_engine(data).as_ref()));\n      // If the nonce was odd, sign `r - cx` instead of `r + cx`, allowing us to negate `s` at the\n      // end to sign as `-r + cx`\n      <_>::conditional_select(&c, &-c, needs_negation(R))\n    }\n  }\n\n  /// BIP-340 Schnorr signature algorithm.\n  ///\n  /// This may panic if called with nonces/a group key which are the point at infinity (which have\n  /// a negligible probability for a well-reasoned caller, even with malicious participants\n  /// present).\n  ///\n  /// `verify`, `verify_share` MUST be called after `sign_share` is called. Otherwise, this library\n  /// MAY panic.\n  #[derive(Clone)]\n  pub struct Schnorr(FrostSchnorr<Secp256k1, Hram>);\n  impl Schnorr {\n    /// Construct a Schnorr algorithm continuing the specified transcript.\n    #[allow(clippy::new_without_default)]\n    pub fn new() -> Schnorr {\n      Schnorr(FrostSchnorr::ietf())\n    }\n  }\n\n  impl Algorithm<Secp256k1> for Schnorr {\n    type Transcript = <FrostSchnorr<Secp256k1, Hram> as Algorithm<Secp256k1>>::Transcript;\n    type Addendum = ();\n    type Signature = [u8; 64];\n\n    fn transcript(&mut self) -> &mut Self::Transcript {\n      self.0.transcript()\n    }\n\n    fn nonces(&self) -> Vec<Vec<ProjectivePoint>> {\n      self.0.nonces()\n    }\n\n    fn preprocess_addendum<R: RngCore + CryptoRng>(\n      &mut self,\n      rng: &mut R,\n      keys: &ThresholdKeys<Secp256k1>,\n    ) {\n      self.0.preprocess_addendum(rng, keys)\n    }\n\n    fn read_addendum<R: io::Read>(&self, reader: &mut R) -> io::Result<Self::Addendum> {\n      self.0.read_addendum(reader)\n    }\n\n    fn process_addendum(\n      &mut self,\n      view: 
&ThresholdView<Secp256k1>,\n      i: Participant,\n      addendum: (),\n    ) -> Result<(), FrostError> {\n      self.0.process_addendum(view, i, addendum)\n    }\n\n    fn sign_share(\n      &mut self,\n      params: &ThresholdView<Secp256k1>,\n      nonce_sums: &[Vec<<Secp256k1 as Ciphersuite>::G>],\n      nonces: Vec<Zeroizing<<Secp256k1 as Ciphersuite>::F>>,\n      msg: &[u8],\n    ) -> <Secp256k1 as Ciphersuite>::F {\n      self.0.sign_share(params, nonce_sums, nonces, msg)\n    }\n\n    #[must_use]\n    fn verify(\n      &self,\n      group_key: ProjectivePoint,\n      nonces: &[Vec<ProjectivePoint>],\n      sum: Scalar,\n    ) -> Option<Self::Signature> {\n      self.0.verify(group_key, nonces, sum).map(|mut sig| {\n        sig.s = <_>::conditional_select(&sum, &-sum, needs_negation(&sig.R));\n        // Convert to a Bitcoin signature by dropping the byte for the point's sign bit\n        sig.serialize()[1 ..].try_into().unwrap()\n      })\n    }\n\n    fn verify_share(\n      &self,\n      verification_share: ProjectivePoint,\n      nonces: &[Vec<ProjectivePoint>],\n      share: Scalar,\n    ) -> Result<Vec<(Scalar, ProjectivePoint)>, ()> {\n      self.0.verify_share(verification_share, nonces, share)\n    }\n  }\n}\n#[cfg(feature = \"std\")]\npub use frost_crypto::*;\n"
  },
  {
    "path": "networks/bitcoin/src/lib.rs",
    "content": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![doc = include_str!(\"../README.md\")]\n#![cfg_attr(not(feature = \"std\"), no_std)]\n\n#[cfg(not(feature = \"std\"))]\nextern crate alloc;\n\n/// The bitcoin Rust library.\npub use bitcoin;\n\n/// Cryptographic helpers.\n#[cfg(feature = \"hazmat\")]\npub mod crypto;\n#[cfg(not(feature = \"hazmat\"))]\npub(crate) mod crypto;\n\n/// Wallet functionality to create transactions.\npub mod wallet;\n/// A minimal asynchronous Bitcoin RPC client.\n#[cfg(feature = \"std\")]\npub mod rpc;\n\n#[cfg(test)]\nmod tests;\n"
  },
  {
    "path": "networks/bitcoin/src/rpc.rs",
    "content": "use core::fmt::Debug;\nuse std::collections::HashSet;\n\nuse thiserror::Error;\n\nuse serde::{Deserialize, de::DeserializeOwned};\nuse serde_json::json;\n\nuse simple_request::{hyper, Request, Client};\n\nuse bitcoin::{\n  hashes::{Hash, hex::FromHex},\n  consensus::encode,\n  Txid, Transaction, BlockHash, Block,\n};\n\n#[derive(Clone, PartialEq, Eq, Debug, Deserialize)]\npub struct Error {\n  code: isize,\n  message: String,\n}\n\n#[derive(Clone, Debug, Deserialize)]\n#[serde(untagged)]\nenum RpcResponse<T> {\n  Ok { result: T },\n  Err { error: Error },\n}\n\n/// A minimal asynchronous Bitcoin RPC client.\n#[derive(Clone, Debug)]\npub struct Rpc {\n  client: Client,\n  url: String,\n}\n\n#[derive(Clone, PartialEq, Eq, Debug, Error)]\npub enum RpcError {\n  #[error(\"couldn't connect to node\")]\n  ConnectionError,\n  #[error(\"request had an error: {0:?}\")]\n  RequestError(Error),\n  #[error(\"node replied with invalid JSON\")]\n  InvalidJson(serde_json::error::Category),\n  #[error(\"node sent an invalid response ({0})\")]\n  InvalidResponse(&'static str),\n  #[error(\"node was missing expected methods\")]\n  MissingMethods(HashSet<&'static str>),\n}\n\nimpl Rpc {\n  /// Create a new connection to a Bitcoin RPC.\n  ///\n  /// An RPC call is performed to ensure the node is reachable (and that an invalid URL wasn't\n  /// provided).\n  ///\n  /// Additionally, a set of expected methods is checked to be offered by the Bitcoin RPC. If these\n  /// methods aren't provided, an error with the missing methods is returned. This ensures all RPC\n  /// routes explicitly provided by this library are at least possible.\n  ///\n  /// Each individual RPC route may still fail at time-of-call, regardless of the arguments\n  /// provided to this library, if the RPC has an incompatible argument layout. 
That is not checked\n  /// at time of RPC creation.\n  pub async fn new(url: String) -> Result<Rpc, RpcError> {\n    let rpc = Rpc { client: Client::with_connection_pool(), url };\n\n    // Make an RPC request to verify the node is reachable and sane\n    let res: String = rpc.rpc_call(\"help\", json!([])).await?;\n\n    // Verify all methods we expect are present\n    // If we had a more expanded RPC, due to differences in RPC versions, it wouldn't make sense to\n    // error if all methods weren't present\n    // We only provide a very minimal set of methods which have been largely consistent, hence why\n    // this is sane\n    let mut expected_methods = HashSet::from([\n      \"help\",\n      \"getblockcount\",\n      \"getblockhash\",\n      \"getblockheader\",\n      \"getblock\",\n      \"sendrawtransaction\",\n      \"getrawtransaction\",\n    ]);\n    for line in res.split('\\n') {\n      // This doesn't check if the arguments are as expected\n      // This is due to Bitcoin supporting a large amount of optional arguments, which\n      // occasionally change, with their own mechanism of text documentation, making matching off\n      // it a quite involved task\n      // Instead, once we've confirmed the methods are present, we assume our arguments are aligned\n      // Else we'll error at time of call\n      if expected_methods.remove(line.split(' ').next().unwrap_or(\"\")) &&\n        expected_methods.is_empty()\n      {\n        break;\n      }\n    }\n    if !expected_methods.is_empty() {\n      Err(RpcError::MissingMethods(expected_methods))?;\n    };\n\n    Ok(rpc)\n  }\n\n  /// Perform an arbitrary RPC call.\n  pub async fn rpc_call<Response: DeserializeOwned + Debug>(\n    &self,\n    method: &str,\n    params: serde_json::Value,\n  ) -> Result<Response, RpcError> {\n    let mut request = Request::from(\n      hyper::Request::post(&self.url)\n        .header(\"Content-Type\", \"application/json\")\n        .body(\n          
serde_json::to_vec(&json!({ \"jsonrpc\": \"2.0\", \"method\": method, \"params\": params }))\n            .unwrap()\n            .into(),\n        )\n        .unwrap(),\n    );\n    request.with_basic_auth();\n    let mut res = self\n      .client\n      .request(request)\n      .await\n      .map_err(|_| RpcError::ConnectionError)?\n      .body()\n      .await\n      .map_err(|_| RpcError::ConnectionError)?;\n\n    let res: RpcResponse<Response> =\n      serde_json::from_reader(&mut res).map_err(|e| RpcError::InvalidJson(e.classify()))?;\n    match res {\n      RpcResponse::Ok { result } => Ok(result),\n      RpcResponse::Err { error } => Err(RpcError::RequestError(error)),\n    }\n  }\n\n  /// Get the latest block's number.\n  ///\n  /// The genesis block's 'number' is zero. They increment from there.\n  pub async fn get_latest_block_number(&self) -> Result<usize, RpcError> {\n    // getblockcount doesn't return the amount of blocks on the current chain, yet the \"height\"\n    // of the current chain. The \"height\" of the current chain is defined as the \"height\" of the\n    // tip block of the current chain. The \"height\" of a block is defined as the amount of blocks\n    // present when the block was created. 
Accordingly, the genesis block has height 0, and\n    // getblockcount will return 0 when it's the only block, despite there being one block.\n    self.rpc_call(\"getblockcount\", json!([])).await\n  }\n\n  /// Get the hash of a block by the block's number.\n  pub async fn get_block_hash(&self, number: usize) -> Result<[u8; 32], RpcError> {\n    let mut hash = self\n      .rpc_call::<BlockHash>(\"getblockhash\", json!([number]))\n      .await?\n      .as_raw_hash()\n      .to_byte_array();\n    // bitcoin stores the inner bytes in reverse order.\n    hash.reverse();\n    Ok(hash)\n  }\n\n  /// Get a block's number by its hash.\n  pub async fn get_block_number(&self, hash: &[u8; 32]) -> Result<usize, RpcError> {\n    #[derive(Deserialize, Debug)]\n    struct Number {\n      height: usize,\n    }\n    Ok(self.rpc_call::<Number>(\"getblockheader\", json!([hex::encode(hash)])).await?.height)\n  }\n\n  /// Get a block by its hash.\n  pub async fn get_block(&self, hash: &[u8; 32]) -> Result<Block, RpcError> {\n    let hex = self.rpc_call::<String>(\"getblock\", json!([hex::encode(hash), 0])).await?;\n    let bytes: Vec<u8> = FromHex::from_hex(&hex)\n      .map_err(|_| RpcError::InvalidResponse(\"node didn't use hex to encode the block\"))?;\n    let block: Block = encode::deserialize(&bytes)\n      .map_err(|_| RpcError::InvalidResponse(\"node sent an improperly serialized block\"))?;\n\n    let mut block_hash = *block.block_hash().as_raw_hash().as_byte_array();\n    block_hash.reverse();\n    if hash != &block_hash {\n      Err(RpcError::InvalidResponse(\"node replied with a different block\"))?;\n    }\n\n    Ok(block)\n  }\n\n  /// Publish a transaction.\n  pub async fn send_raw_transaction(&self, tx: &Transaction) -> Result<Txid, RpcError> {\n    let txid = match self.rpc_call(\"sendrawtransaction\", json!([encode::serialize_hex(tx)])).await {\n      Ok(txid) => txid,\n      Err(e) => {\n        // A const from Bitcoin's bitcoin/src/rpc/protocol.h\n        const 
RPC_VERIFY_ALREADY_IN_CHAIN: isize = -27;\n        // If this was already successfully published, consider this having succeeded\n        if let RpcError::RequestError(Error { code, .. }) = e {\n          if code == RPC_VERIFY_ALREADY_IN_CHAIN {\n            return Ok(tx.compute_txid());\n          }\n        }\n        Err(e)?\n      }\n    };\n    if txid != tx.compute_txid() {\n      Err(RpcError::InvalidResponse(\"returned TX ID inequals calculated TX ID\"))?;\n    }\n    Ok(txid)\n  }\n\n  /// Get a transaction by its hash.\n  pub async fn get_transaction(&self, hash: &[u8; 32]) -> Result<Transaction, RpcError> {\n    let hex = self.rpc_call::<String>(\"getrawtransaction\", json!([hex::encode(hash)])).await?;\n    let bytes: Vec<u8> = FromHex::from_hex(&hex)\n      .map_err(|_| RpcError::InvalidResponse(\"node didn't use hex to encode the transaction\"))?;\n    let tx: Transaction = encode::deserialize(&bytes)\n      .map_err(|_| RpcError::InvalidResponse(\"node sent an improperly serialized transaction\"))?;\n\n    let mut tx_hash = *tx.compute_txid().as_raw_hash().as_byte_array();\n    tx_hash.reverse();\n    if hash != &tx_hash {\n      Err(RpcError::InvalidResponse(\"node replied with a different transaction\"))?;\n    }\n\n    Ok(tx)\n  }\n}\n"
  },
  {
    "path": "networks/bitcoin/src/tests/crypto.rs",
    "content": "use rand_core::OsRng;\n\nuse secp256k1::{Secp256k1 as BContext, Message, schnorr::Signature};\n\nuse frost::{\n  curve::Secp256k1,\n  Participant,\n  tests::{algorithm_machines, key_gen, sign},\n};\n\nuse crate::{\n  bitcoin::hashes::{Hash as HashTrait, sha256::Hash},\n  crypto::{x_only, Schnorr},\n  wallet::tweak_keys,\n};\n\n#[test]\nfn test_algorithm() {\n  let mut keys = key_gen::<_, Secp256k1>(&mut OsRng);\n  const MESSAGE: &[u8] = b\"Hello, World!\";\n\n  for keys in keys.values_mut() {\n    *keys = tweak_keys(keys.clone());\n  }\n\n  let algo = Schnorr::new();\n  let sig = sign(\n    &mut OsRng,\n    &algo,\n    keys.clone(),\n    algorithm_machines(&mut OsRng, &algo, &keys),\n    Hash::hash(MESSAGE).as_ref(),\n  );\n\n  BContext::new()\n    .verify_schnorr(\n      &Signature::from_slice(&sig)\n        .expect(\"couldn't convert produced signature to secp256k1::Signature\"),\n      &Message::from_digest_slice(Hash::hash(MESSAGE).as_ref()).unwrap(),\n      &x_only(&keys[&Participant::new(1).unwrap()].group_key()),\n    )\n    .unwrap()\n}\n"
  },
  {
    "path": "networks/bitcoin/src/tests/mod.rs",
    "content": "mod crypto;\n"
  },
  {
    "path": "networks/bitcoin/src/wallet/mod.rs",
    "content": "use std_shims::{\n  vec::Vec,\n  collections::HashMap,\n  io::{self, Write},\n};\n#[cfg(feature = \"std\")]\nuse std::io::{Read, BufReader};\n\nuse k256::{\n  elliptic_curve::sec1::{Tag, ToEncodedPoint},\n  Scalar, ProjectivePoint,\n};\n\n#[cfg(feature = \"std\")]\nuse frost::{\n  curve::{Ciphersuite, Secp256k1},\n  ThresholdKeys,\n};\n\nuse bitcoin::{\n  consensus::encode::serialize, key::TweakedPublicKey, OutPoint, ScriptBuf, TxOut, Transaction,\n  Block,\n};\n#[cfg(feature = \"std\")]\nuse bitcoin::{hashes::Hash, consensus::encode::Decodable, TapTweakHash};\n\nuse crate::crypto::x_only;\n#[cfg(feature = \"std\")]\nuse crate::crypto::needs_negation;\n\n#[cfg(feature = \"std\")]\nmod send;\n#[cfg(feature = \"std\")]\npub use send::*;\n\n/// Tweak keys to ensure they're usable with Bitcoin's Taproot upgrade.\n///\n/// This adds an unspendable script path to the key, preventing any outputs received to this key\n/// from being spent via a script. To have keys which have spendable script paths, further offsets\n/// from this position must be used.\n///\n/// After adding an unspendable script path, the key is negated if odd.\n///\n/// This has a negligible probability of returning keys whose group key is the point at infinity.\n#[cfg(feature = \"std\")]\npub fn tweak_keys(keys: ThresholdKeys<Secp256k1>) -> ThresholdKeys<Secp256k1> {\n  // Adds the unspendable script path per\n  // https://github.com/bitcoin/bips/blob/master/bip-0341.mediawiki#cite_note-23\n  let keys = {\n    use k256::elliptic_curve::{\n      bigint::{Encoding, U256},\n      ops::Reduce,\n      group::GroupEncoding,\n    };\n    let tweak_hash = TapTweakHash::hash(&keys.group_key().to_bytes().as_slice()[1 ..]);\n    /*\n      https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki#cite_ref-13-0 states how the\n      bias is negligible. 
This reduction shouldn't ever occur, yet if it did, the script path\n      would be unusable due to a check the script path hash is less than the order. That doesn't\n      impact us as we don't want the script path to be usable.\n    */\n    keys.offset(<Secp256k1 as Ciphersuite>::F::reduce(U256::from_be_bytes(\n      *tweak_hash.to_raw_hash().as_ref(),\n    )))\n  };\n\n  let needs_negation = needs_negation(&keys.group_key());\n  keys\n    .scale(<_ as subtle::ConditionallySelectable>::conditional_select(\n      &Scalar::ONE,\n      &-Scalar::ONE,\n      needs_negation,\n    ))\n    .expect(\"scaling keys by 1 or -1 yet interpreted as 0?\")\n}\n\n/// Return the Taproot address payload for a public key.\n///\n/// If the key is odd, this will return None.\npub fn p2tr_script_buf(key: ProjectivePoint) -> Option<ScriptBuf> {\n  if key.to_encoded_point(true).tag() != Tag::CompressedEvenY {\n    return None;\n  }\n\n  Some(ScriptBuf::new_p2tr_tweaked(TweakedPublicKey::dangerous_assume_tweaked(x_only(&key))))\n}\n\n/// A spendable output.\n#[derive(Clone, PartialEq, Eq, Debug)]\npub struct ReceivedOutput {\n  // The scalar offset to obtain the key usable to spend this output.\n  offset: Scalar,\n  // The output to spend.\n  output: TxOut,\n  // The TX ID and vout of the output to spend.\n  outpoint: OutPoint,\n}\n\nimpl ReceivedOutput {\n  /// The offset for this output.\n  pub fn offset(&self) -> Scalar {\n    self.offset\n  }\n\n  /// The Bitcoin output for this output.\n  pub fn output(&self) -> &TxOut {\n    &self.output\n  }\n\n  /// The outpoint for this output.\n  pub fn outpoint(&self) -> &OutPoint {\n    &self.outpoint\n  }\n\n  /// The value of this output.\n  pub fn value(&self) -> u64 {\n    self.output.value.to_sat()\n  }\n\n  /// Read a ReceivedOutput from a generic satisfying Read.\n  #[cfg(feature = \"std\")]\n  pub fn read<R: Read>(r: &mut R) -> io::Result<ReceivedOutput> {\n    let offset = Secp256k1::read_F(r)?;\n    let output;\n    let outpoint;\n   
 {\n      let mut buf_r = BufReader::with_capacity(0, r);\n      output =\n        TxOut::consensus_decode(&mut buf_r).map_err(|_| io::Error::other(\"invalid TxOut\"))?;\n      outpoint =\n        OutPoint::consensus_decode(&mut buf_r).map_err(|_| io::Error::other(\"invalid OutPoint\"))?;\n    }\n    Ok(ReceivedOutput { offset, output, outpoint })\n  }\n\n  /// Write a ReceivedOutput to a generic satisfying Write.\n  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {\n    w.write_all(&self.offset.to_bytes())?;\n    w.write_all(&serialize(&self.output))?;\n    w.write_all(&serialize(&self.outpoint))\n  }\n\n  /// Serialize a ReceivedOutput to a `Vec<u8>`.\n  pub fn serialize(&self) -> Vec<u8> {\n    let mut res = Vec::new();\n    self.write(&mut res).unwrap();\n    res\n  }\n}\n\n/// A transaction scanner capable of being used with HDKD schemes.\n#[derive(Clone, Debug)]\npub struct Scanner {\n  key: ProjectivePoint,\n  scripts: HashMap<ScriptBuf, Scalar>,\n}\n\nimpl Scanner {\n  /// Construct a Scanner for a key.\n  ///\n  /// Returns None if this key can't be scanned for.\n  pub fn new(key: ProjectivePoint) -> Option<Scanner> {\n    let mut scripts = HashMap::new();\n    scripts.insert(p2tr_script_buf(key)?, Scalar::ZERO);\n    Some(Scanner { key, scripts })\n  }\n\n  /// Register an offset to scan for.\n  ///\n  /// Due to Bitcoin's requirement that points are even, not every offset may be used.\n  /// If an offset isn't usable, it will be incremented until it is. If this offset is already\n  /// present, None is returned. Else, Some(offset) will be, with the used offset.\n  ///\n  /// This means offsets are surjective, not bijective, and the order offsets are registered in\n  /// may determine the validity of future offsets.\n  ///\n  /// The offsets registered must be securely generated. 
Arbitrary offsets may introduce a script\n  /// path into the output, allowing the output to be spent by satisfaction of an arbitrary script\n  /// (not by the signature of the key).\n  pub fn register_offset(&mut self, mut offset: Scalar) -> Option<Scalar> {\n    // This loop will terminate as soon as an even point is found, with any point having a ~50%\n    // chance of being even\n    // That means this should terminate within a very small amount of iterations\n    loop {\n      match p2tr_script_buf(self.key + (ProjectivePoint::GENERATOR * offset)) {\n        Some(script) => {\n          if self.scripts.contains_key(&script) {\n            None?;\n          }\n          self.scripts.insert(script, offset);\n          return Some(offset);\n        }\n        None => offset += Scalar::ONE,\n      }\n    }\n  }\n\n  /// Scan a transaction.\n  pub fn scan_transaction(&self, tx: &Transaction) -> Vec<ReceivedOutput> {\n    let mut res = Vec::new();\n    for (vout, output) in tx.output.iter().enumerate() {\n      // If the vout index exceeds 2**32, stop scanning outputs\n      let Ok(vout) = u32::try_from(vout) else { break };\n\n      if let Some(offset) = self.scripts.get(&output.script_pubkey) {\n        res.push(ReceivedOutput {\n          offset: *offset,\n          output: output.clone(),\n          outpoint: OutPoint::new(tx.compute_txid(), vout),\n        });\n      }\n    }\n    res\n  }\n\n  /// Scan a block.\n  ///\n  /// This will also scan the coinbase transaction which is bound by maturity. If received outputs\n  /// must be immediately spendable, a post-processing pass is needed to remove those outputs.\n  /// Alternatively, scan_transaction can be called on `block.txdata[1 ..]`.\n  pub fn scan_block(&self, block: &Block) -> Vec<ReceivedOutput> {\n    let mut res = Vec::new();\n    for tx in &block.txdata {\n      res.extend(self.scan_transaction(tx));\n    }\n    res\n  }\n}\n"
  },
  {
    "path": "networks/bitcoin/src/wallet/send.rs",
    "content": "use std_shims::{\n  io::{self, Read},\n  collections::HashMap,\n};\n\nuse thiserror::Error;\n\nuse rand_core::{RngCore, CryptoRng};\n\nuse k256::Scalar;\nuse frost::{curve::Secp256k1, Participant, ThresholdKeys, FrostError, sign::*};\n\nuse bitcoin::{\n  hashes::Hash,\n  sighash::{TapSighashType, SighashCache, Prevouts},\n  absolute::LockTime,\n  script::{PushBytesBuf, ScriptBuf},\n  transaction::{Version, Transaction},\n  OutPoint, Sequence, Witness, TxIn, Amount, TxOut,\n};\n\nuse crate::{\n  crypto::Schnorr,\n  wallet::{ReceivedOutput, p2tr_script_buf},\n};\n\n#[rustfmt::skip]\n// https://github.com/bitcoin/bitcoin/blob/306ccd4927a2efe325c8d84be1bdb79edeb29b04/src/policy/policy.cpp#L26-L63\n// As the above notes, a lower amount may not be considered dust if contained in a SegWit output\n// This doesn't bother with delineation due to how marginal these values are, and because it isn't\n// worth the complexity to implement differentiation\npub const DUST: u64 = 546;\n\n#[derive(Clone, PartialEq, Eq, Debug, Error)]\npub enum TransactionError {\n  #[error(\"no inputs were specified\")]\n  NoInputs,\n  #[error(\"no outputs were created\")]\n  NoOutputs,\n  #[error(\"a specified payment's amount was less than bitcoin's required minimum\")]\n  DustPayment,\n  #[error(\"too much data was specified\")]\n  TooMuchData,\n  #[error(\"fee was too low to pass the default minimum fee rate\")]\n  TooLowFee,\n  #[error(\"not enough funds for these payments\")]\n  NotEnoughFunds { inputs: u64, payments: u64, fee: u64 },\n  #[error(\"transaction was too large\")]\n  TooLargeTransaction,\n}\n\n/// A signable transaction, clone-able across attempts.\n#[derive(Clone, PartialEq, Eq, Debug)]\npub struct SignableTransaction {\n  tx: Transaction,\n  offsets: Vec<Scalar>,\n  prevouts: Vec<TxOut>,\n  needed_fee: u64,\n}\n\nimpl SignableTransaction {\n  fn calculate_weight_vbytes(\n    inputs: usize,\n    payments: &[(ScriptBuf, u64)],\n    change: Option<&ScriptBuf>,\n  ) -> 
(u64, u64) {\n    // Expand this to a full transaction in order to use the bitcoin library's weight function\n    let mut tx = Transaction {\n      version: Version(2),\n      lock_time: LockTime::ZERO,\n      input: vec![\n        TxIn {\n          // This is a fixed size\n          // See https://developer.bitcoin.org/reference/transactions.html#raw-transaction-format\n          previous_output: OutPoint::default(),\n          // This is empty for a Taproot spend\n          script_sig: ScriptBuf::new(),\n          // This is fixed size, yet we do use Sequence::MAX\n          sequence: Sequence::MAX,\n          // Our witness contains a single 64-byte signature\n          witness: Witness::from_slice(&[vec![0; 64]])\n        };\n        inputs\n      ],\n      output: payments\n        .iter()\n        // The payment is a fixed size so we don't have to use it here\n        // The script pub key is not of a fixed size and does have to be used here\n        .map(|payment| TxOut {\n          value: Amount::from_sat(payment.1),\n          script_pubkey: payment.0.clone(),\n        })\n        .collect(),\n    };\n    if let Some(change) = change {\n      // Use a 0 value since we're currently unsure what the change amount will be, and since\n      // the value is fixed size (so any value could be used here)\n      tx.output.push(TxOut { value: Amount::ZERO, script_pubkey: change.clone() });\n    }\n\n    let weight = tx.weight();\n\n    // Now calculate the size in vbytes\n\n    /*\n      \"Virtual transaction size\" is weight ceildiv 4 per\n      https://github.com/bitcoin/bips/blob/master/bip-0141.mediawiki\n\n      https://github.com/bitcoin/bitcoin/blob/306ccd4927a2efe325c8d84be1bdb79edeb29b04\n        /src/policy/policy.cpp#L295-L298\n      implements this almost as expected, with an additional consideration to signature operations\n\n      Signature operations (the second argument of the following call) do not count Taproot\n      signatures per 
https://github.com/bitcoin/bips/blob/master/bip-0342.mediawiki#cite_ref-11-0\n\n      We don't risk running afoul of the Taproot signature limit as it allows at least one per\n      input, which is all we use\n    */\n    (\n      weight.to_wu(),\n      u64::try_from(bitcoin::policy::get_virtual_tx_size(\n        i64::try_from(weight.to_wu()).unwrap(),\n        0i64,\n      ))\n      .unwrap(),\n    )\n  }\n\n  /// Returns the fee necessary for this transaction to achieve the fee rate specified at\n  /// construction.\n  ///\n  /// The actual fee this transaction will use is `sum(inputs) - sum(outputs)`.\n  pub fn needed_fee(&self) -> u64 {\n    self.needed_fee\n  }\n\n  /// Returns the fee this transaction will use.\n  pub fn fee(&self) -> u64 {\n    self.prevouts.iter().map(|prevout| prevout.value.to_sat()).sum::<u64>() -\n      self.tx.output.iter().map(|prevout| prevout.value.to_sat()).sum::<u64>()\n  }\n\n  /// Create a new SignableTransaction.\n  ///\n  /// If a change address is specified, any leftover funds will be sent to it if the leftover funds\n  /// exceed the minimum output amount. 
If a change address isn't specified, all leftover funds\n  /// will become part of the paid fee.\n  ///\n  /// If data is specified, an OP_RETURN output will be added with it.\n  pub fn new(\n    mut inputs: Vec<ReceivedOutput>,\n    payments: &[(ScriptBuf, u64)],\n    change: Option<ScriptBuf>,\n    data: Option<Vec<u8>>,\n    fee_per_vbyte: u64,\n  ) -> Result<SignableTransaction, TransactionError> {\n    if inputs.is_empty() {\n      Err(TransactionError::NoInputs)?;\n    }\n\n    if payments.is_empty() && change.is_none() && data.is_none() {\n      Err(TransactionError::NoOutputs)?;\n    }\n\n    for (_, amount) in payments {\n      if *amount < DUST {\n        Err(TransactionError::DustPayment)?;\n      }\n    }\n\n    if data.as_ref().map_or(0, Vec::len) > 80 {\n      Err(TransactionError::TooMuchData)?;\n    }\n\n    let input_sat = inputs.iter().map(|input| input.output.value.to_sat()).sum::<u64>();\n    let offsets = inputs.iter().map(|input| input.offset).collect();\n    let tx_ins = inputs\n      .iter()\n      .map(|input| TxIn {\n        previous_output: input.outpoint,\n        script_sig: ScriptBuf::new(),\n        sequence: Sequence::MAX,\n        witness: Witness::new(),\n      })\n      .collect::<Vec<_>>();\n\n    let payment_sat = payments.iter().map(|payment| payment.1).sum::<u64>();\n    let mut tx_outs = payments\n      .iter()\n      .map(|payment| TxOut { value: Amount::from_sat(payment.1), script_pubkey: payment.0.clone() })\n      .collect::<Vec<_>>();\n\n    // Add the OP_RETURN output\n    if let Some(data) = data {\n      tx_outs.push(TxOut {\n        value: Amount::ZERO,\n        script_pubkey: ScriptBuf::new_op_return(\n          PushBytesBuf::try_from(data)\n            .expect(\"data didn't fit into PushBytes depsite being checked\"),\n        ),\n      })\n    }\n\n    let (mut weight, vbytes) = Self::calculate_weight_vbytes(tx_ins.len(), payments, None);\n\n    let mut needed_fee = fee_per_vbyte * vbytes;\n    // Technically, if 
there isn't change, this TX may still pay enough of a fee to pass the\n    // minimum fee. Such edge cases aren't worth programming when they go against intent, as the\n    // specified fee rate is too low to be valid\n    // bitcoin::policy::DEFAULT_MIN_RELAY_TX_FEE is in sats/kilo-vbyte\n    if needed_fee < ((u64::from(bitcoin::policy::DEFAULT_MIN_RELAY_TX_FEE) * vbytes) / 1000) {\n      Err(TransactionError::TooLowFee)?;\n    }\n\n    if input_sat < (payment_sat + needed_fee) {\n      Err(TransactionError::NotEnoughFunds {\n        inputs: input_sat,\n        payments: payment_sat,\n        fee: needed_fee,\n      })?;\n    }\n\n    // If there's a change address, check if there's change to give it\n    if let Some(change) = change {\n      let (weight_with_change, vbytes_with_change) =\n        Self::calculate_weight_vbytes(tx_ins.len(), payments, Some(&change));\n      let fee_with_change = fee_per_vbyte * vbytes_with_change;\n      if let Some(value) = input_sat.checked_sub(payment_sat + fee_with_change) {\n        if value >= DUST {\n          tx_outs.push(TxOut { value: Amount::from_sat(value), script_pubkey: change });\n          weight = weight_with_change;\n          needed_fee = fee_with_change;\n        }\n      }\n    }\n\n    if tx_outs.is_empty() {\n      Err(TransactionError::NoOutputs)?;\n    }\n\n    if weight > u64::from(bitcoin::policy::MAX_STANDARD_TX_WEIGHT) {\n      Err(TransactionError::TooLargeTransaction)?;\n    }\n\n    Ok(SignableTransaction {\n      tx: Transaction {\n        version: Version(2),\n        lock_time: LockTime::ZERO,\n        input: tx_ins,\n        output: tx_outs,\n      },\n      offsets,\n      prevouts: inputs.drain(..).map(|input| input.output).collect(),\n      needed_fee,\n    })\n  }\n\n  /// Returns the TX ID of the transaction this will create.\n  pub fn txid(&self) -> [u8; 32] {\n    let mut res = self.tx.compute_txid().to_byte_array();\n    res.reverse();\n    res\n  }\n\n  /// Returns the transaction, sans 
witness, this will create if signed.\n  pub fn transaction(&self) -> &Transaction {\n    &self.tx\n  }\n\n  /// Create a multisig machine for this transaction.\n  ///\n  /// Returns None if the wrong keys are used.\n  pub fn multisig(self, keys: &ThresholdKeys<Secp256k1>) -> Option<TransactionMachine> {\n    let mut sigs = vec![];\n    for i in 0 .. self.tx.input.len() {\n      let offset = keys.clone().offset(self.offsets[i]);\n      if p2tr_script_buf(offset.group_key())? != self.prevouts[i].script_pubkey {\n        None?;\n      }\n\n      sigs.push(AlgorithmMachine::new(Schnorr::new(), keys.clone().offset(self.offsets[i])));\n    }\n\n    Some(TransactionMachine { tx: self, sigs })\n  }\n}\n\n/// A FROST signing machine to produce a Bitcoin transaction.\n///\n/// This does not support caching its preprocess. When sign is called, the message must be empty.\n/// This will panic if either `cache`, `from_cache` is called or the message isn't empty.\npub struct TransactionMachine {\n  tx: SignableTransaction,\n  sigs: Vec<AlgorithmMachine<Secp256k1, Schnorr>>,\n}\n\nimpl PreprocessMachine for TransactionMachine {\n  type Preprocess = Vec<Preprocess<Secp256k1, ()>>;\n  type Signature = Transaction;\n  type SignMachine = TransactionSignMachine;\n\n  fn preprocess<R: RngCore + CryptoRng>(\n    mut self,\n    rng: &mut R,\n  ) -> (Self::SignMachine, Self::Preprocess) {\n    let mut preprocesses = Vec::with_capacity(self.sigs.len());\n    let sigs = self\n      .sigs\n      .drain(..)\n      .map(|sig| {\n        let (sig, preprocess) = sig.preprocess(rng);\n        preprocesses.push(preprocess);\n        sig\n      })\n      .collect();\n\n    (TransactionSignMachine { tx: self.tx, sigs }, preprocesses)\n  }\n}\n\npub struct TransactionSignMachine {\n  tx: SignableTransaction,\n  sigs: Vec<AlgorithmSignMachine<Secp256k1, Schnorr>>,\n}\n\nimpl SignMachine<Transaction> for TransactionSignMachine {\n  type Params = ();\n  type Keys = ThresholdKeys<Secp256k1>;\n  type 
Preprocess = Vec<Preprocess<Secp256k1, ()>>;\n  type SignatureShare = Vec<SignatureShare<Secp256k1>>;\n  type SignatureMachine = TransactionSignatureMachine;\n\n  fn cache(self) -> CachedPreprocess {\n    unimplemented!(\n      \"Bitcoin transactions don't support caching their preprocesses due to {}\",\n      \"being already bound to a specific transaction\"\n    );\n  }\n\n  fn from_cache(\n    (): (),\n    _: ThresholdKeys<Secp256k1>,\n    _: CachedPreprocess,\n  ) -> (Self, Self::Preprocess) {\n    unimplemented!(\n      \"Bitcoin transactions don't support caching their preprocesses due to {}\",\n      \"being already bound to a specific transaction\"\n    );\n  }\n\n  fn read_preprocess<R: Read>(&self, reader: &mut R) -> io::Result<Self::Preprocess> {\n    self.sigs.iter().map(|sig| sig.read_preprocess(reader)).collect()\n  }\n\n  fn sign(\n    mut self,\n    commitments: HashMap<Participant, Self::Preprocess>,\n    msg: &[u8],\n  ) -> Result<(TransactionSignatureMachine, Self::SignatureShare), FrostError> {\n    if !msg.is_empty() {\n      panic!(\"message was passed to the TransactionSignMachine when it generates its own\");\n    }\n\n    let commitments = (0 .. 
self.sigs.len())\n      .map(|c| {\n        commitments\n          .iter()\n          .map(|(l, commitments)| (*l, commitments[c].clone()))\n          .collect::<HashMap<_, _>>()\n      })\n      .collect::<Vec<_>>();\n\n    let mut cache = SighashCache::new(&self.tx.tx);\n    // Sign committing to all inputs\n    let prevouts = Prevouts::All(&self.tx.prevouts);\n\n    let mut shares = Vec::with_capacity(self.sigs.len());\n    let sigs = self\n      .sigs\n      .drain(..)\n      .enumerate()\n      .map(|(i, sig)| {\n        let (sig, share) = sig.sign(\n          commitments[i].clone(),\n          cache\n            .taproot_key_spend_signature_hash(i, &prevouts, TapSighashType::Default)\n            // This should never happen since the inputs align with the TX the cache was\n            // constructed with, and because i is always < prevouts.len()\n            .expect(\"taproot_key_spend_signature_hash failed to return a hash\")\n            .as_ref(),\n        )?;\n        shares.push(share);\n        Ok(sig)\n      })\n      .collect::<Result<_, _>>()?;\n\n    Ok((TransactionSignatureMachine { tx: self.tx.tx, sigs }, shares))\n  }\n}\n\npub struct TransactionSignatureMachine {\n  tx: Transaction,\n  sigs: Vec<AlgorithmSignatureMachine<Secp256k1, Schnorr>>,\n}\n\nimpl SignatureMachine<Transaction> for TransactionSignatureMachine {\n  type SignatureShare = Vec<SignatureShare<Secp256k1>>;\n\n  fn read_share<R: Read>(&self, reader: &mut R) -> io::Result<Self::SignatureShare> {\n    self.sigs.iter().map(|sig| sig.read_share(reader)).collect()\n  }\n\n  fn complete(\n    mut self,\n    mut shares: HashMap<Participant, Self::SignatureShare>,\n  ) -> Result<Transaction, FrostError> {\n    for (input, schnorr) in self.tx.input.iter_mut().zip(self.sigs.drain(..)) {\n      let sig = schnorr.complete(\n        shares.iter_mut().map(|(l, shares)| (*l, shares.remove(0))).collect::<HashMap<_, _>>(),\n      )?;\n\n      let mut witness = Witness::new();\n      
witness.push(sig);\n      input.witness = witness;\n    }\n\n    Ok(self.tx)\n  }\n}\n"
  },
  {
    "path": "networks/bitcoin/tests/rpc.rs",
    "content": "use bitcoin_serai::{bitcoin::hashes::Hash as HashTrait, rpc::RpcError};\n\nmod runner;\nuse runner::rpc;\n\nasync_sequential! {\n  async fn test_rpc() {\n    let rpc = rpc().await;\n\n    // Test get_latest_block_number and get_block_hash by round tripping them\n    let latest = rpc.get_latest_block_number().await.unwrap();\n    let hash = rpc.get_block_hash(latest).await.unwrap();\n    assert_eq!(rpc.get_block_number(&hash).await.unwrap(), latest);\n\n    // Test this actually is the latest block number by checking asking for the next block's errors\n    assert!(matches!(rpc.get_block_hash(latest + 1).await, Err(RpcError::RequestError(_))));\n\n    // Test get_block by checking the received block's hash matches the request\n    let block = rpc.get_block(&hash).await.unwrap();\n    // Hashes are stored in reverse. It's bs from Satoshi\n    let mut block_hash = *block.block_hash().as_raw_hash().as_byte_array();\n    block_hash.reverse();\n    assert_eq!(hash, block_hash);\n  }\n}\n"
  },
  {
    "path": "networks/bitcoin/tests/runner.rs",
    "content": "use std::sync::LazyLock;\n\nuse bitcoin_serai::rpc::Rpc;\n\nuse tokio::sync::Mutex;\n\n#[allow(dead_code)]\npub(crate) static SEQUENTIAL: LazyLock<Mutex<()>> = LazyLock::new(|| Mutex::new(()));\n\n#[allow(dead_code)]\npub(crate) async fn rpc() -> Rpc {\n  let rpc = Rpc::new(\"http://serai:seraidex@127.0.0.1:8332\".to_string()).await.unwrap();\n\n  // If this node has already been interacted with, clear its chain\n  if rpc.get_latest_block_number().await.unwrap() > 0 {\n    rpc\n      .rpc_call(\n        \"invalidateblock\",\n        serde_json::json!([hex::encode(rpc.get_block_hash(1).await.unwrap())]),\n      )\n      .await\n      .unwrap()\n  }\n\n  rpc\n}\n\n#[macro_export]\nmacro_rules! async_sequential {\n  ($(async fn $name: ident() $body: block)*) => {\n    $(\n      #[tokio::test]\n      async fn $name() {\n        let guard = runner::SEQUENTIAL.lock().await;\n        let local = tokio::task::LocalSet::new();\n        local.run_until(async move {\n          if let Err(err) = tokio::task::spawn_local(async move { $body }).await {\n            drop(guard);\n            Err(err).unwrap()\n          }\n        }).await;\n      }\n    )*\n  }\n}\n"
  },
  {
    "path": "networks/bitcoin/tests/wallet.rs",
    "content": "use std::collections::HashMap;\n\nuse rand_core::{RngCore, OsRng};\n\nuse k256::{\n  elliptic_curve::{\n    group::{ff::Field, Group},\n    sec1::{Tag, ToEncodedPoint},\n  },\n  Scalar, ProjectivePoint,\n};\nuse frost::{\n  curve::Secp256k1,\n  Participant, ThresholdKeys,\n  tests::{THRESHOLD, key_gen, sign_without_caching},\n};\n\nuse bitcoin_serai::{\n  bitcoin::{\n    hashes::Hash as HashTrait,\n    blockdata::opcodes::all::OP_RETURN,\n    script::{PushBytesBuf, Instruction, Instructions, Script},\n    OutPoint, Amount, TxOut, Transaction, Network, Address,\n  },\n  wallet::{\n    tweak_keys, p2tr_script_buf, ReceivedOutput, Scanner, TransactionError, SignableTransaction,\n  },\n  rpc::Rpc,\n};\n\nmod runner;\nuse runner::rpc;\n\nconst FEE: u64 = 20;\n\nfn is_even(key: ProjectivePoint) -> bool {\n  key.to_encoded_point(true).tag() == Tag::CompressedEvenY\n}\n\nasync fn send_and_get_output(rpc: &Rpc, scanner: &Scanner, key: ProjectivePoint) -> ReceivedOutput {\n  let block_number = rpc.get_latest_block_number().await.unwrap() + 1;\n\n  rpc\n    .rpc_call::<Vec<String>>(\n      \"generatetoaddress\",\n      serde_json::json!([\n        1,\n        Address::from_script(&p2tr_script_buf(key).unwrap(), Network::Regtest).unwrap()\n      ]),\n    )\n    .await\n    .unwrap();\n\n  // Mine until maturity\n  rpc\n    .rpc_call::<Vec<String>>(\n      \"generatetoaddress\",\n      serde_json::json!([100, Address::p2sh(Script::new(), Network::Regtest).unwrap()]),\n    )\n    .await\n    .unwrap();\n\n  let block = rpc.get_block(&rpc.get_block_hash(block_number).await.unwrap()).await.unwrap();\n\n  let mut outputs = scanner.scan_block(&block);\n  assert_eq!(outputs, scanner.scan_transaction(&block.txdata[0]));\n\n  assert_eq!(outputs.len(), 1);\n  assert_eq!(outputs[0].outpoint(), &OutPoint::new(block.txdata[0].compute_txid(), 0));\n  assert_eq!(outputs[0].value(), block.txdata[0].output[0].value.to_sat());\n\n  assert_eq!(\n    
ReceivedOutput::read::<&[u8]>(&mut outputs[0].serialize().as_ref()).unwrap(),\n    outputs[0]\n  );\n\n  outputs.swap_remove(0)\n}\n\nfn keys() -> (HashMap<Participant, ThresholdKeys<Secp256k1>>, ProjectivePoint) {\n  let mut keys = key_gen(&mut OsRng);\n  for keys in keys.values_mut() {\n    *keys = tweak_keys(keys.clone());\n  }\n  let key = keys.values().next().unwrap().group_key();\n  (keys, key)\n}\n\nfn sign(\n  keys: &HashMap<Participant, ThresholdKeys<Secp256k1>>,\n  tx: &SignableTransaction,\n) -> Transaction {\n  let mut machines = HashMap::new();\n  for i in (1 ..= THRESHOLD).map(|i| Participant::new(i).unwrap()) {\n    machines.insert(i, tx.clone().multisig(&keys[&i].clone()).unwrap());\n  }\n  sign_without_caching(&mut OsRng, machines, &[])\n}\n\nasync_sequential! {\n  async fn test_scanner() {\n    // Test Scanners are creatable for even keys.\n    for _ in 0 .. 128 {\n      let key = ProjectivePoint::random(&mut OsRng);\n      assert_eq!(Scanner::new(key).is_some(), is_even(key));\n    }\n\n    let mut key = ProjectivePoint::random(&mut OsRng);\n    while !is_even(key) {\n      key += ProjectivePoint::GENERATOR;\n    }\n\n    {\n      let mut scanner = Scanner::new(key).unwrap();\n      for _ in 0 .. 
128 {\n        let mut offset = Scalar::random(&mut OsRng);\n        let registered = scanner.register_offset(offset).unwrap();\n        // Registering this again should return None\n        assert!(scanner.register_offset(offset).is_none());\n\n        // We can only register offsets resulting in even keys\n        // Make this even\n        while !is_even(key + (ProjectivePoint::GENERATOR * offset)) {\n          offset += Scalar::ONE;\n        }\n        // Ensure it matches the registered offset\n        assert_eq!(registered, offset);\n        // Assert registering this again fails\n        assert!(scanner.register_offset(offset).is_none());\n      }\n    }\n\n    let rpc = rpc().await;\n    let mut scanner = Scanner::new(key).unwrap();\n\n    assert_eq!(send_and_get_output(&rpc, &scanner, key).await.offset(), Scalar::ZERO);\n\n    // Register an offset and test receiving to it\n    let offset = scanner.register_offset(Scalar::random(&mut OsRng)).unwrap();\n    assert_eq!(\n      send_and_get_output(&rpc, &scanner, key + (ProjectivePoint::GENERATOR * offset))\n        .await\n        .offset(),\n      offset\n    );\n  }\n\n  async fn test_transaction_errors() {\n    let (_, key) = keys();\n\n    let rpc = rpc().await;\n    let scanner = Scanner::new(key).unwrap();\n\n    let output = send_and_get_output(&rpc, &scanner, key).await;\n    assert_eq!(output.offset(), Scalar::ZERO);\n\n    let inputs = vec![output];\n    let addr = || p2tr_script_buf(key).unwrap();\n    let payments = vec![(addr(), 1000)];\n\n    assert!(SignableTransaction::new(inputs.clone(), &payments, None, None, FEE).is_ok());\n\n    assert_eq!(\n      SignableTransaction::new(vec![], &payments, None, None, FEE),\n      Err(TransactionError::NoInputs)\n    );\n\n    // No change\n    assert!(SignableTransaction::new(inputs.clone(), &[(addr(), 1000)], None, None, FEE).is_ok());\n    // Consolidation TX\n    assert!(SignableTransaction::new(inputs.clone(), &[], Some(addr()), None, 
FEE).is_ok());\n    // Data\n    assert!(SignableTransaction::new(inputs.clone(), &[], None, Some(vec![]), FEE).is_ok());\n    // No outputs\n    assert_eq!(\n      SignableTransaction::new(inputs.clone(), &[], None, None, FEE),\n      Err(TransactionError::NoOutputs),\n    );\n\n    assert_eq!(\n      SignableTransaction::new(inputs.clone(), &[(addr(), 1)], None, None, FEE),\n      Err(TransactionError::DustPayment),\n    );\n\n    assert!(\n      SignableTransaction::new(inputs.clone(), &payments, None, Some(vec![0; 80]), FEE).is_ok()\n    );\n    assert_eq!(\n      SignableTransaction::new(inputs.clone(), &payments, None, Some(vec![0; 81]), FEE),\n      Err(TransactionError::TooMuchData),\n    );\n\n    assert_eq!(\n      SignableTransaction::new(inputs.clone(), &[], Some(addr()), None, 0),\n      Err(TransactionError::TooLowFee),\n    );\n\n    assert!(matches!(\n      SignableTransaction::new(inputs.clone(), &[(addr(), inputs[0].value() * 2)], None, None, FEE),\n      Err(TransactionError::NotEnoughFunds { .. 
}),\n    ));\n\n    assert_eq!(\n      SignableTransaction::new(inputs, &vec![(addr(), 1000); 10000], None, None, FEE),\n      Err(TransactionError::TooLargeTransaction),\n    );\n  }\n\n  async fn test_send() {\n    let (keys, key) = keys();\n\n    let rpc = rpc().await;\n    let mut scanner = Scanner::new(key).unwrap();\n\n    // Get inputs, one not offset and one offset\n    let output = send_and_get_output(&rpc, &scanner, key).await;\n    assert_eq!(output.offset(), Scalar::ZERO);\n\n    let offset = scanner.register_offset(Scalar::random(&mut OsRng)).unwrap();\n    let offset_key = key + (ProjectivePoint::GENERATOR * offset);\n    let offset_output = send_and_get_output(&rpc, &scanner, offset_key).await;\n    assert_eq!(offset_output.offset(), offset);\n\n    // Declare payments, change, fee\n    let payments = [\n      (p2tr_script_buf(key).unwrap(), 1005),\n      (p2tr_script_buf(offset_key).unwrap(), 1007)\n    ];\n\n    let change_offset = scanner.register_offset(Scalar::random(&mut OsRng)).unwrap();\n    let change_key = key + (ProjectivePoint::GENERATOR * change_offset);\n    let change_addr = p2tr_script_buf(change_key).unwrap();\n\n    // Create and sign the TX\n    let tx = SignableTransaction::new(\n      vec![output.clone(), offset_output.clone()],\n      &payments,\n      Some(change_addr.clone()),\n      None,\n      FEE\n    ).unwrap();\n    let needed_fee = tx.needed_fee();\n    let expected_id = tx.txid();\n    let tx = sign(&keys, &tx);\n\n    assert_eq!(tx.output.len(), 3);\n\n    // Ensure we can scan it\n    let outputs = scanner.scan_transaction(&tx);\n    for (o, output) in outputs.iter().enumerate() {\n      assert_eq!(output.outpoint(), &OutPoint::new(tx.compute_txid(), u32::try_from(o).unwrap()));\n      assert_eq!(&ReceivedOutput::read::<&[u8]>(&mut output.serialize().as_ref()).unwrap(), output);\n    }\n\n    assert_eq!(outputs[0].offset(), Scalar::ZERO);\n    assert_eq!(outputs[1].offset(), offset);\n    
assert_eq!(outputs[2].offset(), change_offset);\n\n    // Make sure the payments were properly created\n    for ((output, scanned), payment) in tx.output.iter().zip(outputs.iter()).zip(payments.iter()) {\n      assert_eq!(\n        output,\n        &TxOut { script_pubkey: payment.0.clone(), value: Amount::from_sat(payment.1) },\n      );\n      assert_eq!(scanned.value(), payment.1 );\n    }\n\n    // Make sure the change is correct\n    assert_eq!(needed_fee, u64::try_from(tx.vsize()).unwrap() * FEE);\n    let input_value = output.value() + offset_output.value();\n    let output_value = tx.output.iter().map(|output| output.value.to_sat()).sum::<u64>();\n    assert_eq!(input_value - output_value, needed_fee);\n\n    let change_amount =\n      input_value - payments.iter().map(|payment| payment.1).sum::<u64>() - needed_fee;\n    assert_eq!(\n      tx.output[2],\n      TxOut { script_pubkey: change_addr, value: Amount::from_sat(change_amount) },\n    );\n\n    // This also tests send_raw_transaction and get_transaction, which the RPC test can't\n    // effectively test\n    rpc.send_raw_transaction(&tx).await.unwrap();\n    let mut hash = *tx.compute_txid().as_raw_hash().as_byte_array();\n    hash.reverse();\n    assert_eq!(tx, rpc.get_transaction(&hash).await.unwrap());\n    assert_eq!(expected_id, hash);\n  }\n\n  async fn test_data() {\n    let (keys, key) = keys();\n\n    let rpc = rpc().await;\n    let scanner = Scanner::new(key).unwrap();\n\n    let output = send_and_get_output(&rpc, &scanner, key).await;\n    assert_eq!(output.offset(), Scalar::ZERO);\n\n    let data_len = 60 + usize::try_from(OsRng.next_u64() % 21).unwrap();\n    let mut data = vec![0; data_len];\n    OsRng.fill_bytes(&mut data);\n\n    let tx = sign(\n      &keys,\n      &SignableTransaction::new(\n        vec![output],\n        &[],\n        Some(p2tr_script_buf(key).unwrap()),\n        Some(data.clone()),\n        FEE\n      ).unwrap()\n    );\n\n    
assert!(tx.output[0].script_pubkey.is_op_return());\n    let check = |mut instructions: Instructions| {\n      assert_eq!(instructions.next().unwrap().unwrap(), Instruction::Op(OP_RETURN));\n      assert_eq!(\n        instructions.next().unwrap().unwrap(),\n        Instruction::PushBytes(&PushBytesBuf::try_from(data.clone()).unwrap()),\n      );\n      assert!(instructions.next().is_none());\n    };\n    check(tx.output[0].script_pubkey.instructions());\n    check(tx.output[0].script_pubkey.instructions_minimal());\n  }\n}\n"
  },
  {
    "path": "networks/ethereum/.gitignore",
    "content": "# Solidity build outputs\ncache\nartifacts\n"
  },
  {
    "path": "networks/ethereum/Cargo.toml",
    "content": "[package]\nname = \"ethereum-serai\"\nversion = \"0.1.0\"\ndescription = \"An Ethereum library supporting Schnorr signing and on-chain verification\"\nlicense = \"AGPL-3.0-only\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/networks/ethereum\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\", \"Elizabeth Binks <elizabethjbinks@gmail.com>\"]\nedition = \"2021\"\npublish = false\nrust-version = \"1.79\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nthiserror = { version = \"1\", default-features = false }\n\nrand_core = { version = \"0.6\", default-features = false, features = [\"std\"] }\n\ntranscript = { package = \"flexible-transcript\", path = \"../../crypto/transcript\", default-features = false, features = [\"recommended\"] }\n\ngroup = { version = \"0.13\", default-features = false }\nk256 = { version = \"^0.13.1\", default-features = false, features = [\"std\", \"ecdsa\", \"arithmetic\"] }\nfrost = { package = \"modular-frost\", path = \"../../crypto/frost\", default-features = false, features = [\"secp256k1\"] }\n\nalloy-core = { version = \"0.8\", default-features = false }\nalloy-sol-types = { version = \"0.8\", default-features = false, features = [\"json\"] }\nalloy-consensus = { version = \"0.4\", default-features = false, features = [\"k256\"] }\nalloy-network = { version = \"0.4\", default-features = false }\nalloy-rpc-types-eth = { version = \"0.4\", default-features = false }\nalloy-rpc-client = { version = \"0.4\", default-features = false }\nalloy-simple-request-transport = { path = \"./alloy-simple-request-transport\", default-features = false }\nalloy-provider = { version = \"0.4\", default-features = false }\n\nalloy-node-bindings = { version = \"0.4\", default-features = false, optional = true }\n\n[dev-dependencies]\nfrost = { package = \"modular-frost\", path = \"../../crypto/frost\", default-features = false, 
features = [\"tests\"] }\n\ntokio = { version = \"1\", features = [\"macros\"] }\n\nalloy-node-bindings = { version = \"0.4\", default-features = false }\n\n[features]\ntests = [\"alloy-node-bindings\", \"frost/tests\"]\n"
  },
  {
    "path": "networks/ethereum/LICENSE",
    "content": "AGPL-3.0-only license\n\nCopyright (c) 2022-2023 Luke Parker\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License Version 3 as\npublished by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n"
  },
  {
    "path": "networks/ethereum/README.md",
    "content": "# Ethereum\n\nThis package contains Ethereum-related functionality, specifically deploying\nand interacting with Serai contracts.\n\nWhile `bitcoin-serai` is a general purpose library, `ethereum-serai` is Serai\nspecific. If any of the utilities are generally desired, please fork and\nmaintain your own copy to ensure the desired functionality is preserved, or\nopen an issue to request we make this library general purpose.\n\n### Dependencies\n\n- solc\n- [Foundry](https://github.com/foundry-rs/foundry)\n"
  },
  {
    "path": "networks/ethereum/alloy-simple-request-transport/Cargo.toml",
    "content": "[package]\nname = \"alloy-simple-request-transport\"\nversion = \"0.1.0\"\ndescription = \"A transport for alloy based off simple-request\"\nlicense = \"MIT\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/networks/ethereum/alloy-simple-request-transport\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nedition = \"2021\"\nrust-version = \"1.74\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\ntower = \"0.5\"\n\nserde_json = { version = \"1\", default-features = false }\nsimple-request = { path = \"../../../common/request\", default-features = false }\n\nalloy-json-rpc = { version = \"0.4\", default-features = false }\nalloy-transport = { version = \"0.4\", default-features = false }\n\n[features]\ndefault = [\"tls\"]\ntls = [\"simple-request/tls\"]\n"
  },
  {
    "path": "networks/ethereum/alloy-simple-request-transport/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2024 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "networks/ethereum/alloy-simple-request-transport/README.md",
    "content": "# Alloy Simple Request Transport\n\nA transport for alloy based on simple-request, a small HTTP client built around\nhyper.\n"
  },
  {
    "path": "networks/ethereum/alloy-simple-request-transport/src/lib.rs",
    "content": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![doc = include_str!(\"../README.md\")]\n\nuse core::task;\nuse std::io;\n\nuse alloy_json_rpc::{RequestPacket, ResponsePacket};\nuse alloy_transport::{TransportError, TransportErrorKind, TransportFut};\n\nuse simple_request::{hyper, Request, Client};\n\nuse tower::Service;\n\n#[derive(Clone, Debug)]\npub struct SimpleRequest {\n  client: Client,\n  url: String,\n}\n\nimpl SimpleRequest {\n  pub fn new(url: String) -> Self {\n    Self { client: Client::with_connection_pool(), url }\n  }\n}\n\nimpl Service<RequestPacket> for SimpleRequest {\n  type Response = ResponsePacket;\n  type Error = TransportError;\n  type Future = TransportFut<'static>;\n\n  #[inline]\n  fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> task::Poll<Result<(), Self::Error>> {\n    task::Poll::Ready(Ok(()))\n  }\n\n  #[inline]\n  fn call(&mut self, req: RequestPacket) -> Self::Future {\n    let inner = self.clone();\n    Box::pin(async move {\n      let packet = req.serialize().map_err(TransportError::SerError)?;\n      let request = Request::from(\n        hyper::Request::post(&inner.url)\n          .header(\"Content-Type\", \"application/json\")\n          .body(serde_json::to_vec(&packet).map_err(TransportError::SerError)?.into())\n          .unwrap(),\n      );\n\n      let mut res = inner\n        .client\n        .request(request)\n        .await\n        .map_err(|e| TransportErrorKind::custom(io::Error::other(format!(\"{e:?}\"))))?\n        .body()\n        .await\n        .map_err(|e| TransportErrorKind::custom(io::Error::other(format!(\"{e:?}\"))))?;\n\n      serde_json::from_reader(&mut res).map_err(|e| TransportError::deser_err(e, \"\"))\n    })\n  }\n}\n"
  },
  {
    "path": "networks/ethereum/build.rs",
    "content": "use std::process::Command;\n\nfn main() {\n  println!(\"cargo:rerun-if-changed=contracts/*\");\n  println!(\"cargo:rerun-if-changed=artifacts/*\");\n\n  for line in String::from_utf8(Command::new(\"solc\").args([\"--version\"]).output().unwrap().stdout)\n    .unwrap()\n    .lines()\n  {\n    if let Some(version) = line.strip_prefix(\"Version: \") {\n      let version = version.split('+').next().unwrap();\n      assert_eq!(version, \"0.8.26\");\n    }\n  }\n\n  #[rustfmt::skip]\n  let args = [\n    \"--base-path\", \".\",\n    \"-o\", \"./artifacts\", \"--overwrite\",\n    \"--bin\", \"--abi\",\n    \"--via-ir\", \"--optimize\",\n\n    \"./contracts/IERC20.sol\",\n\n    \"./contracts/Schnorr.sol\",\n    \"./contracts/Deployer.sol\",\n    \"./contracts/Sandbox.sol\",\n    \"./contracts/Router.sol\",\n\n    \"./src/tests/contracts/Schnorr.sol\",\n    \"./src/tests/contracts/ERC20.sol\",\n\n    \"--no-color\",\n  ];\n  let solc = Command::new(\"solc\").args(args).output().unwrap();\n  assert!(solc.status.success());\n  for line in String::from_utf8(solc.stderr).unwrap().lines() {\n    assert!(!line.starts_with(\"Error:\"));\n  }\n}\n"
  },
  {
    "path": "networks/ethereum/contracts/Deployer.sol",
    "content": "// SPDX-License-Identifier: AGPLv3\npragma solidity ^0.8.0;\n\n/*\nThe expected deployment process of the Router is as follows:\n\n1) A transaction deploying Deployer is made. Then, a deterministic signature is\n   created such that an account with an unknown private key is the creator of\n   the contract. Anyone can fund this address, and once anyone does, the\n   transaction deploying Deployer can be published by anyone. No other\n   transaction may be made from that account.\n\n2) Anyone deploys the Router through the Deployer. This uses a sequential nonce\n   such that meet-in-the-middle attacks, with complexity 2**80, aren't feasible.\n   While such attacks would still be feasible if the Deployer's address was\n   controllable, the usage of a deterministic signature with a NUMS method\n   prevents that.\n\nThis doesn't have any denial-of-service risks and will resolve once anyone steps\nforward as deployer. This does fail to guarantee an identical address across\nevery chain, though it enables letting anyone efficiently ask the Deployer for\nthe address (with the Deployer having an identical address on every chain).\n\nUnfortunately, guaranteeing identical addresses aren't feasible. We'd need the\nDeployer contract to use a consistent salt for the Router, yet the Router must\nbe deployed with a specific public key for Serai. Since Ethereum isn't able to\ndetermine a valid public key (one the result of a Serai DKG) from a dishonest\npublic key, we have to allow multiple deployments with Serai being the one to\ndetermine which to use.\n\nThe alternative would be to have a council publish the Serai key on-Ethereum,\nwith Serai verifying the published result. 
This would introduce a DoS risk in\nthe council not publishing the correct key/not publishing any key.\n*/\n\ncontract Deployer {\n  event Deployment(bytes32 indexed init_code_hash, address created);\n\n  error DeploymentFailed();\n\n  function deploy(bytes memory init_code) external {\n    address created;\n    assembly {\n      created := create(0, add(init_code, 0x20), mload(init_code))\n    }\n    if (created == address(0)) {\n      revert DeploymentFailed();\n    }\n    // These may be emitted out of order upon re-entrancy\n    emit Deployment(keccak256(init_code), created);\n  }\n}\n"
  },
  {
    "path": "networks/ethereum/contracts/IERC20.sol",
    "content": "// SPDX-License-Identifier: CC0\npragma solidity ^0.8.0;\n\ninterface IERC20 {\n  event Transfer(address indexed from, address indexed to, uint256 value);\n  event Approval(address indexed owner, address indexed spender, uint256 value);\n\n  function name() external view returns (string memory);\n  function symbol() external view returns (string memory);\n  function decimals() external view returns (uint8);\n\n  function totalSupply() external view returns (uint256);\n\n  function balanceOf(address owner) external view returns (uint256);\n  function transfer(address to, uint256 value) external returns (bool);\n  function transferFrom(address from, address to, uint256 value) external returns (bool);\n\n  function approve(address spender, uint256 value) external returns (bool);\n  function allowance(address owner, address spender) external view returns (uint256);\n}\n"
  },
  {
    "path": "networks/ethereum/contracts/Router.sol",
    "content": "// SPDX-License-Identifier: AGPLv3\npragma solidity ^0.8.0;\n\nimport \"./IERC20.sol\";\n\nimport \"./Schnorr.sol\";\nimport \"./Sandbox.sol\";\n\ncontract Router {\n  // Nonce is incremented for each batch of transactions executed/key update\n  uint256 public nonce;\n\n  // Current public key's x-coordinate\n  // This key must always have the parity defined within the Schnorr contract\n  bytes32 public seraiKey;\n\n  struct OutInstruction {\n    address to;\n    Call[] calls;\n\n    uint256 value;\n  }\n\n  struct Signature {\n    bytes32 c;\n    bytes32 s;\n  }\n\n  event SeraiKeyUpdated(\n    uint256 indexed nonce,\n    bytes32 indexed key,\n    Signature signature\n  );\n  event InInstruction(\n    address indexed from,\n    address indexed coin,\n    uint256 amount,\n    bytes instruction\n  );\n  // success is a uint256 representing a bitfield of transaction successes\n  event Executed(\n    uint256 indexed nonce,\n    bytes32 indexed batch,\n    uint256 success,\n    Signature signature\n  );\n\n  // error types\n  error InvalidKey();\n  error InvalidSignature();\n  error InvalidAmount();\n  error FailedTransfer();\n  error TooManyTransactions();\n\n  modifier _updateSeraiKeyAtEndOfFn(\n    uint256 _nonce,\n    bytes32 key,\n    Signature memory sig\n  ) {\n    if (\n      (key == bytes32(0)) ||\n      ((bytes32(uint256(key) % Schnorr.Q)) != key)\n    ) {\n      revert InvalidKey();\n    }\n\n    _;\n\n    seraiKey = key;\n    emit SeraiKeyUpdated(_nonce, key, sig);\n  }\n\n  constructor(bytes32 _seraiKey) _updateSeraiKeyAtEndOfFn(\n    0,\n    _seraiKey,\n    Signature({ c: bytes32(0), s: bytes32(0) })\n  ) {\n    nonce = 1;\n  }\n\n  // updateSeraiKey validates the given Schnorr signature against the current\n  // public key, and if successful, updates the contract's public key to the\n  // given one.\n  function updateSeraiKey(\n    bytes32 _seraiKey,\n    Signature calldata sig\n  ) external _updateSeraiKeyAtEndOfFn(nonce, _seraiKey, sig) 
{\n    bytes memory message =\n      abi.encodePacked(\"updateSeraiKey\", block.chainid, nonce, _seraiKey);\n    nonce++;\n\n    if (!Schnorr.verify(seraiKey, message, sig.c, sig.s)) {\n      revert InvalidSignature();\n    }\n  }\n\n  function inInstruction(\n    address coin,\n    uint256 amount,\n    bytes memory instruction\n  ) external payable {\n    if (coin == address(0)) {\n      if (amount != msg.value) {\n        revert InvalidAmount();\n      }\n    } else {\n      (bool success, bytes memory res) =\n        address(coin).call(\n          abi.encodeWithSelector(\n            IERC20.transferFrom.selector,\n            msg.sender,\n            address(this),\n            amount\n          )\n        );\n\n      // Require there was nothing returned, which is done by some non-standard\n      // tokens, or that the ERC20 contract did in fact return true\n      bool nonStandardResOrTrue =\n        (res.length == 0) || abi.decode(res, (bool));\n      if (!(success && nonStandardResOrTrue)) {\n        revert FailedTransfer();\n      }\n    }\n\n    /*\n    Due to fee-on-transfer tokens, emitting the amount directly is frowned upon.\n    The amount instructed to transfer may not actually be the amount\n    transferred.\n\n    If we add nonReentrant to every single function which can effect the\n    balance, we can check the amount exactly matches. This prevents transfers of\n    less value than expected occurring, at least, not without an additional\n    transfer to top up the difference (which isn't routed through this contract\n    and accordingly isn't trying to artificially create events).\n\n    If we don't add nonReentrant, a transfer can be started, and then a new\n    transfer for the difference can follow it up (again and again until a\n    rounding error is reached). 
This contract would believe all transfers were\n    done in full, despite each only being done in part (except for the last\n    one).\n\n    Given fee-on-transfer tokens aren't intended to be supported, the only\n    token planned to be supported is Dai and it doesn't have any fee-on-transfer\n    logic, fee-on-transfer tokens aren't even able to be supported at this time,\n    we simply classify this entire class of tokens as non-standard\n    implementations which induce undefined behavior. It is the Serai network's\n    role not to add support for any non-standard implementations.\n    */\n    emit InInstruction(msg.sender, coin, amount, instruction);\n  }\n\n  // execute accepts a list of transactions to execute as well as a signature.\n  // if signature verification passes, the given transactions are executed.\n  // if signature verification fails, this function will revert.\n  function execute(\n    OutInstruction[] calldata transactions,\n    Signature calldata sig\n  ) external {\n    if (transactions.length > 256) {\n      revert TooManyTransactions();\n    }\n\n    bytes memory message =\n      abi.encode(\"execute\", block.chainid, nonce, transactions);\n    uint256 executed_with_nonce = nonce;\n    // This prevents re-entrancy from causing double spends yet does allow\n    // out-of-order execution via re-entrancy\n    nonce++;\n\n    if (!Schnorr.verify(seraiKey, message, sig.c, sig.s)) {\n      revert InvalidSignature();\n    }\n\n    uint256 successes;\n    for (uint256 i = 0; i < transactions.length; i++) {\n      bool success;\n\n      // If there are no calls, send to `to` the value\n      if (transactions[i].calls.length == 0) {\n        (success, ) = transactions[i].to.call{\n          value: transactions[i].value,\n          gas: 5_000\n        }(\"\");\n      } else {\n        // If there are calls, ignore `to`. 
Deploy a new Sandbox and proxy the\n        // calls through that\n        //\n        // We could use a single sandbox in order to reduce gas costs, yet that\n        // risks one person creating an approval that's hooked before another\n        // user's intended action executes, in order to drain their coins\n        //\n        // While technically, that would be a flaw in the sandboxed flow, this\n        // is robust and prevents such flaws from being possible\n        //\n        // We also don't want people to set state via the Sandbox and expect it\n        // future available when anyone else could set a distinct value\n        Sandbox sandbox = new Sandbox();\n        (success, ) = address(sandbox).call{\n          value: transactions[i].value,\n          // TODO: Have the Call specify the gas up front\n          gas: 350_000\n        }(\n          abi.encodeWithSelector(\n            Sandbox.sandbox.selector,\n            transactions[i].calls\n          )\n        );\n      }\n\n      assembly {\n        successes := or(successes, shl(i, success))\n      }\n    }\n    emit Executed(\n      executed_with_nonce,\n      keccak256(message),\n      successes,\n      sig\n    );\n  }\n}\n"
  },
  {
    "path": "networks/ethereum/contracts/Sandbox.sol",
    "content": "// SPDX-License-Identifier: AGPLv3\npragma solidity ^0.8.24;\n\nstruct Call {\n  address to;\n  uint256 value;\n  bytes data;\n}\n\n// A minimal sandbox focused on gas efficiency.\n//\n// The first call is executed if any of the calls fail, making it a fallback.\n// All other calls are executed sequentially.\ncontract Sandbox {\n  error AlreadyCalled();\n  error CallsFailed();\n\n  function sandbox(Call[] calldata calls) external payable {\n    // Prevent re-entrancy due to this executing arbitrary calls from anyone\n    // and anywhere\n    bool called;\n    assembly { called := tload(0) }\n    if (called) {\n      revert AlreadyCalled();\n    }\n    assembly { tstore(0, 1) }\n\n    // Execute the calls, starting from 1\n    for (uint256 i = 1; i < calls.length; i++) {\n      (bool success, ) =\n        calls[i].to.call{ value: calls[i].value }(calls[i].data);\n\n      // If this call failed, execute the fallback (call 0)\n      if (!success) {\n        (success, ) =\n          calls[0].to.call{ value: address(this).balance }(calls[0].data);\n        // If this call also failed, revert entirely\n        if (!success) {\n          revert CallsFailed();\n        }\n        return;\n      }\n    }\n\n    // We don't clear the re-entrancy guard as this contract should never be\n    // called again, so there's no reason to spend the effort\n  }\n}\n"
  },
  {
    "path": "networks/ethereum/contracts/Schnorr.sol",
    "content": "// SPDX-License-Identifier: AGPLv3\npragma solidity ^0.8.0;\n\n// see https://github.com/noot/schnorr-verify for implementation details\nlibrary Schnorr {\n  // secp256k1 group order\n  uint256 constant public Q =\n    0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141;\n\n  // Fixed parity for the public keys used in this contract\n  // This avoids spending a word passing the parity in a similar style to\n  // Bitcoin's Taproot\n  uint8 constant public KEY_PARITY = 27;\n\n  error InvalidSOrA();\n  error MalformedSignature();\n\n  // px := public key x-coord, where the public key has a parity of KEY_PARITY\n  // message := 32-byte hash of the message\n  // c := schnorr signature challenge\n  // s := schnorr signature\n  function verify(\n    bytes32 px,\n    bytes memory message,\n    bytes32 c,\n    bytes32 s\n  ) internal pure returns (bool) {\n    // ecrecover = (m, v, r, s) -> key\n    // We instead pass the following to obtain the nonce (not the key)\n    // Then we hash it and verify it matches the challenge\n    bytes32 sa = bytes32(Q - mulmod(uint256(s), uint256(px), Q));\n    bytes32 ca = bytes32(Q - mulmod(uint256(c), uint256(px), Q));\n\n    // For safety, we want each input to ecrecover to be 0 (sa, px, ca)\n    // The ecreover precomple checks `r` and `s` (`px` and `ca`) are non-zero\n    // That leaves us to check `sa` are non-zero\n    if (sa == 0) revert InvalidSOrA();\n    address R = ecrecover(sa, KEY_PARITY, px, ca);\n    if (R == address(0)) revert MalformedSignature();\n\n    // Check the signature is correct by rebuilding the challenge\n    return c == keccak256(abi.encodePacked(R, px, message));\n  }\n}\n"
  },
  {
    "path": "networks/ethereum/relayer/Cargo.toml",
    "content": "[package]\nname = \"serai-ethereum-relayer\"\nversion = \"0.1.0\"\ndescription = \"A relayer for Serai's Ethereum transactions\"\nlicense = \"AGPL-3.0-only\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/networks/ethereum/relayer\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nkeywords = []\nedition = \"2021\"\npublish = false\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nlog = { version = \"0.4\", default-features = false, features = [\"std\"] }\nenv_logger = { version = \"0.10\", default-features = false, features = [\"humantime\"] }\n\ntokio = { version = \"1\", default-features = false, features = [\"rt\", \"time\", \"io-util\", \"net\", \"macros\"] }\n\nserai-env = { path = \"../../../common/env\" }\nserai-db = { path = \"../../../common/db\" }\n\n[features]\nparity-db = [\"serai-db/parity-db\"]\nrocksdb = [\"serai-db/rocksdb\"]\n"
  },
  {
    "path": "networks/ethereum/relayer/LICENSE",
    "content": "AGPL-3.0-only license\n\nCopyright (c) 2023-2024 Luke Parker\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License Version 3 as\npublished by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n"
  },
  {
    "path": "networks/ethereum/relayer/README.md",
    "content": "# Ethereum Transaction Relayer\n\nThis server collects Ethereum router commands to be published, offering an RPC\nto fetch them.\n"
  },
  {
    "path": "networks/ethereum/relayer/src/main.rs",
    "content": "pub(crate) use tokio::{\n  io::{AsyncReadExt, AsyncWriteExt},\n  net::TcpListener,\n};\n\nuse serai_db::{Get, DbTxn, Db as DbTrait};\n\n#[tokio::main(flavor = \"current_thread\")]\nasync fn main() {\n  // Override the panic handler with one which will panic if any tokio task panics\n  {\n    let existing = std::panic::take_hook();\n    std::panic::set_hook(Box::new(move |panic| {\n      existing(panic);\n      const MSG: &str = \"exiting the process due to a task panicking\";\n      println!(\"{MSG}\");\n      log::error!(\"{MSG}\");\n      std::process::exit(1);\n    }));\n  }\n\n  if std::env::var(\"RUST_LOG\").is_err() {\n    std::env::set_var(\"RUST_LOG\", serai_env::var(\"RUST_LOG\").unwrap_or_else(|| \"info\".to_string()));\n  }\n  env_logger::init();\n\n  log::info!(\"Starting Ethereum relayer server...\");\n\n  // Open the DB\n  #[allow(unused_variables, unreachable_code)]\n  let db = {\n    #[cfg(all(feature = \"parity-db\", feature = \"rocksdb\"))]\n    panic!(\"built with parity-db and rocksdb\");\n    #[cfg(all(feature = \"parity-db\", not(feature = \"rocksdb\")))]\n    let db =\n      serai_db::new_parity_db(&serai_env::var(\"DB_PATH\").expect(\"path to DB wasn't specified\"));\n    #[cfg(feature = \"rocksdb\")]\n    let db =\n      serai_db::new_rocksdb(&serai_env::var(\"DB_PATH\").expect(\"path to DB wasn't specified\"));\n    db\n  };\n\n  // Start command recipience server\n  // This should not be publicly exposed\n  // TODO: Add auth\n  tokio::spawn({\n    let db = db.clone();\n    async move {\n      // 5132 ^ ((b'E' << 8) | b'R')\n      let server = TcpListener::bind(\"0.0.0.0:20830\").await.unwrap();\n      loop {\n        let (mut socket, _) = server.accept().await.unwrap();\n        let db = db.clone();\n        tokio::spawn(async move {\n          let mut db = db.clone();\n          while let Ok(msg_len) = socket.read_u32_le().await {\n            let mut buf = vec![0; usize::try_from(msg_len).unwrap()];\n            let 
Ok(_) = socket.read_exact(&mut buf).await else { break };\n\n            if buf.len() < 5 {\n              break;\n            }\n            let nonce = u32::from_le_bytes(buf[.. 4].try_into().unwrap());\n            let mut txn = db.txn();\n            txn.put(nonce.to_le_bytes(), &buf[4 ..]);\n            txn.commit();\n\n            let Ok(()) = socket.write_all(&[1]).await else { break };\n\n            log::info!(\"received signed command #{nonce}\");\n          }\n        });\n      }\n    }\n  });\n\n  // Start command fetch server\n  // 5132 ^ ((b'E' << 8) | b'R') + 1\n  let server = TcpListener::bind(\"0.0.0.0:20831\").await.unwrap();\n  loop {\n    let (mut socket, _) = server.accept().await.unwrap();\n    let db = db.clone();\n    tokio::spawn(async move {\n      let db = db.clone();\n      loop {\n        // Nonce to get the router command for\n        let mut buf = vec![0; 4];\n        let Ok(_) = socket.read_exact(&mut buf).await else { break };\n\n        let command = db.get(&buf[.. 4]).unwrap_or(vec![]);\n        let Ok(()) = socket.write_all(&u32::try_from(command.len()).unwrap().to_le_bytes()).await\n        else {\n          break;\n        };\n        let Ok(()) = socket.write_all(&command).await else { break };\n      }\n    });\n  }\n}\n"
  },
  {
    "path": "networks/ethereum/src/abi/mod.rs",
    "content": "use alloy_sol_types::sol;\n\n#[rustfmt::skip]\n#[allow(warnings)]\n#[allow(needless_pass_by_value)]\n#[allow(clippy::all)]\n#[allow(clippy::ignored_unit_patterns)]\n#[allow(clippy::redundant_closure_for_method_calls)]\nmod erc20_container {\n  use super::*;\n  sol!(\"contracts/IERC20.sol\");\n}\npub use erc20_container::IERC20 as erc20;\n\n#[rustfmt::skip]\n#[allow(warnings)]\n#[allow(needless_pass_by_value)]\n#[allow(clippy::all)]\n#[allow(clippy::ignored_unit_patterns)]\n#[allow(clippy::redundant_closure_for_method_calls)]\nmod deployer_container {\n  use super::*;\n  sol!(\"contracts/Deployer.sol\");\n}\npub use deployer_container::Deployer as deployer;\n\n#[rustfmt::skip]\n#[allow(warnings)]\n#[allow(needless_pass_by_value)]\n#[allow(clippy::all)]\n#[allow(clippy::ignored_unit_patterns)]\n#[allow(clippy::redundant_closure_for_method_calls)]\nmod router_container {\n  use super::*;\n  sol!(Router, \"artifacts/Router.abi\");\n}\npub use router_container::Router as router;\n"
  },
  {
    "path": "networks/ethereum/src/crypto.rs",
    "content": "#![allow(deprecated)]\n\nuse group::ff::PrimeField;\nuse k256::{\n  elliptic_curve::{ops::Reduce, point::AffineCoordinates, sec1::ToEncodedPoint},\n  ProjectivePoint, Scalar, U256 as KU256,\n};\n#[cfg(test)]\nuse k256::{elliptic_curve::point::DecompressPoint, AffinePoint};\n\nuse frost::{\n  algorithm::{Hram, SchnorrSignature},\n  curve::{Ciphersuite, Secp256k1},\n};\n\nuse alloy_core::primitives::{Parity, Signature as AlloySignature};\nuse alloy_consensus::{SignableTransaction, Signed, TxLegacy};\n\nuse crate::abi::router::{Signature as AbiSignature};\n\npub(crate) fn keccak256(data: &[u8]) -> [u8; 32] {\n  alloy_core::primitives::keccak256(data).into()\n}\n\npub(crate) fn hash_to_scalar(data: &[u8]) -> Scalar {\n  <Scalar as Reduce<KU256>>::reduce_bytes(&keccak256(data).into())\n}\n\npub fn address(point: &ProjectivePoint) -> [u8; 20] {\n  let encoded_point = point.to_encoded_point(false);\n  // Last 20 bytes of the hash of the concatenated x and y coordinates\n  // We obtain the concatenated x and y coordinates via the uncompressed encoding of the point\n  keccak256(&encoded_point.as_ref()[1 .. 
65])[12 ..].try_into().unwrap()\n}\n\n/// Deterministically sign a transaction.\n///\n/// This function panics if passed a transaction with a non-None chain ID.\npub fn deterministically_sign(tx: &TxLegacy) -> Signed<TxLegacy> {\n  assert!(\n    tx.chain_id.is_none(),\n    \"chain ID was Some when deterministically signing a TX (causing a non-deterministic signer)\"\n  );\n\n  let sig_hash = tx.signature_hash().0;\n  let mut r = hash_to_scalar(&[sig_hash.as_slice(), b\"r\"].concat());\n  let mut s = hash_to_scalar(&[sig_hash.as_slice(), b\"s\"].concat());\n  loop {\n    let r_bytes: [u8; 32] = r.to_repr().into();\n    let s_bytes: [u8; 32] = s.to_repr().into();\n    let v = Parity::NonEip155(false);\n    let signature =\n      AlloySignature::from_scalars_and_parity(r_bytes.into(), s_bytes.into(), v).unwrap();\n    let tx = tx.clone().into_signed(signature);\n    if tx.recover_signer().is_ok() {\n      return tx;\n    }\n\n    // Re-hash until valid\n    r = hash_to_scalar(r_bytes.as_ref());\n    s = hash_to_scalar(s_bytes.as_ref());\n  }\n}\n\n/// The public key for a Schnorr-signing account.\n#[allow(non_snake_case)]\n#[derive(Clone, Copy, PartialEq, Eq, Debug)]\npub struct PublicKey {\n  pub(crate) A: ProjectivePoint,\n  pub(crate) px: Scalar,\n}\n\nimpl PublicKey {\n  /// Construct a new `PublicKey`.\n  ///\n  /// This will return None if the provided point isn't eligible to be a public key (due to\n  /// bounds such as parity).\n  #[allow(non_snake_case)]\n  pub fn new(A: ProjectivePoint) -> Option<PublicKey> {\n    let affine = A.to_affine();\n    // Only allow even keys to save a word within Ethereum\n    let is_odd = bool::from(affine.y_is_odd());\n    if is_odd {\n      None?;\n    }\n\n    let x_coord = affine.x();\n    let x_coord_scalar = <Scalar as Reduce<KU256>>::reduce_bytes(&x_coord);\n    // Return None if a reduction would occur\n    // Reductions would be incredibly unlikely and shouldn't be an issue, yet it's one less\n    // headache/concern to 
have\n    // This does ban a trivial amoount of public keys\n    if x_coord_scalar.to_repr() != x_coord {\n      None?;\n    }\n\n    Some(PublicKey { A, px: x_coord_scalar })\n  }\n\n  pub fn point(&self) -> ProjectivePoint {\n    self.A\n  }\n\n  pub(crate) fn eth_repr(&self) -> [u8; 32] {\n    self.px.to_repr().into()\n  }\n\n  #[cfg(test)]\n  pub(crate) fn from_eth_repr(repr: [u8; 32]) -> Option<Self> {\n    #[allow(non_snake_case)]\n    let A = Option::<AffinePoint>::from(AffinePoint::decompress(&repr.into(), 0.into()))?.into();\n    Option::from(Scalar::from_repr(repr.into())).map(|px| PublicKey { A, px })\n  }\n}\n\n/// The HRAm to use for the Schnorr contract.\n#[derive(Clone, Default)]\npub struct EthereumHram {}\nimpl Hram<Secp256k1> for EthereumHram {\n  #[allow(non_snake_case)]\n  fn hram(R: &ProjectivePoint, A: &ProjectivePoint, m: &[u8]) -> Scalar {\n    let x_coord = A.to_affine().x();\n\n    let mut data = address(R).to_vec();\n    data.extend(x_coord.as_slice());\n    data.extend(m);\n\n    <Scalar as Reduce<KU256>>::reduce_bytes(&keccak256(&data).into())\n  }\n}\n\n/// A signature for the Schnorr contract.\n#[derive(Clone, Copy, PartialEq, Eq, Debug)]\npub struct Signature {\n  pub(crate) c: Scalar,\n  pub(crate) s: Scalar,\n}\nimpl Signature {\n  pub fn verify(&self, public_key: &PublicKey, message: &[u8]) -> bool {\n    #[allow(non_snake_case)]\n    let R = (Secp256k1::generator() * self.s) - (public_key.A * self.c);\n    EthereumHram::hram(&R, &public_key.A, message) == self.c\n  }\n\n  /// Construct a new `Signature`.\n  ///\n  /// This will return None if the signature is invalid.\n  pub fn new(\n    public_key: &PublicKey,\n    message: &[u8],\n    signature: SchnorrSignature<Secp256k1>,\n  ) -> Option<Signature> {\n    let c = EthereumHram::hram(&signature.R, &public_key.A, message);\n    if !signature.verify(public_key.A, c) {\n      None?;\n    }\n\n    let res = Signature { c, s: signature.s };\n    assert!(res.verify(public_key, 
message));\n    Some(res)\n  }\n\n  pub fn c(&self) -> Scalar {\n    self.c\n  }\n  pub fn s(&self) -> Scalar {\n    self.s\n  }\n\n  pub fn to_bytes(&self) -> [u8; 64] {\n    let mut res = [0; 64];\n    res[.. 32].copy_from_slice(self.c.to_repr().as_ref());\n    res[32 ..].copy_from_slice(self.s.to_repr().as_ref());\n    res\n  }\n\n  pub fn from_bytes(bytes: [u8; 64]) -> std::io::Result<Self> {\n    let mut reader = bytes.as_slice();\n    let c = Secp256k1::read_F(&mut reader)?;\n    let s = Secp256k1::read_F(&mut reader)?;\n    Ok(Signature { c, s })\n  }\n}\nimpl From<&Signature> for AbiSignature {\n  fn from(sig: &Signature) -> AbiSignature {\n    let c: [u8; 32] = sig.c.to_repr().into();\n    let s: [u8; 32] = sig.s.to_repr().into();\n    AbiSignature { c: c.into(), s: s.into() }\n  }\n}\n"
  },
  {
    "path": "networks/ethereum/src/deployer.rs",
    "content": "use std::sync::Arc;\n\nuse alloy_core::primitives::{hex::FromHex, Address, B256, U256, Bytes, TxKind};\nuse alloy_consensus::{Signed, TxLegacy};\n\nuse alloy_sol_types::{SolCall, SolEvent};\n\nuse alloy_rpc_types_eth::{BlockNumberOrTag, Filter};\nuse alloy_simple_request_transport::SimpleRequest;\nuse alloy_provider::{Provider, RootProvider};\n\nuse crate::{\n  Error,\n  crypto::{self, keccak256, PublicKey},\n  router::Router,\n};\npub use crate::abi::deployer as abi;\n\n/// The Deployer contract for the Router contract.\n///\n/// This Deployer has a deterministic address, letting it be immediately identified on any\n/// compatible chain. It then supports retrieving the Router contract's address (which isn't\n/// deterministic) using a single log query.\n#[derive(Clone, Debug)]\npub struct Deployer;\nimpl Deployer {\n  /// Obtain the transaction to deploy this contract, already signed.\n  ///\n  /// The account this transaction is sent from (which is populated in `from`) must be sufficiently\n  /// funded for this transaction to be submitted. 
This account has no known private key to anyone,\n  /// so ETH sent can be neither misappropriated nor returned.\n  pub fn deployment_tx() -> Signed<TxLegacy> {\n    let bytecode = include_str!(\"../artifacts/Deployer.bin\");\n    let bytecode =\n      Bytes::from_hex(bytecode).expect(\"compiled-in Deployer bytecode wasn't valid hex\");\n\n    let tx = TxLegacy {\n      chain_id: None,\n      nonce: 0,\n      gas_price: 100_000_000_000u128,\n      // TODO: Use a more accurate gas limit\n      gas_limit: 1_000_000,\n      to: TxKind::Create,\n      value: U256::ZERO,\n      input: bytecode,\n    };\n\n    crypto::deterministically_sign(&tx)\n  }\n\n  /// Obtain the deterministic address for this contract.\n  pub fn address() -> [u8; 20] {\n    let deployer_deployer =\n      Self::deployment_tx().recover_signer().expect(\"deployment_tx didn't have a valid signature\");\n    **Address::create(&deployer_deployer, 0)\n  }\n\n  /// Construct a new view of the `Deployer`.\n  pub async fn new(provider: Arc<RootProvider<SimpleRequest>>) -> Result<Option<Self>, Error> {\n    let address = Self::address();\n    let code = provider.get_code_at(address.into()).await.map_err(|_| Error::ConnectionError)?;\n    // Contract has yet to be deployed\n    if code.is_empty() {\n      return Ok(None);\n    }\n    Ok(Some(Self))\n  }\n\n  /// Yield the `ContractCall` necessary to deploy the Router.\n  pub fn deploy_router(&self, key: &PublicKey) -> TxLegacy {\n    TxLegacy {\n      to: TxKind::Call(Self::address().into()),\n      input: abi::deployCall::new((Router::init_code(key).into(),)).abi_encode().into(),\n      gas_limit: 1_000_000,\n      ..Default::default()\n    }\n  }\n\n  /// Find the first Router deployed with the specified key as its first key.\n  ///\n  /// This is the Router Serai will use, and is the only way to construct a `Router`.\n  pub async fn find_router(\n    &self,\n    provider: Arc<RootProvider<SimpleRequest>>,\n    key: &PublicKey,\n  ) -> 
Result<Option<Router>, Error> {\n    let init_code = Router::init_code(key);\n    let init_code_hash = keccak256(&init_code);\n\n    #[cfg(not(test))]\n    let to_block = BlockNumberOrTag::Finalized;\n    #[cfg(test)]\n    let to_block = BlockNumberOrTag::Latest;\n\n    // Find the first log using this init code (where the init code is binding to the key)\n    // TODO: Make an abstraction for event filtering (de-duplicating common code)\n    let filter =\n      Filter::new().from_block(0).to_block(to_block).address(Address::from(Self::address()));\n    let filter = filter.event_signature(abi::Deployment::SIGNATURE_HASH);\n    let filter = filter.topic1(B256::from(init_code_hash));\n    let logs = provider.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;\n\n    let Some(first_log) = logs.first() else { return Ok(None) };\n    let router = first_log\n      .log_decode::<abi::Deployment>()\n      .map_err(|_| Error::ConnectionError)?\n      .inner\n      .data\n      .created;\n\n    Ok(Some(Router::new(provider, router)))\n  }\n}\n"
  },
  {
    "path": "networks/ethereum/src/erc20.rs",
    "content": "use std::{sync::Arc, collections::HashSet};\n\nuse alloy_core::primitives::{Address, B256, U256};\n\nuse alloy_sol_types::{SolInterface, SolEvent};\n\nuse alloy_rpc_types_eth::Filter;\nuse alloy_simple_request_transport::SimpleRequest;\nuse alloy_provider::{Provider, RootProvider};\n\nuse crate::Error;\npub use crate::abi::erc20 as abi;\nuse abi::{IERC20Calls, Transfer, transferCall, transferFromCall};\n\n#[derive(Clone, Debug)]\npub struct TopLevelErc20Transfer {\n  pub id: [u8; 32],\n  pub from: [u8; 20],\n  pub amount: U256,\n  pub data: Vec<u8>,\n}\n\n/// A view for an ERC20 contract.\n#[derive(Clone, Debug)]\npub struct Erc20(Arc<RootProvider<SimpleRequest>>, Address);\nimpl Erc20 {\n  /// Construct a new view of the specified ERC20 contract.\n  pub fn new(provider: Arc<RootProvider<SimpleRequest>>, address: [u8; 20]) -> Self {\n    Self(provider, Address::from(&address))\n  }\n\n  pub async fn top_level_transfers(\n    &self,\n    block: u64,\n    to: [u8; 20],\n  ) -> Result<Vec<TopLevelErc20Transfer>, Error> {\n    let filter = Filter::new().from_block(block).to_block(block).address(self.1);\n    let filter = filter.event_signature(Transfer::SIGNATURE_HASH);\n    let mut to_topic = [0; 32];\n    to_topic[12 ..].copy_from_slice(&to);\n    let filter = filter.topic2(B256::from(to_topic));\n    let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;\n\n    let mut handled = HashSet::new();\n\n    let mut top_level_transfers = vec![];\n    for log in logs {\n      // Double check the address which emitted this log\n      if log.address() != self.1 {\n        Err(Error::ConnectionError)?;\n      }\n\n      let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?;\n      let tx =\n        self.0.get_transaction_by_hash(tx_id).await.ok().flatten().ok_or(Error::ConnectionError)?;\n\n      // If this is a top-level call...\n      if tx.to == Some(self.1) {\n        // And we recognize the call...\n        // Don't 
validate the encoding as this can't be re-encoded to an identical bytestring due\n        // to the InInstruction appended\n        if let Ok(call) = IERC20Calls::abi_decode(&tx.input, false) {\n          // Extract the top-level call's from/to/value\n          let (from, call_to, value) = match call {\n            IERC20Calls::transfer(transferCall { to: call_to, value }) => (tx.from, call_to, value),\n            IERC20Calls::transferFrom(transferFromCall { from, to: call_to, value }) => {\n              (from, call_to, value)\n            }\n            // Treat any other function selectors as unrecognized\n            _ => continue,\n          };\n\n          let log = log.log_decode::<Transfer>().map_err(|_| Error::ConnectionError)?.inner.data;\n\n          // Ensure the top-level transfer is equivalent, and this presumably isn't a log for an\n          // internal transfer\n          if (log.from != from) || (call_to != to) || (value != log.value) {\n            continue;\n          }\n\n          // Now that the top-level transfer is confirmed to be equivalent to the log, ensure it's\n          // the only log we handle\n          if handled.contains(&tx_id) {\n            continue;\n          }\n          handled.insert(tx_id);\n\n          // Read the data appended after\n          let encoded = call.abi_encode();\n          let data = tx.input.as_ref()[encoded.len() ..].to_vec();\n\n          // Push the transfer\n          top_level_transfers.push(TopLevelErc20Transfer {\n            // Since we'll only handle one log for this TX, set the ID to the TX ID\n            id: *tx_id,\n            from: *log.from.0,\n            amount: log.value,\n            data,\n          });\n        }\n      }\n    }\n    Ok(top_level_transfers)\n  }\n}\n"
  },
  {
    "path": "networks/ethereum/src/lib.rs",
    "content": "use thiserror::Error;\n\npub mod alloy {\n  pub use alloy_core::primitives;\n  pub use alloy_core as core;\n  pub use alloy_sol_types as sol_types;\n\n  pub use alloy_consensus as consensus;\n  pub use alloy_network as network;\n  pub use alloy_rpc_types_eth as rpc_types;\n  pub use alloy_simple_request_transport as simple_request_transport;\n  pub use alloy_rpc_client as rpc_client;\n  pub use alloy_provider as provider;\n}\n\npub mod crypto;\n\npub(crate) mod abi;\n\npub mod erc20;\npub mod deployer;\npub mod router;\n\npub mod machine;\n\n#[cfg(any(test, feature = \"tests\"))]\npub mod tests;\n\n#[derive(Clone, Copy, PartialEq, Eq, Debug, Error)]\npub enum Error {\n  #[error(\"failed to verify Schnorr signature\")]\n  InvalidSignature,\n  #[error(\"couldn't make call/send TX\")]\n  ConnectionError,\n}\n"
  },
  {
    "path": "networks/ethereum/src/machine.rs",
    "content": "use std::{\n  io::{self, Read},\n  collections::HashMap,\n};\n\nuse rand_core::{RngCore, CryptoRng};\n\nuse transcript::{Transcript, RecommendedTranscript};\n\nuse group::GroupEncoding;\nuse frost::{\n  curve::{Ciphersuite, Secp256k1},\n  Participant, ThresholdKeys, FrostError,\n  algorithm::Schnorr,\n  sign::*,\n};\n\nuse alloy_core::primitives::U256;\n\nuse crate::{\n  crypto::{PublicKey, EthereumHram, Signature},\n  router::{\n    abi::{Call as AbiCall, OutInstruction as AbiOutInstruction},\n    Router,\n  },\n};\n\n#[derive(Clone, PartialEq, Eq, Debug)]\npub struct Call {\n  pub to: [u8; 20],\n  pub value: U256,\n  pub data: Vec<u8>,\n}\nimpl Call {\n  pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {\n    let mut to = [0; 20];\n    reader.read_exact(&mut to)?;\n\n    let value = {\n      let mut value_bytes = [0; 32];\n      reader.read_exact(&mut value_bytes)?;\n      U256::from_le_slice(&value_bytes)\n    };\n\n    let mut data_len = {\n      let mut data_len = [0; 4];\n      reader.read_exact(&mut data_len)?;\n      usize::try_from(u32::from_le_bytes(data_len)).expect(\"u32 couldn't fit within a usize\")\n    };\n\n    // A valid DoS would be to claim a 4 GB data is present for only 4 bytes\n    // We read this in 1 KB chunks to only read data actually present (with a max DoS of 1 KB)\n    let mut data = vec![];\n    while data_len > 0 {\n      let chunk_len = data_len.min(1024);\n      let mut chunk = vec![0; chunk_len];\n      reader.read_exact(&mut chunk)?;\n      data.extend(&chunk);\n      data_len -= chunk_len;\n    }\n\n    Ok(Call { to, value, data })\n  }\n\n  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {\n    writer.write_all(&self.to)?;\n    writer.write_all(&self.value.as_le_bytes())?;\n\n    let data_len = u32::try_from(self.data.len())\n      .map_err(|_| io::Error::other(\"call data length exceeded 2**32\"))?;\n    writer.write_all(&data_len.to_le_bytes())?;\n    writer.write_all(&self.data)\n  
}\n}\nimpl From<Call> for AbiCall {\n  fn from(call: Call) -> AbiCall {\n    AbiCall { to: call.to.into(), value: call.value, data: call.data.into() }\n  }\n}\n\n#[derive(Clone, PartialEq, Eq, Debug)]\npub enum OutInstructionTarget {\n  Direct([u8; 20]),\n  Calls(Vec<Call>),\n}\nimpl OutInstructionTarget {\n  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {\n    let mut kind = [0xff];\n    reader.read_exact(&mut kind)?;\n\n    match kind[0] {\n      0 => {\n        let mut addr = [0; 20];\n        reader.read_exact(&mut addr)?;\n        Ok(OutInstructionTarget::Direct(addr))\n      }\n      1 => {\n        let mut calls_len = [0; 4];\n        reader.read_exact(&mut calls_len)?;\n        let calls_len = u32::from_le_bytes(calls_len);\n\n        let mut calls = vec![];\n        for _ in 0 .. calls_len {\n          calls.push(Call::read(reader)?);\n        }\n        Ok(OutInstructionTarget::Calls(calls))\n      }\n      _ => Err(io::Error::other(\"unrecognized OutInstructionTarget\"))?,\n    }\n  }\n\n  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {\n    match self {\n      OutInstructionTarget::Direct(addr) => {\n        writer.write_all(&[0])?;\n        writer.write_all(addr)?;\n      }\n      OutInstructionTarget::Calls(calls) => {\n        writer.write_all(&[1])?;\n        let call_len = u32::try_from(calls.len())\n          .map_err(|_| io::Error::other(\"amount of calls exceeded 2**32\"))?;\n        writer.write_all(&call_len.to_le_bytes())?;\n        for call in calls {\n          call.write(writer)?;\n        }\n      }\n    }\n    Ok(())\n  }\n}\n\n#[derive(Clone, PartialEq, Eq, Debug)]\npub struct OutInstruction {\n  pub target: OutInstructionTarget,\n  pub value: U256,\n}\nimpl OutInstruction {\n  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {\n    let target = OutInstructionTarget::read(reader)?;\n\n    let value = {\n      let mut value_bytes = [0; 32];\n      reader.read_exact(&mut value_bytes)?;\n      
U256::from_le_slice(&value_bytes)\n    };\n\n    Ok(OutInstruction { target, value })\n  }\n  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {\n    self.target.write(writer)?;\n    writer.write_all(&self.value.as_le_bytes())\n  }\n}\nimpl From<OutInstruction> for AbiOutInstruction {\n  fn from(instruction: OutInstruction) -> AbiOutInstruction {\n    match instruction.target {\n      OutInstructionTarget::Direct(addr) => {\n        AbiOutInstruction { to: addr.into(), calls: vec![], value: instruction.value }\n      }\n      OutInstructionTarget::Calls(calls) => AbiOutInstruction {\n        to: [0; 20].into(),\n        calls: calls.into_iter().map(Into::into).collect(),\n        value: instruction.value,\n      },\n    }\n  }\n}\n\n#[derive(Clone, PartialEq, Eq, Debug)]\npub enum RouterCommand {\n  UpdateSeraiKey { chain_id: U256, nonce: U256, key: PublicKey },\n  Execute { chain_id: U256, nonce: U256, outs: Vec<OutInstruction> },\n}\n\nimpl RouterCommand {\n  pub fn msg(&self) -> Vec<u8> {\n    match self {\n      RouterCommand::UpdateSeraiKey { chain_id, nonce, key } => {\n        Router::update_serai_key_message(*chain_id, *nonce, key)\n      }\n      RouterCommand::Execute { chain_id, nonce, outs } => Router::execute_message(\n        *chain_id,\n        *nonce,\n        outs.iter().map(|out| out.clone().into()).collect(),\n      ),\n    }\n  }\n\n  pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {\n    let mut kind = [0xff];\n    reader.read_exact(&mut kind)?;\n\n    match kind[0] {\n      0 => {\n        let mut chain_id = [0; 32];\n        reader.read_exact(&mut chain_id)?;\n\n        let mut nonce = [0; 32];\n        reader.read_exact(&mut nonce)?;\n\n        let key = PublicKey::new(Secp256k1::read_G(reader)?)\n          .ok_or(io::Error::other(\"key for RouterCommand doesn't have an eth representation\"))?;\n        Ok(RouterCommand::UpdateSeraiKey {\n          chain_id: U256::from_le_slice(&chain_id),\n          nonce: 
U256::from_le_slice(&nonce),\n          key,\n        })\n      }\n      1 => {\n        let mut chain_id = [0; 32];\n        reader.read_exact(&mut chain_id)?;\n        let chain_id = U256::from_le_slice(&chain_id);\n\n        let mut nonce = [0; 32];\n        reader.read_exact(&mut nonce)?;\n        let nonce = U256::from_le_slice(&nonce);\n\n        let mut outs_len = [0; 4];\n        reader.read_exact(&mut outs_len)?;\n        let outs_len = u32::from_le_bytes(outs_len);\n\n        let mut outs = vec![];\n        for _ in 0 .. outs_len {\n          outs.push(OutInstruction::read(reader)?);\n        }\n\n        Ok(RouterCommand::Execute { chain_id, nonce, outs })\n      }\n      _ => Err(io::Error::other(\"reading unknown type of RouterCommand\"))?,\n    }\n  }\n\n  pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {\n    match self {\n      RouterCommand::UpdateSeraiKey { chain_id, nonce, key } => {\n        writer.write_all(&[0])?;\n        writer.write_all(&chain_id.as_le_bytes())?;\n        writer.write_all(&nonce.as_le_bytes())?;\n        writer.write_all(&key.A.to_bytes())\n      }\n      RouterCommand::Execute { chain_id, nonce, outs } => {\n        writer.write_all(&[1])?;\n        writer.write_all(&chain_id.as_le_bytes())?;\n        writer.write_all(&nonce.as_le_bytes())?;\n        writer.write_all(&u32::try_from(outs.len()).unwrap().to_le_bytes())?;\n        for out in outs {\n          out.write(writer)?;\n        }\n        Ok(())\n      }\n    }\n  }\n\n  pub fn serialize(&self) -> Vec<u8> {\n    let mut res = vec![];\n    self.write(&mut res).unwrap();\n    res\n  }\n}\n\n#[derive(Clone, PartialEq, Eq, Debug)]\npub struct SignedRouterCommand {\n  command: RouterCommand,\n  signature: Signature,\n}\n\nimpl SignedRouterCommand {\n  pub fn new(key: &PublicKey, command: RouterCommand, signature: &[u8; 64]) -> Option<Self> {\n    let c = Secp256k1::read_F(&mut &signature[.. 
32]).ok()?;\n    let s = Secp256k1::read_F(&mut &signature[32 ..]).ok()?;\n    let signature = Signature { c, s };\n\n    if !signature.verify(key, &command.msg()) {\n      None?\n    }\n    Some(SignedRouterCommand { command, signature })\n  }\n\n  pub fn command(&self) -> &RouterCommand {\n    &self.command\n  }\n\n  pub fn signature(&self) -> &Signature {\n    &self.signature\n  }\n\n  pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {\n    let command = RouterCommand::read(reader)?;\n\n    let mut sig = [0; 64];\n    reader.read_exact(&mut sig)?;\n    let signature = Signature::from_bytes(sig)?;\n\n    Ok(SignedRouterCommand { command, signature })\n  }\n\n  pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {\n    self.command.write(writer)?;\n    writer.write_all(&self.signature.to_bytes())\n  }\n}\n\npub struct RouterCommandMachine {\n  key: PublicKey,\n  command: RouterCommand,\n  machine: AlgorithmMachine<Secp256k1, Schnorr<Secp256k1, RecommendedTranscript, EthereumHram>>,\n}\n\nimpl RouterCommandMachine {\n  pub fn new(keys: ThresholdKeys<Secp256k1>, command: RouterCommand) -> Option<Self> {\n    // The Schnorr algorithm should be fine without this, even when using the IETF variant\n    // If this is better and more comprehensive, we should do it, even if not necessary\n    let mut transcript = RecommendedTranscript::new(b\"ethereum-serai RouterCommandMachine v0.1\");\n    let key = keys.group_key();\n    transcript.append_message(b\"key\", key.to_bytes());\n    transcript.append_message(b\"command\", command.serialize());\n\n    Some(Self {\n      key: PublicKey::new(key)?,\n      command,\n      machine: AlgorithmMachine::new(Schnorr::new(transcript), keys),\n    })\n  }\n}\n\nimpl PreprocessMachine for RouterCommandMachine {\n  type Preprocess = Preprocess<Secp256k1, ()>;\n  type Signature = SignedRouterCommand;\n  type SignMachine = RouterCommandSignMachine;\n\n  fn preprocess<R: RngCore + CryptoRng>(\n    self,\n    rng: 
&mut R,\n  ) -> (Self::SignMachine, Self::Preprocess) {\n    let (machine, preprocess) = self.machine.preprocess(rng);\n\n    (RouterCommandSignMachine { key: self.key, command: self.command, machine }, preprocess)\n  }\n}\n\npub struct RouterCommandSignMachine {\n  key: PublicKey,\n  command: RouterCommand,\n  machine: AlgorithmSignMachine<Secp256k1, Schnorr<Secp256k1, RecommendedTranscript, EthereumHram>>,\n}\n\nimpl SignMachine<SignedRouterCommand> for RouterCommandSignMachine {\n  type Params = ();\n  type Keys = ThresholdKeys<Secp256k1>;\n  type Preprocess = Preprocess<Secp256k1, ()>;\n  type SignatureShare = SignatureShare<Secp256k1>;\n  type SignatureMachine = RouterCommandSignatureMachine;\n\n  fn cache(self) -> CachedPreprocess {\n    unimplemented!(\n      \"RouterCommand machines don't support caching their preprocesses due to {}\",\n      \"being already bound to a specific command\"\n    );\n  }\n\n  fn from_cache(\n    (): (),\n    _: ThresholdKeys<Secp256k1>,\n    _: CachedPreprocess,\n  ) -> (Self, Self::Preprocess) {\n    unimplemented!(\n      \"RouterCommand machines don't support caching their preprocesses due to {}\",\n      \"being already bound to a specific command\"\n    );\n  }\n\n  fn read_preprocess<R: Read>(&self, reader: &mut R) -> io::Result<Self::Preprocess> {\n    self.machine.read_preprocess(reader)\n  }\n\n  fn sign(\n    self,\n    commitments: HashMap<Participant, Self::Preprocess>,\n    msg: &[u8],\n  ) -> Result<(RouterCommandSignatureMachine, Self::SignatureShare), FrostError> {\n    if !msg.is_empty() {\n      panic!(\"message was passed to a RouterCommand machine when it generates its own\");\n    }\n\n    let (machine, share) = self.machine.sign(commitments, &self.command.msg())?;\n\n    Ok((RouterCommandSignatureMachine { key: self.key, command: self.command, machine }, share))\n  }\n}\n\npub struct RouterCommandSignatureMachine {\n  key: PublicKey,\n  command: RouterCommand,\n  machine:\n    
AlgorithmSignatureMachine<Secp256k1, Schnorr<Secp256k1, RecommendedTranscript, EthereumHram>>,\n}\n\nimpl SignatureMachine<SignedRouterCommand> for RouterCommandSignatureMachine {\n  type SignatureShare = SignatureShare<Secp256k1>;\n\n  fn read_share<R: Read>(&self, reader: &mut R) -> io::Result<Self::SignatureShare> {\n    self.machine.read_share(reader)\n  }\n\n  fn complete(\n    self,\n    shares: HashMap<Participant, Self::SignatureShare>,\n  ) -> Result<SignedRouterCommand, FrostError> {\n    let sig = self.machine.complete(shares)?;\n    let signature = Signature::new(&self.key, &self.command.msg(), sig)\n      .expect(\"machine produced an invalid signature\");\n    Ok(SignedRouterCommand { command: self.command, signature })\n  }\n}\n"
  },
  {
    "path": "networks/ethereum/src/router.rs",
    "content": "use std::{sync::Arc, io, collections::HashSet};\n\nuse k256::{\n  elliptic_curve::{group::GroupEncoding, sec1},\n  ProjectivePoint,\n};\n\nuse alloy_core::primitives::{hex::FromHex, Address, U256, Bytes, TxKind};\n#[cfg(test)]\nuse alloy_core::primitives::B256;\nuse alloy_consensus::TxLegacy;\n\nuse alloy_sol_types::{SolValue, SolConstructor, SolCall, SolEvent};\n\nuse alloy_rpc_types_eth::Filter;\n#[cfg(test)]\nuse alloy_rpc_types_eth::{BlockId, TransactionRequest, TransactionInput};\nuse alloy_simple_request_transport::SimpleRequest;\nuse alloy_provider::{Provider, RootProvider};\n\npub use crate::{\n  Error,\n  crypto::{PublicKey, Signature},\n  abi::{erc20::Transfer, router as abi},\n};\nuse abi::{SeraiKeyUpdated, InInstruction as InInstructionEvent, Executed as ExecutedEvent};\n\n#[derive(Clone, PartialEq, Eq, Debug)]\npub enum Coin {\n  Ether,\n  Erc20([u8; 20]),\n}\n\nimpl Coin {\n  pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {\n    let mut kind = [0xff];\n    reader.read_exact(&mut kind)?;\n    Ok(match kind[0] {\n      0 => Coin::Ether,\n      1 => {\n        let mut address = [0; 20];\n        reader.read_exact(&mut address)?;\n        Coin::Erc20(address)\n      }\n      _ => Err(io::Error::other(\"unrecognized Coin type\"))?,\n    })\n  }\n\n  pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {\n    match self {\n      Coin::Ether => writer.write_all(&[0]),\n      Coin::Erc20(token) => {\n        writer.write_all(&[1])?;\n        writer.write_all(token)\n      }\n    }\n  }\n}\n\n#[derive(Clone, PartialEq, Eq, Debug)]\npub struct InInstruction {\n  pub id: ([u8; 32], u64),\n  pub from: [u8; 20],\n  pub coin: Coin,\n  pub amount: U256,\n  pub data: Vec<u8>,\n  pub key_at_end_of_block: ProjectivePoint,\n}\n\nimpl InInstruction {\n  pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {\n    let id = {\n      let mut id_hash = [0; 32];\n      reader.read_exact(&mut id_hash)?;\n      let mut id_pos 
= [0; 8];\n      reader.read_exact(&mut id_pos)?;\n      let id_pos = u64::from_le_bytes(id_pos);\n      (id_hash, id_pos)\n    };\n\n    let mut from = [0; 20];\n    reader.read_exact(&mut from)?;\n\n    let coin = Coin::read(reader)?;\n    let mut amount = [0; 32];\n    reader.read_exact(&mut amount)?;\n    let amount = U256::from_le_slice(&amount);\n\n    let mut data_len = [0; 4];\n    reader.read_exact(&mut data_len)?;\n    let data_len = usize::try_from(u32::from_le_bytes(data_len))\n      .map_err(|_| io::Error::other(\"InInstruction data exceeded 2**32 in length\"))?;\n    let mut data = vec![0; data_len];\n    reader.read_exact(&mut data)?;\n\n    let mut key_at_end_of_block = <ProjectivePoint as GroupEncoding>::Repr::default();\n    reader.read_exact(&mut key_at_end_of_block)?;\n    let key_at_end_of_block = Option::from(ProjectivePoint::from_bytes(&key_at_end_of_block))\n      .ok_or(io::Error::other(\"InInstruction had key at end of block which wasn't valid\"))?;\n\n    Ok(InInstruction { id, from, coin, amount, data, key_at_end_of_block })\n  }\n\n  pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {\n    writer.write_all(&self.id.0)?;\n    writer.write_all(&self.id.1.to_le_bytes())?;\n\n    writer.write_all(&self.from)?;\n\n    self.coin.write(writer)?;\n    writer.write_all(&self.amount.as_le_bytes())?;\n\n    writer.write_all(\n      &u32::try_from(self.data.len())\n        .map_err(|_| {\n          io::Error::other(\"InInstruction being written had data exceeding 2**32 in length\")\n        })?\n        .to_le_bytes(),\n    )?;\n    writer.write_all(&self.data)?;\n\n    writer.write_all(&self.key_at_end_of_block.to_bytes())\n  }\n}\n\n#[derive(Clone, PartialEq, Eq, Debug)]\npub struct Executed {\n  pub tx_id: [u8; 32],\n  pub nonce: u64,\n  pub signature: [u8; 64],\n}\n\n/// The contract Serai uses to manage its state.\n#[derive(Clone, Debug)]\npub struct Router(Arc<RootProvider<SimpleRequest>>, Address);\nimpl Router {\n  
pub(crate) fn code() -> Vec<u8> {\n    let bytecode = include_str!(\"../artifacts/Router.bin\");\n    Bytes::from_hex(bytecode).expect(\"compiled-in Router bytecode wasn't valid hex\").to_vec()\n  }\n\n  pub(crate) fn init_code(key: &PublicKey) -> Vec<u8> {\n    let mut bytecode = Self::code();\n    // Append the constructor arguments\n    bytecode.extend((abi::constructorCall { _seraiKey: key.eth_repr().into() }).abi_encode());\n    bytecode\n  }\n\n  // This isn't pub in order to force users to use `Deployer::find_router`.\n  pub(crate) fn new(provider: Arc<RootProvider<SimpleRequest>>, address: Address) -> Self {\n    Self(provider, address)\n  }\n\n  pub fn address(&self) -> [u8; 20] {\n    **self.1\n  }\n\n  /// Get the key for Serai at the specified block.\n  #[cfg(test)]\n  pub async fn serai_key(&self, at: [u8; 32]) -> Result<PublicKey, Error> {\n    let call = TransactionRequest::default()\n      .to(self.1)\n      .input(TransactionInput::new(abi::seraiKeyCall::new(()).abi_encode().into()));\n    let bytes = self\n      .0\n      .call(&call)\n      .block(BlockId::Hash(B256::from(at).into()))\n      .await\n      .map_err(|_| Error::ConnectionError)?;\n    let res =\n      abi::seraiKeyCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?;\n    PublicKey::from_eth_repr(res._0.0).ok_or(Error::ConnectionError)\n  }\n\n  /// Get the message to be signed in order to update the key for Serai.\n  pub(crate) fn update_serai_key_message(chain_id: U256, nonce: U256, key: &PublicKey) -> Vec<u8> {\n    let mut buffer = b\"updateSeraiKey\".to_vec();\n    buffer.extend(&chain_id.to_be_bytes::<32>());\n    buffer.extend(&nonce.to_be_bytes::<32>());\n    buffer.extend(&key.eth_repr());\n    buffer\n  }\n\n  /// Update the key representing Serai.\n  pub fn update_serai_key(&self, public_key: &PublicKey, sig: &Signature) -> TxLegacy {\n    // TODO: Set a more accurate gas\n    TxLegacy {\n      to: TxKind::Call(self.1),\n      input: 
abi::updateSeraiKeyCall::new((public_key.eth_repr().into(), sig.into()))\n        .abi_encode()\n        .into(),\n      gas_limit: 100_000,\n      ..Default::default()\n    }\n  }\n\n  /// Get the current nonce for the published batches.\n  #[cfg(test)]\n  pub async fn nonce(&self, at: [u8; 32]) -> Result<U256, Error> {\n    let call = TransactionRequest::default()\n      .to(self.1)\n      .input(TransactionInput::new(abi::nonceCall::new(()).abi_encode().into()));\n    let bytes = self\n      .0\n      .call(&call)\n      .block(BlockId::Hash(B256::from(at).into()))\n      .await\n      .map_err(|_| Error::ConnectionError)?;\n    let res =\n      abi::nonceCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?;\n    Ok(res._0)\n  }\n\n  /// Get the message to be signed in order to update the key for Serai.\n  pub(crate) fn execute_message(\n    chain_id: U256,\n    nonce: U256,\n    outs: Vec<abi::OutInstruction>,\n  ) -> Vec<u8> {\n    (\"execute\".to_string(), chain_id, nonce, outs).abi_encode_params()\n  }\n\n  /// Execute a batch of `OutInstruction`s.\n  pub fn execute(&self, outs: &[abi::OutInstruction], sig: &Signature) -> TxLegacy {\n    TxLegacy {\n      to: TxKind::Call(self.1),\n      input: abi::executeCall::new((outs.to_vec(), sig.into())).abi_encode().into(),\n      // TODO\n      gas_limit: 100_000 + ((200_000 + 10_000) * u64::try_from(outs.len()).unwrap()),\n      ..Default::default()\n    }\n  }\n\n  pub async fn key_at_end_of_block(&self, block: u64) -> Result<Option<ProjectivePoint>, Error> {\n    let filter = Filter::new().from_block(0).to_block(block).address(self.1);\n    let filter = filter.event_signature(SeraiKeyUpdated::SIGNATURE_HASH);\n    let all_keys = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;\n    if all_keys.is_empty() {\n      return Ok(None);\n    };\n\n    let last_key_x_coordinate_log = all_keys.last().ok_or(Error::ConnectionError)?;\n    let last_key_x_coordinate = 
last_key_x_coordinate_log\n      .log_decode::<SeraiKeyUpdated>()\n      .map_err(|_| Error::ConnectionError)?\n      .inner\n      .data\n      .key;\n\n    let mut compressed_point = <ProjectivePoint as GroupEncoding>::Repr::default();\n    compressed_point[0] = u8::from(sec1::Tag::CompressedEvenY);\n    compressed_point[1 ..].copy_from_slice(last_key_x_coordinate.as_slice());\n\n    let key =\n      Option::from(ProjectivePoint::from_bytes(&compressed_point)).ok_or(Error::ConnectionError)?;\n    Ok(Some(key))\n  }\n\n  pub async fn in_instructions(\n    &self,\n    block: u64,\n    allowed_tokens: &HashSet<[u8; 20]>,\n  ) -> Result<Vec<InInstruction>, Error> {\n    let Some(key_at_end_of_block) = self.key_at_end_of_block(block).await? else {\n      return Ok(vec![]);\n    };\n\n    let filter = Filter::new().from_block(block).to_block(block).address(self.1);\n    let filter = filter.event_signature(InInstructionEvent::SIGNATURE_HASH);\n    let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;\n\n    let mut transfer_check = HashSet::new();\n    let mut in_instructions = vec![];\n    for log in logs {\n      // Double check the address which emitted this log\n      if log.address() != self.1 {\n        Err(Error::ConnectionError)?;\n      }\n\n      let id = (\n        log.block_hash.ok_or(Error::ConnectionError)?.into(),\n        log.log_index.ok_or(Error::ConnectionError)?,\n      );\n\n      let tx_hash = log.transaction_hash.ok_or(Error::ConnectionError)?;\n      let tx = self\n        .0\n        .get_transaction_by_hash(tx_hash)\n        .await\n        .ok()\n        .flatten()\n        .ok_or(Error::ConnectionError)?;\n\n      let log =\n        log.log_decode::<InInstructionEvent>().map_err(|_| Error::ConnectionError)?.inner.data;\n\n      let coin = if log.coin.0 == [0; 20] {\n        Coin::Ether\n      } else {\n        let token = *log.coin.0;\n\n        if !allowed_tokens.contains(&token) {\n          continue;\n        
}\n\n        // If this also counts as a top-level transfer via the token, drop it\n        //\n        // Necessary in order to handle a potential edge case with some theoretical token\n        // implementations\n        //\n        // This will either let it be handled by the top-level transfer hook or will drop it\n        // entirely on the side of caution\n        if tx.to == Some(token.into()) {\n          continue;\n        }\n\n        // Get all logs for this TX\n        let receipt = self\n          .0\n          .get_transaction_receipt(tx_hash)\n          .await\n          .map_err(|_| Error::ConnectionError)?\n          .ok_or(Error::ConnectionError)?;\n        let tx_logs = receipt.inner.logs();\n\n        // Find a matching transfer log\n        let mut found_transfer = false;\n        for tx_log in tx_logs {\n          let log_index = tx_log.log_index.ok_or(Error::ConnectionError)?;\n          // Ensure we didn't already use this transfer to check a distinct InInstruction event\n          if transfer_check.contains(&log_index) {\n            continue;\n          }\n\n          // Check if this log is from the token we expected to be transferred\n          if tx_log.address().0 != token {\n            continue;\n          }\n          // Check if this is a transfer log\n          // https://github.com/alloy-rs/core/issues/589\n          if tx_log.topics()[0] != Transfer::SIGNATURE_HASH {\n            continue;\n          }\n          let Ok(transfer) = Transfer::decode_log(&tx_log.inner.clone(), true) else { continue };\n          // Check if this is a transfer to us for the expected amount\n          if (transfer.to == self.1) && (transfer.value == log.amount) {\n            transfer_check.insert(log_index);\n            found_transfer = true;\n            break;\n          }\n        }\n        if !found_transfer {\n          // This shouldn't be a ConnectionError\n          // This is an exploit, a non-conforming ERC20, or an invalid connection\n 
         // This should halt the process which is sufficient, yet this is sub-optimal\n          // TODO\n          Err(Error::ConnectionError)?;\n        }\n\n        Coin::Erc20(token)\n      };\n\n      in_instructions.push(InInstruction {\n        id,\n        from: *log.from.0,\n        coin,\n        amount: log.amount,\n        data: log.instruction.as_ref().to_vec(),\n        key_at_end_of_block,\n      });\n    }\n\n    Ok(in_instructions)\n  }\n\n  pub async fn executed_commands(&self, block: u64) -> Result<Vec<Executed>, Error> {\n    let mut res = vec![];\n\n    {\n      let filter = Filter::new().from_block(block).to_block(block).address(self.1);\n      let filter = filter.event_signature(SeraiKeyUpdated::SIGNATURE_HASH);\n      let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;\n\n      for log in logs {\n        // Double check the address which emitted this log\n        if log.address() != self.1 {\n          Err(Error::ConnectionError)?;\n        }\n\n        let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?.into();\n\n        let log =\n          log.log_decode::<SeraiKeyUpdated>().map_err(|_| Error::ConnectionError)?.inner.data;\n\n        let mut signature = [0; 64];\n        signature[.. 
32].copy_from_slice(log.signature.c.as_ref());\n        signature[32 ..].copy_from_slice(log.signature.s.as_ref());\n        res.push(Executed {\n          tx_id,\n          nonce: log.nonce.try_into().map_err(|_| Error::ConnectionError)?,\n          signature,\n        });\n      }\n    }\n\n    {\n      let filter = Filter::new().from_block(block).to_block(block).address(self.1);\n      let filter = filter.event_signature(ExecutedEvent::SIGNATURE_HASH);\n      let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;\n\n      for log in logs {\n        // Double check the address which emitted this log\n        if log.address() != self.1 {\n          Err(Error::ConnectionError)?;\n        }\n\n        let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?.into();\n\n        let log = log.log_decode::<ExecutedEvent>().map_err(|_| Error::ConnectionError)?.inner.data;\n\n        let mut signature = [0; 64];\n        signature[.. 32].copy_from_slice(log.signature.c.as_ref());\n        signature[32 ..].copy_from_slice(log.signature.s.as_ref());\n        res.push(Executed {\n          tx_id,\n          nonce: log.nonce.try_into().map_err(|_| Error::ConnectionError)?,\n          signature,\n        });\n      }\n    }\n\n    Ok(res)\n  }\n\n  #[cfg(feature = \"tests\")]\n  pub fn key_updated_filter(&self) -> Filter {\n    Filter::new().address(self.1).event_signature(SeraiKeyUpdated::SIGNATURE_HASH)\n  }\n  #[cfg(feature = \"tests\")]\n  pub fn executed_filter(&self) -> Filter {\n    Filter::new().address(self.1).event_signature(ExecutedEvent::SIGNATURE_HASH)\n  }\n}\n"
  },
  {
    "path": "networks/ethereum/src/tests/abi/mod.rs",
    "content": "use alloy_sol_types::sol;\n\n#[rustfmt::skip]\n#[allow(warnings)]\n#[allow(needless_pass_by_value)]\n#[allow(clippy::all)]\n#[allow(clippy::ignored_unit_patterns)]\n#[allow(clippy::redundant_closure_for_method_calls)]\nmod schnorr_container {\n  use super::*;\n  sol!(\"src/tests/contracts/Schnorr.sol\");\n}\npub(crate) use schnorr_container::TestSchnorr as schnorr;\n"
  },
  {
    "path": "networks/ethereum/src/tests/contracts/ERC20.sol",
    "content": "// SPDX-License-Identifier: AGPLv3\npragma solidity ^0.8.0;\n\ncontract TestERC20 {\n  event Transfer(address indexed from, address indexed to, uint256 value);\n  event Approval(address indexed owner, address indexed spender, uint256 value);\n\n  function name() public pure returns (string memory) {\n    return \"Test ERC20\";\n  }\n  function symbol() public pure returns (string memory) {\n    return \"TEST\";\n  }\n  function decimals() public pure returns (uint8) {\n    return 18;\n  }\n\n  function totalSupply() public pure returns (uint256) {\n    return 1_000_000 * 10e18;\n  }\n\n  mapping(address => uint256) balances;\n  mapping(address => mapping(address => uint256)) allowances;\n\n  constructor() {\n    balances[msg.sender] = totalSupply();\n  }\n\n  function balanceOf(address owner) public view returns (uint256) {\n    return balances[owner];\n  }\n  function transfer(address to, uint256 value) public returns (bool) {\n    balances[msg.sender] -= value;\n    balances[to] += value;\n    return true;\n  }\n  function transferFrom(address from, address to, uint256 value) public returns (bool) {\n    allowances[from][msg.sender] -= value;\n    balances[from] -= value;\n    balances[to] += value;\n    return true;\n  }\n\n  function approve(address spender, uint256 value) public returns (bool) {\n    allowances[msg.sender][spender] = value;\n    return true;\n  }\n  function allowance(address owner, address spender) public view returns (uint256) {\n    return allowances[owner][spender];\n  }\n}\n"
  },
  {
    "path": "networks/ethereum/src/tests/contracts/Schnorr.sol",
    "content": "// SPDX-License-Identifier: AGPLv3\npragma solidity ^0.8.0;\n\nimport \"../../../contracts/Schnorr.sol\";\n\ncontract TestSchnorr {\n  function verify(\n    bytes32 px,\n    bytes calldata message,\n    bytes32 c,\n    bytes32 s\n  ) external pure returns (bool) {\n    return Schnorr.verify(px, message, c, s);\n  }\n}\n"
  },
  {
    "path": "networks/ethereum/src/tests/crypto.rs",
    "content": "#![allow(deprecated)]\n\nuse rand_core::OsRng;\n\nuse group::ff::{Field, PrimeField};\nuse k256::{\n  ecdsa::{\n    self, hazmat::SignPrimitive, signature::hazmat::PrehashVerifier, SigningKey, VerifyingKey,\n  },\n  Scalar, ProjectivePoint,\n};\n\nuse frost::{\n  curve::{Ciphersuite, Secp256k1},\n  algorithm::{Hram, IetfSchnorr},\n  tests::{algorithm_machines, sign},\n};\n\nuse crate::{crypto::*, tests::key_gen};\n\n// The ecrecover opcode, yet with parity replacing v\npub(crate) fn ecrecover(message: Scalar, odd_y: bool, r: Scalar, s: Scalar) -> Option<[u8; 20]> {\n  let sig = ecdsa::Signature::from_scalars(r, s).ok()?;\n  let message: [u8; 32] = message.to_repr().into();\n  alloy_core::primitives::Signature::from_signature_and_parity(\n    sig,\n    alloy_core::primitives::Parity::Parity(odd_y),\n  )\n  .ok()?\n  .recover_address_from_prehash(&alloy_core::primitives::B256::from(message))\n  .ok()\n  .map(Into::into)\n}\n\n#[test]\nfn test_ecrecover() {\n  let private = SigningKey::random(&mut OsRng);\n  let public = VerifyingKey::from(&private);\n\n  // Sign the signature\n  const MESSAGE: &[u8] = b\"Hello, World!\";\n  let (sig, recovery_id) = private\n    .as_nonzero_scalar()\n    .try_sign_prehashed(\n      <Secp256k1 as Ciphersuite>::F::random(&mut OsRng),\n      &keccak256(MESSAGE).into(),\n    )\n    .unwrap();\n\n  // Sanity check the signature verifies\n  #[allow(clippy::unit_cmp)] // Intended to assert this wasn't changed to Result<bool>\n  {\n    assert_eq!(public.verify_prehash(&keccak256(MESSAGE), &sig).unwrap(), ());\n  }\n\n  // Perform the ecrecover\n  assert_eq!(\n    ecrecover(\n      hash_to_scalar(MESSAGE),\n      u8::from(recovery_id.unwrap().is_y_odd()) == 1,\n      *sig.r(),\n      *sig.s()\n    )\n    .unwrap(),\n    address(&ProjectivePoint::from(public.as_affine()))\n  );\n}\n\n// Run the sign test with the EthereumHram\n#[test]\nfn test_signing() {\n  let (keys, _) = key_gen();\n\n  const MESSAGE: &[u8] = b\"Hello, 
World!\";\n\n  let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();\n  let _sig =\n    sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE);\n}\n\n#[allow(non_snake_case)]\npub fn preprocess_signature_for_ecrecover(\n  R: ProjectivePoint,\n  public_key: &PublicKey,\n  m: &[u8],\n  s: Scalar,\n) -> (Scalar, Scalar) {\n  let c = EthereumHram::hram(&R, &public_key.A, m);\n  let sa = -(s * public_key.px);\n  let ca = -(c * public_key.px);\n  (sa, ca)\n}\n\n#[test]\nfn test_ecrecover_hack() {\n  let (keys, public_key) = key_gen();\n\n  const MESSAGE: &[u8] = b\"Hello, World!\";\n\n  let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();\n  let sig =\n    sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE);\n\n  let (sa, ca) = preprocess_signature_for_ecrecover(sig.R, &public_key, MESSAGE, sig.s);\n  let q = ecrecover(sa, false, public_key.px, ca).unwrap();\n  assert_eq!(q, address(&sig.R));\n}\n"
  },
  {
    "path": "networks/ethereum/src/tests/mod.rs",
    "content": "#![allow(deprecated)]\n\nuse std::{sync::Arc, collections::HashMap};\n\nuse rand_core::OsRng;\n\nuse k256::{Scalar, ProjectivePoint};\nuse frost::{curve::Secp256k1, Participant, ThresholdKeys, tests::key_gen as frost_key_gen};\n\nuse alloy_core::{\n  primitives::{Address, U256, Bytes, Signature, TxKind},\n  hex::FromHex,\n};\nuse alloy_consensus::{SignableTransaction, TxLegacy};\n\nuse alloy_rpc_types_eth::TransactionReceipt;\nuse alloy_simple_request_transport::SimpleRequest;\nuse alloy_provider::{Provider, RootProvider};\n\nuse crate::crypto::{address, deterministically_sign, PublicKey};\n\n#[cfg(test)]\nmod crypto;\n\n#[cfg(test)]\nmod abi;\n#[cfg(test)]\nmod schnorr;\n#[cfg(test)]\nmod router;\n\npub fn key_gen() -> (HashMap<Participant, ThresholdKeys<Secp256k1>>, PublicKey) {\n  let mut keys = frost_key_gen::<_, Secp256k1>(&mut OsRng);\n  let mut group_key = keys[&Participant::new(1).unwrap()].group_key();\n\n  let mut offset = Scalar::ZERO;\n  while PublicKey::new(group_key).is_none() {\n    offset += Scalar::ONE;\n    group_key += ProjectivePoint::GENERATOR;\n  }\n  for keys in keys.values_mut() {\n    *keys = keys.clone().offset(offset);\n  }\n  let public_key = PublicKey::new(group_key).unwrap();\n\n  (keys, public_key)\n}\n\n// TODO: Use a proper error here\npub async fn send(\n  provider: &RootProvider<SimpleRequest>,\n  wallet: &k256::ecdsa::SigningKey,\n  mut tx: TxLegacy,\n) -> Option<TransactionReceipt> {\n  let verifying_key = *wallet.verifying_key().as_affine();\n  let address = Address::from(address(&verifying_key.into()));\n\n  // https://github.com/alloy-rs/alloy/issues/539\n  // let chain_id = provider.get_chain_id().await.unwrap();\n  // tx.chain_id = Some(chain_id);\n  tx.chain_id = None;\n  tx.nonce = provider.get_transaction_count(address).await.unwrap();\n  // 100 gwei\n  tx.gas_price = 100_000_000_000u128;\n\n  let sig = wallet.sign_prehash_recoverable(tx.signature_hash().as_ref()).unwrap();\n  assert_eq!(address, 
tx.clone().into_signed(sig.into()).recover_signer().unwrap());\n  assert!(\n    provider.get_balance(address).await.unwrap() >\n      ((U256::from(tx.gas_price) * U256::from(tx.gas_limit)) + tx.value)\n  );\n\n  let mut bytes = vec![];\n  tx.encode_with_signature_fields(&Signature::from(sig), &mut bytes);\n  let pending_tx = provider.send_raw_transaction(&bytes).await.ok()?;\n  pending_tx.get_receipt().await.ok()\n}\n\npub async fn fund_account(\n  provider: &RootProvider<SimpleRequest>,\n  wallet: &k256::ecdsa::SigningKey,\n  to_fund: Address,\n  value: U256,\n) -> Option<()> {\n  let funding_tx =\n    TxLegacy { to: TxKind::Call(to_fund), gas_limit: 21_000, value, ..Default::default() };\n  assert!(send(provider, wallet, funding_tx).await.unwrap().status());\n\n  Some(())\n}\n\n// TODO: Use a proper error here\npub async fn deploy_contract(\n  client: Arc<RootProvider<SimpleRequest>>,\n  wallet: &k256::ecdsa::SigningKey,\n  name: &str,\n) -> Option<Address> {\n  let hex_bin_buf = std::fs::read_to_string(format!(\"./artifacts/{name}.bin\")).unwrap();\n  let hex_bin =\n    if let Some(stripped) = hex_bin_buf.strip_prefix(\"0x\") { stripped } else { &hex_bin_buf };\n  let bin = Bytes::from_hex(hex_bin).unwrap();\n\n  let deployment_tx = TxLegacy {\n    chain_id: None,\n    nonce: 0,\n    // 100 gwei\n    gas_price: 100_000_000_000u128,\n    gas_limit: 1_000_000,\n    to: TxKind::Create,\n    value: U256::ZERO,\n    input: bin,\n  };\n\n  let deployment_tx = deterministically_sign(&deployment_tx);\n\n  // Fund the deployer address\n  fund_account(\n    &client,\n    wallet,\n    deployment_tx.recover_signer().unwrap(),\n    U256::from(deployment_tx.tx().gas_limit) * U256::from(deployment_tx.tx().gas_price),\n  )\n  .await?;\n\n  let (deployment_tx, sig, _) = deployment_tx.into_parts();\n  let mut bytes = vec![];\n  deployment_tx.encode_with_signature_fields(&sig, &mut bytes);\n  let pending_tx = client.send_raw_transaction(&bytes).await.ok()?;\n  let receipt = 
pending_tx.get_receipt().await.ok()?;\n  assert!(receipt.status());\n\n  Some(receipt.contract_address.unwrap())\n}\n"
  },
  {
    "path": "networks/ethereum/src/tests/router.rs",
    "content": "use std::{convert::TryFrom, sync::Arc, collections::HashMap};\n\nuse rand_core::OsRng;\n\nuse group::Group;\nuse k256::ProjectivePoint;\nuse frost::{\n  curve::Secp256k1,\n  Participant, ThresholdKeys,\n  algorithm::IetfSchnorr,\n  tests::{algorithm_machines, sign},\n};\n\nuse alloy_core::primitives::{Address, U256};\n\nuse alloy_simple_request_transport::SimpleRequest;\nuse alloy_rpc_types_eth::BlockTransactionsKind;\nuse alloy_rpc_client::ClientBuilder;\nuse alloy_provider::{Provider, RootProvider};\n\nuse alloy_node_bindings::{Anvil, AnvilInstance};\n\nuse crate::{\n  crypto::*,\n  deployer::Deployer,\n  router::{Router, abi as router},\n  tests::{key_gen, send, fund_account},\n};\n\nasync fn setup_test() -> (\n  AnvilInstance,\n  Arc<RootProvider<SimpleRequest>>,\n  u64,\n  Router,\n  HashMap<Participant, ThresholdKeys<Secp256k1>>,\n  PublicKey,\n) {\n  let anvil = Anvil::new().spawn();\n\n  let provider = RootProvider::new(\n    ClientBuilder::default().transport(SimpleRequest::new(anvil.endpoint()), true),\n  );\n  let chain_id = provider.get_chain_id().await.unwrap();\n  let wallet = anvil.keys()[0].clone().into();\n  let client = Arc::new(provider);\n\n  // Make sure the Deployer constructor returns None, as it doesn't exist yet\n  assert!(Deployer::new(client.clone()).await.unwrap().is_none());\n\n  // Deploy the Deployer\n  let tx = Deployer::deployment_tx();\n  fund_account(\n    &client,\n    &wallet,\n    tx.recover_signer().unwrap(),\n    U256::from(tx.tx().gas_limit) * U256::from(tx.tx().gas_price),\n  )\n  .await\n  .unwrap();\n\n  let (tx, sig, _) = tx.into_parts();\n  let mut bytes = vec![];\n  tx.encode_with_signature_fields(&sig, &mut bytes);\n\n  let pending_tx = client.send_raw_transaction(&bytes).await.unwrap();\n  let receipt = pending_tx.get_receipt().await.unwrap();\n  assert!(receipt.status());\n  let deployer =\n    Deployer::new(client.clone()).await.expect(\"network error\").expect(\"deployer wasn't deployed\");\n\n  
let (keys, public_key) = key_gen();\n\n  // Verify the Router constructor returns None, as it doesn't exist yet\n  assert!(deployer.find_router(client.clone(), &public_key).await.unwrap().is_none());\n\n  // Deploy the router\n  let receipt = send(&client, &anvil.keys()[0].clone().into(), deployer.deploy_router(&public_key))\n    .await\n    .unwrap();\n  assert!(receipt.status());\n  let contract = deployer.find_router(client.clone(), &public_key).await.unwrap().unwrap();\n\n  (anvil, client, chain_id, contract, keys, public_key)\n}\n\nasync fn latest_block_hash(client: &RootProvider<SimpleRequest>) -> [u8; 32] {\n  client\n    .get_block(client.get_block_number().await.unwrap().into(), BlockTransactionsKind::Hashes)\n    .await\n    .unwrap()\n    .unwrap()\n    .header\n    .hash\n    .0\n}\n\n#[tokio::test]\nasync fn test_deploy_contract() {\n  let (_anvil, client, _, router, _, public_key) = setup_test().await;\n\n  let block_hash = latest_block_hash(&client).await;\n  assert_eq!(router.serai_key(block_hash).await.unwrap(), public_key);\n  assert_eq!(router.nonce(block_hash).await.unwrap(), U256::try_from(1u64).unwrap());\n  // TODO: Check it emitted SeraiKeyUpdated(public_key) at its genesis\n}\n\npub fn hash_and_sign(\n  keys: &HashMap<Participant, ThresholdKeys<Secp256k1>>,\n  public_key: &PublicKey,\n  message: &[u8],\n) -> Signature {\n  let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();\n  let sig =\n    sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, keys), message);\n\n  Signature::new(public_key, message, sig).unwrap()\n}\n\n#[tokio::test]\nasync fn test_router_update_serai_key() {\n  let (anvil, client, chain_id, contract, keys, public_key) = setup_test().await;\n\n  let next_key = loop {\n    let point = ProjectivePoint::random(&mut OsRng);\n    let Some(next_key) = PublicKey::new(point) else { continue };\n    break next_key;\n  };\n\n  let message = Router::update_serai_key_message(\n    
U256::try_from(chain_id).unwrap(),\n    U256::try_from(1u64).unwrap(),\n    &next_key,\n  );\n  let sig = hash_and_sign(&keys, &public_key, &message);\n\n  let first_block_hash = latest_block_hash(&client).await;\n  assert_eq!(contract.serai_key(first_block_hash).await.unwrap(), public_key);\n\n  let receipt =\n    send(&client, &anvil.keys()[0].clone().into(), contract.update_serai_key(&next_key, &sig))\n      .await\n      .unwrap();\n  assert!(receipt.status());\n\n  let second_block_hash = latest_block_hash(&client).await;\n  assert_eq!(contract.serai_key(second_block_hash).await.unwrap(), next_key);\n  // Check this does still offer the historical state\n  assert_eq!(contract.serai_key(first_block_hash).await.unwrap(), public_key);\n  // TODO: Check logs\n\n  println!(\"gas used: {:?}\", receipt.gas_used);\n  // println!(\"logs: {:?}\", receipt.logs);\n}\n\n#[tokio::test]\nasync fn test_router_execute() {\n  let (anvil, client, chain_id, contract, keys, public_key) = setup_test().await;\n\n  let to = Address::from([0; 20]);\n  let value = U256::ZERO;\n  let tx = router::OutInstruction { to, value, calls: vec![] };\n  let txs = vec![tx];\n\n  let first_block_hash = latest_block_hash(&client).await;\n  let nonce = contract.nonce(first_block_hash).await.unwrap();\n  assert_eq!(nonce, U256::try_from(1u64).unwrap());\n\n  let message = Router::execute_message(U256::try_from(chain_id).unwrap(), nonce, txs.clone());\n  let sig = hash_and_sign(&keys, &public_key, &message);\n\n  let receipt =\n    send(&client, &anvil.keys()[0].clone().into(), contract.execute(&txs, &sig)).await.unwrap();\n  assert!(receipt.status());\n\n  let second_block_hash = latest_block_hash(&client).await;\n  assert_eq!(contract.nonce(second_block_hash).await.unwrap(), U256::try_from(2u64).unwrap());\n  // Check this does still offer the historical state\n  assert_eq!(contract.nonce(first_block_hash).await.unwrap(), U256::try_from(1u64).unwrap());\n  // TODO: Check logs\n\n  println!(\"gas 
used: {:?}\", receipt.gas_used);\n  // println!(\"logs: {:?}\", receipt.logs);\n}\n"
  },
  {
    "path": "networks/ethereum/src/tests/schnorr.rs",
    "content": "use std::sync::Arc;\n\nuse rand_core::OsRng;\n\nuse group::ff::PrimeField;\nuse k256::Scalar;\n\nuse frost::{\n  curve::Secp256k1,\n  algorithm::IetfSchnorr,\n  tests::{algorithm_machines, sign},\n};\n\nuse alloy_core::primitives::Address;\n\nuse alloy_sol_types::SolCall;\n\nuse alloy_rpc_types_eth::{TransactionInput, TransactionRequest};\nuse alloy_simple_request_transport::SimpleRequest;\nuse alloy_rpc_client::ClientBuilder;\nuse alloy_provider::{Provider, RootProvider};\n\nuse alloy_node_bindings::{Anvil, AnvilInstance};\n\nuse crate::{\n  Error,\n  crypto::*,\n  tests::{key_gen, deploy_contract, abi::schnorr as abi},\n};\n\nasync fn setup_test() -> (AnvilInstance, Arc<RootProvider<SimpleRequest>>, Address) {\n  let anvil = Anvil::new().spawn();\n\n  let provider = RootProvider::new(\n    ClientBuilder::default().transport(SimpleRequest::new(anvil.endpoint()), true),\n  );\n  let wallet = anvil.keys()[0].clone().into();\n  let client = Arc::new(provider);\n\n  let address = deploy_contract(client.clone(), &wallet, \"TestSchnorr\").await.unwrap();\n  (anvil, client, address)\n}\n\n#[tokio::test]\nasync fn test_deploy_contract() {\n  setup_test().await;\n}\n\npub async fn call_verify(\n  provider: &RootProvider<SimpleRequest>,\n  contract: Address,\n  public_key: &PublicKey,\n  message: &[u8],\n  signature: &Signature,\n) -> Result<(), Error> {\n  let px: [u8; 32] = public_key.px.to_repr().into();\n  let c_bytes: [u8; 32] = signature.c.to_repr().into();\n  let s_bytes: [u8; 32] = signature.s.to_repr().into();\n  let call = TransactionRequest::default().to(contract).input(TransactionInput::new(\n    abi::verifyCall::new((px.into(), message.to_vec().into(), c_bytes.into(), s_bytes.into()))\n      .abi_encode()\n      .into(),\n  ));\n  let bytes = provider.call(&call).await.map_err(|_| Error::ConnectionError)?;\n  let res =\n    abi::verifyCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?;\n\n  if res._0 {\n    Ok(())\n  } 
else {\n    Err(Error::InvalidSignature)\n  }\n}\n\n#[tokio::test]\nasync fn test_ecrecover_hack() {\n  let (_anvil, client, contract) = setup_test().await;\n\n  let (keys, public_key) = key_gen();\n\n  const MESSAGE: &[u8] = b\"Hello, World!\";\n\n  let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();\n  let sig =\n    sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE);\n  let sig = Signature::new(&public_key, MESSAGE, sig).unwrap();\n\n  call_verify(&client, contract, &public_key, MESSAGE, &sig).await.unwrap();\n  // Test an invalid signature fails\n  let mut sig = sig;\n  sig.s += Scalar::ONE;\n  assert!(call_verify(&client, contract, &public_key, MESSAGE, &sig).await.is_err());\n}\n"
  },
  {
    "path": "orchestration/Cargo.toml",
    "content": "[package]\nname = \"serai-orchestrator\"\nversion = \"0.0.1\"\ndescription = \"Generates Dockerfiles for Serai\"\nlicense = \"AGPL-3.0-only\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/orchestration/\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nkeywords = []\nedition = \"2021\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nhex = { version = \"0.4\", default-features = false, features = [\"std\"] }\n\nzeroize = { version = \"1\", default-features = false, features = [\"std\"] }\nrand_core = { version = \"0.6\", default-features = false, features = [\"std\", \"getrandom\"] }\nrand_chacha = { version = \"0.3\", default-features = false, features = [\"std\"] }\n\ntranscript = { package = \"flexible-transcript\", path = \"../crypto/transcript\", default-features = false, features = [\"std\", \"recommended\"] }\ndalek-ff-group = { path = \"../crypto/dalek-ff-group\", default-features = false, features = [\"std\"] }\nciphersuite = { path = \"../crypto/ciphersuite\", default-features = false, features = [\"std\"] }\n\nzalloc = { path = \"../common/zalloc\" }\n\nhome = \"0.5\"\n"
  },
  {
    "path": "orchestration/README.md",
    "content": "# Orchestration\n\nThis folder contains the tool which generates various dockerfiles and manage\ndeployments of Serai.\n\nTo start, run:\n\n```sh\ncargo run -p serai-orchestrator\n```\n\nto generate all of the dockerfiles needed for development.\n"
  },
  {
    "path": "orchestration/dev/coordinator/.folder",
    "content": ""
  },
  {
    "path": "orchestration/dev/message-queue/.folder",
    "content": ""
  },
  {
    "path": "orchestration/dev/networks/bitcoin/run.sh",
    "content": "#!/bin/sh\n\nRPC_USER=\"${RPC_USER:=serai}\"\nRPC_PASS=\"${RPC_PASS:=seraidex}\"\n\nbitcoind -txindex -regtest --port=8333 \\\n  -rpcuser=$RPC_USER -rpcpassword=$RPC_PASS \\\n  -rpcbind=0.0.0.0 -rpcallowip=0.0.0.0/0 -rpcport=8332 \\\n  $1\n"
  },
  {
    "path": "orchestration/dev/networks/ethereum/run.sh",
    "content": "#!/bin/sh\n\n~/.foundry/bin/anvil --host 0.0.0.0 --no-cors --no-mining --slots-in-an-epoch 32 --silent\n"
  },
  {
    "path": "orchestration/dev/networks/ethereum-relayer/.folder",
    "content": "#!/bin/sh\n\nRPC_USER=\"${RPC_USER:=serai}\"\nRPC_PASS=\"${RPC_PASS:=seraidex}\"\n\n# Run Monero\nmonerod --non-interactive --regtest --offline --fixed-difficulty=1 \\\n  --no-zmq --rpc-bind-ip=0.0.0.0 --rpc-bind-port=18081 --confirm-external-bind \\\n  --rpc-access-control-origins \"*\" --disable-rpc-ban \\\n  --rpc-login=$RPC_USER:$RPC_PASS \\\n  $1\n"
  },
  {
    "path": "orchestration/dev/networks/monero/hashes-v0.18.3.4.txt",
    "content": "-----BEGIN PGP SIGNED MESSAGE-----\nHash: SHA256\n\n# This GPG-signed message exists to confirm the SHA256 sums of Monero binaries.\n#\n# Please verify the signature against the key for binaryFate in the\n# source code repository (/utils/gpg_keys).\n#\n#\n## CLI\n15e4d7dfc2f9261a0a452b0f8fd157c33cdbc8a896e23d883ddd13e2480a3800  monero-android-armv7-v0.18.3.4.tar.bz2\nd9c9249d1408822ce36b346c6b9fb6b896cda16714d62117fb1c588a5201763c  monero-android-armv8-v0.18.3.4.tar.bz2\n360a551388922c8991a9ba4abaa88676b0fc7ec1fa4d0f4b5c0500847e0b946c  monero-freebsd-x64-v0.18.3.4.tar.bz2\n354603c56446fb0551cdd6933bce5a13590b7881e05979b7ec25d89e7e59a0e2  monero-linux-armv7-v0.18.3.4.tar.bz2\n33ca2f0055529d225b61314c56370e35606b40edad61c91c859f873ed67a1ea7  monero-linux-armv8-v0.18.3.4.tar.bz2\n88739a1521b9fda3154540268e416c7af016ed7857041c76ab8ed7d7674c71ca  monero-linux-riscv64-v0.18.3.4.tar.bz2\n51ba03928d189c1c11b5379cab17dd9ae8d2230056dc05c872d0f8dba4a87f1d  monero-linux-x64-v0.18.3.4.tar.bz2\nd7ca0878abff2919a0104d7ed29d9c35df9ca0ea1b6fb4ebf6c8f7607ffb9e41  monero-linux-x86-v0.18.3.4.tar.bz2\n44520cb3a05c2518ca9aeae1b2e3080fe2bba1e3596d014ceff1090dfcba8ab4  monero-mac-armv8-v0.18.3.4.tar.bz2\n32c449f562216d3d83154e708471236d07db7477d6b67f1936a0a85a5005f2b8  monero-mac-x64-v0.18.3.4.tar.bz2\n54a66db6c892b2a0999754841f4ca68511741b88ea3ab20c7cd504a027f465f5  monero-win-x64-v0.18.3.4.zip\n1a9824742aa1587023c3bddea788c115940cfd49371c78a8dd62c40113132d01  monero-win-x86-v0.18.3.4.zip\n7d4845ec0a3b52404d41785da348ec33509f0a5981e8a27c5fa55b18d696e139  monero-source-v0.18.3.4.tar.bz2\n#\n## GUI\n63349d5a7637cd0c5d1693a1a2e910a92cbb123903d57667077a36454845d7bf  monero-gui-install-win-x64-v0.18.3.4.exe\n2866f3a2be30e4c4113e6274cad1d6698f81c37ceebc6e8f084c57230a0f70a6  monero-gui-linux-x64-v0.18.3.4.tar.bz2\needbf827513607a3ef579077dacd573e65892b199102effef97dff9d73138ca6  monero-gui-mac-armv8-v0.18.3.4.dmg\n54eb151d7511a9f26130864e2c02f258344803b2b68311c8be29850d7faef359  
monero-gui-mac-x64-v0.18.3.4.dmg\nb5d42dddd722e728e480337f89038c8ea606c6507bf0c88ddf2af25050c9b751  monero-gui-win-x64-v0.18.3.4.zip\n2f1d643bb2cc08e5eb334a6bfd649b0aa95ceb6178ff2f90448d5ef8d2a752a6  monero-gui-source-v0.18.3.4.tar.bz2\n#\n#\n# ~binaryFate\n-----BEGIN PGP SIGNATURE-----\n\niQIzBAEBCAAdFiEEgaxZH+nEtlxYBq/D8K9NRioL35IFAmbF8bAACgkQ8K9NRioL\n35KQAQ/7BP9j0Tx+zlFs3zbVIFXzfoPbGo2/uerM4xUWX/NUoI7XDTGWV2lpcR1x\no6eqstbuHciY0Aj2MsICsdqD+1PYW0EBZlfNLMrk161c3nQMJcjCE65uIhbLkOSs\n6SUakmpxkueQOE/Ug5Afaa/JBATVTxLTmqSCI7Ai9NplF+6KNauXQXNrlwO/gHcd\nwhYDmsqp2JyOtMpMlpOckzLgg7Oroj7B0LBf78Z13p1naUyPooBaIEXSdKm5g2HI\nvPd+z1bOVIluqPBnYWUwL7EmXy08/broejHGliQ+2iY9IsmDDx6rnSe/oprNEDic\nl+/w3KvPcTkBh8hJLVDyYieYdVYHqOktIPlR1dKV512CnuP1ljr/CXjJmkAkXHlg\nbObMUCIM9UYqp1I+KDaArjYNbzkHK02Lu6sak49GXgEuq66m9t4isF2GdcHrbERs\ncLGsnhkTO2LtnGcziOC2l9XSzL41swxe0GrkK0rdeiyDCGAlb7hllevFy7zlT90l\nJw670TyFVBs8fUFHk/tOtT0ivSDJJg8m9waBzi/46ksOvuid6p3P3a0agqu3uclj\nrscSpk0JS3E/3+A/N0IaiTmUO5zSjbsCrSnxQjcfrRRtERL+6JVHFVlW+nJzYWWH\nu0O7bNZSqEruR4aTEtsddLgs57I10thDR5SUONuAqbEq8EYN8OE=\n=aLFR\n-----END PGP SIGNATURE-----\n"
  },
  {
    "path": "orchestration/dev/networks/monero/run.sh",
    "content": "#!/bin/sh\n\nRPC_USER=\"${RPC_USER:=serai}\"\nRPC_PASS=\"${RPC_PASS:=seraidex}\"\n\n# Run Monero\nmonerod --non-interactive --regtest --offline --fixed-difficulty=1 \\\n  --no-zmq --rpc-bind-ip=0.0.0.0 --rpc-bind-port=18081 --confirm-external-bind \\\n  --rpc-access-control-origins \"*\" --disable-rpc-ban \\\n  --rpc-login=$RPC_USER:$RPC_PASS --log-level 2 \\\n  $1\n"
  },
  {
    "path": "orchestration/dev/networks/monero-wallet-rpc/run.sh",
    "content": "#!/bin/sh\n\nmonero-wallet-rpc \\\n  --allow-mismatched-daemon-version \\\n  --daemon-address serai-dev-monero:18081 --daemon-login serai:seraidex \\\n  --disable-rpc-login --rpc-bind-ip=0.0.0.0 --rpc-bind-port 18082 --confirm-external-bind \\\n  --wallet-dir /home/monero\n"
  },
  {
    "path": "orchestration/dev/processor/bitcoin/.folder",
    "content": ""
  },
  {
    "path": "orchestration/dev/processor/ethereum/.folder",
    "content": ""
  },
  {
    "path": "orchestration/dev/processor/monero/.folder",
    "content": ""
  },
  {
    "path": "orchestration/dev/serai/run.sh",
    "content": "#!/bin/sh\n\nserai-node --unsafe-rpc-external --rpc-cors all --chain local --$SERAI_NAME\n"
  },
  {
    "path": "orchestration/runtime/Dockerfile",
    "content": "# rust:1.89.0-slim-bookworm as of August 1st, 2025 (GMT)\nFROM --platform=linux/amd64 rust@sha256:703cfb0f80db8eb8a3452bf5151162472039c1b37fe4fb2957b495a6f0104ae7 AS deterministic\n\n# Move to a Debian package snapshot\nRUN rm -rf /etc/apt/sources.list.d/debian.sources && \\\n  rm -rf /var/lib/apt/lists/* && \\\n  echo \"deb [arch=amd64] http://snapshot.debian.org/archive/debian/20250801T000000Z bookworm main\" > /etc/apt/sources.list && \\\n  apt update\n\n# Install dependencies\nRUN apt update -y && apt upgrade -y && apt install -y clang\n\n# Add the wasm toolchain\nRUN rustup target add wasm32v1-none\n\nFROM deterministic\n\n# Add files for build\nADD patches /serai/patches\nADD common /serai/common\nADD crypto /serai/crypto\nADD networks /serai/networks\nADD message-queue /serai/message-queue\nADD processor /serai/processor\nADD coordinator /serai/coordinator\nADD substrate /serai/substrate\nADD orchestration/Cargo.toml /serai/orchestration/Cargo.toml\nADD orchestration/src /serai/orchestration/src\nADD mini /serai/mini\nADD tests /serai/tests\nADD Cargo.toml /serai\nADD Cargo.lock /serai\nADD AGPL-3.0 /serai\n\nWORKDIR /serai\n\n# Build the runtime, copying it to the volume if it exists\nCMD cargo build --release -p serai-runtime && \\\n  mkdir -p /volume && \\\n  cp /serai/target/release/wbuild/serai-runtime/serai_runtime.wasm /volume/serai.wasm\n"
  },
  {
    "path": "orchestration/src/coordinator.rs",
    "content": "use std::path::Path;\n\nuse zeroize::Zeroizing;\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::{group::ff::PrimeField, Ciphersuite};\n\nuse crate::{Network, Os, mimalloc, os, build_serai_service, write_dockerfile};\n\n#[allow(clippy::needless_pass_by_value)]\npub fn coordinator(\n  orchestration_path: &Path,\n  network: Network,\n  coordinator_key: Zeroizing<<Ristretto as Ciphersuite>::F>,\n  serai_key: &Zeroizing<<Ristretto as Ciphersuite>::F>,\n) {\n  let db = network.db();\n  let longer_reattempts = if network == Network::Dev { \"longer-reattempts\" } else { \"\" };\n  let setup = mimalloc(Os::Debian).to_string() +\n    &build_serai_service(\n      \"\",\n      network.release(),\n      &format!(\"{db} {longer_reattempts}\"),\n      \"serai-coordinator\",\n    );\n\n  const ADDITIONAL_ROOT: &str = r#\"\n# Install ca-certificates\nRUN apt install -y ca-certificates\n\"#;\n\n  #[rustfmt::skip]\n  const DEFAULT_RUST_LOG: &str = \"info,serai_coordinator=debug,tributary_chain=debug,tendermint=debug,libp2p_gossipsub::behaviour=error\";\n\n  let env_vars = [\n    (\"MESSAGE_QUEUE_RPC\", format!(\"serai-{}-message-queue\", network.label())),\n    (\"MESSAGE_QUEUE_KEY\", hex::encode(coordinator_key.to_repr())),\n    (\"DB_PATH\", \"/volume/coordinator-db\".to_string()),\n    (\"SERAI_KEY\", hex::encode(serai_key.to_repr())),\n    (\"SERAI_HOSTNAME\", format!(\"serai-{}-serai\", network.label())),\n    (\"RUST_LOG\", DEFAULT_RUST_LOG.to_string()),\n  ];\n  let mut env_vars_str = String::new();\n  for (env_var, value) in env_vars {\n    env_vars_str += &format!(r#\"{env_var}=${{{env_var}:=\"{value}\"}} \"#);\n  }\n\n  let run_coordinator = format!(\n    r#\"\n# Copy the Coordinator binary and relevant license\nCOPY --from=builder --chown=coordinator /serai/bin/serai-coordinator /bin/\nCOPY --from=builder --chown=coordinator /serai/AGPL-3.0 .\n\n# Run coordinator\nCMD {env_vars_str} serai-coordinator\n\"#\n  );\n\n  let run = os(Os::Debian, 
ADDITIONAL_ROOT, \"coordinator\") + &run_coordinator;\n  let res = setup + &run;\n\n  let mut coordinator_path = orchestration_path.to_path_buf();\n  coordinator_path.push(\"coordinator\");\n  coordinator_path.push(\"Dockerfile\");\n\n  write_dockerfile(coordinator_path, &res);\n}\n"
  },
  {
    "path": "orchestration/src/docker.rs",
    "content": "use std::{collections::HashSet, path::Path, env, process::Command};\n\nuse crate::Network;\n\npub fn build(orchestration_path: &Path, network: Network, name: &str) {\n  let mut repo_path = env::current_exe().unwrap();\n  repo_path.pop();\n  if repo_path.as_path().ends_with(\"deps\") {\n    repo_path.pop();\n  }\n  assert!(repo_path.as_path().ends_with(\"debug\") || repo_path.as_path().ends_with(\"release\"));\n  repo_path.pop();\n  assert!(repo_path.as_path().ends_with(\"target\"));\n  repo_path.pop();\n\n  let mut dockerfile_path = orchestration_path.to_path_buf();\n  if HashSet::from([\"bitcoin\", \"ethereum\", \"monero\", \"monero-wallet-rpc\"]).contains(name) {\n    dockerfile_path = dockerfile_path.join(\"networks\");\n  }\n  if name.contains(\"-processor\") {\n    dockerfile_path =\n      dockerfile_path.join(\"processor\").join(name.split('-').next().unwrap()).join(\"Dockerfile\");\n  } else {\n    dockerfile_path = dockerfile_path.join(name).join(\"Dockerfile\");\n  }\n\n  println!(\"Building {}...\", &name);\n\n  if !Command::new(\"docker\")\n    .current_dir(&repo_path)\n    .arg(\"build\")\n    .arg(\"-f\")\n    .arg(dockerfile_path)\n    .arg(\".\")\n    .arg(\"-t\")\n    .arg(format!(\"serai-{}-{name}-img\", network.label()))\n    .spawn()\n    .unwrap()\n    .wait()\n    .unwrap()\n    .success()\n  {\n    panic!(\"failed to build {name}\");\n  }\n\n  println!(\"Built!\");\n}\n"
  },
  {
    "path": "orchestration/src/ethereum_relayer.rs",
    "content": "use std::path::Path;\n\nuse crate::{Network, Os, mimalloc, os, build_serai_service, write_dockerfile};\n\npub fn ethereum_relayer(orchestration_path: &Path, network: Network) {\n  let setup = mimalloc(Os::Debian).to_string() +\n    &build_serai_service(\"\", network.release(), network.db(), \"serai-ethereum-relayer\");\n\n  let env_vars = [\n    (\"DB_PATH\", \"/volume/ethereum-relayer-db\".to_string()),\n    (\"RUST_LOG\", \"info,serai_ethereum_relayer=trace\".to_string()),\n  ];\n  let mut env_vars_str = String::new();\n  for (env_var, value) in env_vars {\n    env_vars_str += &format!(r#\"{env_var}=${{{env_var}:=\"{value}\"}} \"#);\n  }\n\n  let run_ethereum_relayer = format!(\n    r#\"\n# Copy the relayer server binary and relevant license\nCOPY --from=builder --chown=ethereumrelayer /serai/bin/serai-ethereum-relayer /bin\n\n# Run ethereum-relayer\nEXPOSE 20830\nEXPOSE 20831\nCMD {env_vars_str} serai-ethereum-relayer\n\"#\n  );\n\n  let run = os(Os::Debian, \"\", \"ethereumrelayer\") + &run_ethereum_relayer;\n  let res = setup + &run;\n\n  let mut ethereum_relayer_path = orchestration_path.to_path_buf();\n  ethereum_relayer_path.push(\"networks\");\n  ethereum_relayer_path.push(\"ethereum-relayer\");\n  ethereum_relayer_path.push(\"Dockerfile\");\n\n  write_dockerfile(ethereum_relayer_path, &res);\n}\n"
  },
  {
    "path": "orchestration/src/main.rs",
    "content": "// TODO: Generate randomized RPC credentials for all services\n// TODO: Generate keys for a validator and the infra\n\nuse core::ops::Deref;\nuse std::{\n  collections::{HashSet, HashMap},\n  env,\n  path::PathBuf,\n  io::Write,\n  fs,\n  process::{Stdio, Command},\n};\n\nuse zeroize::Zeroizing;\n\nuse rand_core::{RngCore, SeedableRng, OsRng};\nuse rand_chacha::ChaCha20Rng;\n\nuse transcript::{Transcript, RecommendedTranscript};\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::{\n  group::{\n    ff::{Field, PrimeField},\n    GroupEncoding,\n  },\n  Ciphersuite,\n};\n\nmod mimalloc;\nuse mimalloc::mimalloc;\n\nmod networks;\nuse networks::*;\n\nmod ethereum_relayer;\nuse ethereum_relayer::ethereum_relayer;\n\nmod message_queue;\nuse message_queue::message_queue;\n\nmod processor;\nuse processor::processor;\n\nmod coordinator;\nuse coordinator::coordinator;\n\nmod serai;\nuse serai::serai;\n\nmod docker;\n\n#[global_allocator]\nstatic ALLOCATOR: zalloc::ZeroizingAlloc<std::alloc::System> =\n  zalloc::ZeroizingAlloc(std::alloc::System);\n\n#[derive(Clone, Copy, PartialEq, Eq, Debug, PartialOrd, Ord, Hash)]\npub enum Network {\n  Dev,\n  Testnet,\n}\n\nimpl Network {\n  pub fn db(&self) -> &'static str {\n    match self {\n      Network::Dev => \"parity-db\",\n      Network::Testnet => \"rocksdb\",\n    }\n  }\n\n  pub fn release(&self) -> bool {\n    match self {\n      Network::Dev => false,\n      Network::Testnet => true,\n    }\n  }\n\n  pub fn label(&self) -> &'static str {\n    match self {\n      Network::Dev => \"dev\",\n      Network::Testnet => \"testnet\",\n    }\n  }\n}\n\n#[derive(Clone, Copy, PartialEq, Eq, Debug, PartialOrd, Ord, Hash)]\nenum Os {\n  Alpine,\n  Debian,\n}\n\nfn os(os: Os, additional_root: &str, user: &str) -> String {\n  match os {\n    Os::Alpine => format!(\n      r#\"\nFROM alpine:latest AS image\n\nCOPY --from=mimalloc-alpine libmimalloc.so /usr/lib\nENV LD_PRELOAD=libmimalloc.so\n\nRUN apk update && apk 
upgrade\n\nRUN adduser --system --shell /sbin/nologin --disabled-password {user}\nRUN addgroup {user}\nRUN addgroup {user} {user}\n\n# Make the /volume directory and transfer it to the user\nRUN mkdir /volume && chown {user}:{user} /volume\n\n{additional_root}\n\n# Switch to a non-root user\nUSER {user}\n\nWORKDIR /home/{user}\n\"#\n    ),\n\n    Os::Debian => format!(\n      r#\"\nFROM debian:trixie-slim AS image\n\nCOPY --from=mimalloc-debian libmimalloc.so /usr/lib\nRUN echo \"/usr/lib/libmimalloc.so\" >> /etc/ld.so.preload\n\nRUN apt update && apt upgrade -y && apt autoremove -y && apt clean\n\nRUN useradd --system --user-group --create-home --shell /sbin/nologin {user}\n\n# Make the /volume directory and transfer it to the user\nRUN mkdir /volume && chown {user}:{user} /volume\n\n{additional_root}\n\n# Switch to a non-root user\nUSER {user}\n\nWORKDIR /home/{user}\n\"#\n    ),\n  }\n}\n\nfn build_serai_service(prelude: &str, release: bool, features: &str, package: &str) -> String {\n  let profile = if release { \"release\" } else { \"debug\" };\n  let profile_flag = if release { \"--release\" } else { \"\" };\n\n  format!(\n    r#\"\nFROM rust:1.90-slim-trixie AS builder\n\nCOPY --from=mimalloc-debian libmimalloc.so /usr/lib\nRUN echo \"/usr/lib/libmimalloc.so\" >> /etc/ld.so.preload\n\nRUN apt update && apt upgrade -y && apt autoremove -y && apt clean\n\n# Add dev dependencies\nRUN apt install -y pkg-config libclang-dev clang\n\n# Dependencies for the Serai node\nRUN apt install -y make protobuf-compiler\n\n# Add the wasm toolchain\nRUN rustup target add wasm32v1-none\n\n{prelude}\n\n# Add files for build\nADD patches /serai/patches\nADD common /serai/common\nADD crypto /serai/crypto\nADD networks /serai/networks\nADD message-queue /serai/message-queue\nADD processor /serai/processor\nADD coordinator /serai/coordinator\nADD substrate /serai/substrate\nADD orchestration/Cargo.toml /serai/orchestration/Cargo.toml\nADD orchestration/src 
/serai/orchestration/src\nADD mini /serai/mini\nADD tests /serai/tests\nADD Cargo.toml /serai\nADD Cargo.lock /serai\nADD AGPL-3.0 /serai\n\nWORKDIR /serai\n\n# Mount the caches and build\nRUN --mount=type=cache,target=/root/.cargo \\\n  --mount=type=cache,target=/usr/local/cargo/registry \\\n  --mount=type=cache,target=/usr/local/cargo/git \\\n  --mount=type=cache,target=/serai/target \\\n  mkdir /serai/bin && \\\n  cargo build {profile_flag} --features \"{features}\" -p {package} && \\\n  mv /serai/target/{profile}/{package} /serai/bin\n\"#\n  )\n}\n\npub fn write_dockerfile(path: PathBuf, dockerfile: &str) {\n  if let Ok(existing) = fs::read_to_string(&path).as_ref() {\n    if existing == dockerfile {\n      return;\n    }\n  }\n  fs::File::create(path).unwrap().write_all(dockerfile.as_bytes()).unwrap();\n}\n\nfn orchestration_path(network: Network) -> PathBuf {\n  let mut repo_path = env::current_exe().unwrap();\n  repo_path.pop();\n  assert!(repo_path.as_path().ends_with(\"debug\"));\n  repo_path.pop();\n  assert!(repo_path.as_path().ends_with(\"target\"));\n  repo_path.pop();\n\n  let mut orchestration_path = repo_path.clone();\n  orchestration_path.push(\"orchestration\");\n  orchestration_path.push(network.label());\n  orchestration_path\n}\n\ntype InfrastructureKeys =\n  HashMap<&'static str, (Zeroizing<<Ristretto as Ciphersuite>::F>, <Ristretto as Ciphersuite>::G)>;\nfn infrastructure_keys(network: Network) -> InfrastructureKeys {\n  // Generate entropy for the infrastructure keys\n\n  let entropy = if network == Network::Dev {\n    // Don't use actual entropy if this is a dev environment\n    Zeroizing::new([0; 32])\n  } else {\n    let path = home::home_dir()\n      .unwrap()\n      .join(\".serai\")\n      .join(network.label())\n      .join(\"infrastructure_keys_entropy\");\n    // Check if there's existing entropy\n    if let Ok(entropy) = fs::read(&path).map(Zeroizing::new) {\n      assert_eq!(entropy.len(), 32, \"entropy saved to disk wasn't 32 
bytes\");\n      let mut res = Zeroizing::new([0; 32]);\n      res.copy_from_slice(entropy.as_ref());\n      res\n    } else {\n      // If there isn't, generate fresh entropy\n      let mut res = Zeroizing::new([0; 32]);\n      OsRng.fill_bytes(res.as_mut());\n      fs::write(&path, &res).unwrap();\n      res\n    }\n  };\n\n  let mut transcript =\n    RecommendedTranscript::new(b\"Serai Orchestrator Infrastructure Keys Transcript\");\n  transcript.append_message(b\"network\", network.label().as_bytes());\n  transcript.append_message(b\"entropy\", entropy);\n  let mut rng = ChaCha20Rng::from_seed(transcript.rng_seed(b\"infrastructure_keys\"));\n\n  let mut key_pair = || {\n    let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut rng));\n    let public = Ristretto::generator() * key.deref();\n    (key, public)\n  };\n\n  HashMap::from([\n    (\"coordinator\", key_pair()),\n    (\"bitcoin\", key_pair()),\n    (\"ethereum\", key_pair()),\n    (\"monero\", key_pair()),\n  ])\n}\n\nfn dockerfiles(network: Network) {\n  let orchestration_path = orchestration_path(network);\n\n  bitcoin(&orchestration_path, network);\n  ethereum(&orchestration_path, network);\n  monero(&orchestration_path, network);\n  if network == Network::Dev {\n    monero_wallet_rpc(&orchestration_path);\n  }\n\n  let mut infrastructure_keys = infrastructure_keys(network);\n  let coordinator_key = infrastructure_keys.remove(\"coordinator\").unwrap();\n  let bitcoin_key = infrastructure_keys.remove(\"bitcoin\").unwrap();\n  let ethereum_key = infrastructure_keys.remove(\"ethereum\").unwrap();\n  let monero_key = infrastructure_keys.remove(\"monero\").unwrap();\n\n  ethereum_relayer(&orchestration_path, network);\n\n  message_queue(\n    &orchestration_path,\n    network,\n    coordinator_key.1,\n    bitcoin_key.1,\n    ethereum_key.1,\n    monero_key.1,\n  );\n\n  let new_entropy = || {\n    let mut res = Zeroizing::new([0; 32]);\n    OsRng.fill_bytes(res.as_mut());\n    res\n  };\n  
processor(\n    &orchestration_path,\n    network,\n    \"bitcoin\",\n    coordinator_key.1,\n    bitcoin_key.0,\n    new_entropy(),\n  );\n  processor(\n    &orchestration_path,\n    network,\n    \"ethereum\",\n    coordinator_key.1,\n    ethereum_key.0,\n    new_entropy(),\n  );\n  processor(&orchestration_path, network, \"monero\", coordinator_key.1, monero_key.0, new_entropy());\n\n  let serai_key = {\n    let serai_key = Zeroizing::new(\n      fs::read(home::home_dir().unwrap().join(\".serai\").join(network.label()).join(\"key\"))\n        .expect(\"couldn't read key for this network\"),\n    );\n    let mut serai_key_repr =\n      Zeroizing::new(<<Ristretto as Ciphersuite>::F as PrimeField>::Repr::default());\n    serai_key_repr.as_mut().copy_from_slice(serai_key.as_ref());\n    Zeroizing::new(<Ristretto as Ciphersuite>::F::from_repr(*serai_key_repr).unwrap())\n  };\n\n  coordinator(&orchestration_path, network, coordinator_key.0, &serai_key);\n\n  serai(&orchestration_path, network, &serai_key);\n}\n\nfn key_gen(network: Network) {\n  let serai_dir = home::home_dir().unwrap().join(\".serai\").join(network.label());\n  let key_file = serai_dir.join(\"key\");\n  if fs::File::open(&key_file).is_ok() {\n    println!(\"already created key\");\n    return;\n  }\n\n  let key = <Ristretto as Ciphersuite>::F::random(&mut OsRng);\n\n  let _ = fs::create_dir_all(&serai_dir);\n  fs::write(key_file, key.to_repr()).expect(\"couldn't write key\");\n\n  println!(\n    \"Public Key: {}\",\n    hex::encode((<Ristretto as Ciphersuite>::generator() * key).to_bytes())\n  );\n}\n\nfn start(network: Network, services: HashSet<String>) {\n  // Create the serai network\n  Command::new(\"docker\")\n    .arg(\"network\")\n    .arg(\"create\")\n    .arg(\"--driver\")\n    .arg(\"bridge\")\n    .arg(\"serai\")\n    .output()\n    .unwrap();\n\n  for service in services {\n    println!(\"Starting {service}\");\n    let name = match service.as_ref() {\n      \"serai\" => \"serai\",\n     
 \"coordinator\" => \"coordinator\",\n      \"ethereum-relayer\" => \"ethereum-relayer\",\n      \"message-queue\" => \"message-queue\",\n      \"bitcoin-daemon\" => \"bitcoin\",\n      \"bitcoin-processor\" => \"bitcoin-processor\",\n      \"monero-daemon\" => \"monero\",\n      \"monero-processor\" => \"monero-processor\",\n      \"monero-wallet-rpc\" => \"monero-wallet-rpc\",\n      _ => panic!(\"starting unrecognized service\"),\n    };\n\n    // If we're building the Serai service, first build the runtime\n    let serai_runtime_volume = format!(\"serai-{}-runtime-volume\", network.label());\n    if name == \"serai\" {\n      // Check if it's built by checking if the volume has the expected runtime file\n      let wasm_build_container_name = format!(\"serai-{}-runtime\", network.label());\n      let built = || {\n        if let Ok(state_and_status) = Command::new(\"docker\")\n          .arg(\"inspect\")\n          .arg(\"-f\")\n          .arg(\"{{.State.Status}}:{{.State.ExitCode}}\")\n          .arg(&wasm_build_container_name)\n          .output()\n        {\n          if let Ok(state_and_status) = String::from_utf8(state_and_status.stdout) {\n            return state_and_status.trim() == \"exited:0\";\n          }\n        }\n        false\n      };\n\n      if !built() {\n        let mut repo_path = env::current_exe().unwrap();\n        repo_path.pop();\n        if repo_path.as_path().ends_with(\"deps\") {\n          repo_path.pop();\n        }\n        assert!(repo_path.as_path().ends_with(\"debug\") || repo_path.as_path().ends_with(\"release\"));\n        repo_path.pop();\n        assert!(repo_path.as_path().ends_with(\"target\"));\n        repo_path.pop();\n\n        // Build the image to build the runtime\n        if !Command::new(\"docker\")\n          .current_dir(&repo_path)\n          .arg(\"build\")\n          .arg(\"-f\")\n          .arg(\"orchestration/runtime/Dockerfile\")\n          .arg(\".\")\n          .arg(\"-t\")\n          
.arg(format!(\"serai-{}-runtime-img\", network.label()))\n          .spawn()\n          .unwrap()\n          .wait()\n          .unwrap()\n          .success()\n        {\n          panic!(\"failed to build runtime image\");\n        }\n\n        // Run the image, building the runtime\n        println!(\"Building the Serai runtime\");\n        let container_name = format!(\"serai-{}-runtime\", network.label());\n        let _ =\n          Command::new(\"docker\").arg(\"rm\").arg(\"-f\").arg(&container_name).spawn().unwrap().wait();\n        let _ = Command::new(\"docker\")\n          .arg(\"run\")\n          .arg(\"--name\")\n          .arg(container_name)\n          .arg(\"--volume\")\n          .arg(format!(\"{serai_runtime_volume}:/volume\"))\n          .arg(format!(\"serai-{}-runtime-img\", network.label()))\n          .spawn();\n\n        // Wait until its built\n        let mut ticks = 0;\n        while !built() {\n          std::thread::sleep(core::time::Duration::from_secs(60));\n          ticks += 1;\n          if ticks > 6 * 60 {\n            panic!(\"couldn't build the runtime after 6 hours\")\n          }\n        }\n      }\n    }\n\n    // Build it\n    println!(\"Building {service}\");\n    docker::build(&orchestration_path(network), network, name);\n\n    let docker_name = format!(\"serai-{}-{name}\", network.label());\n    let docker_image = format!(\"{docker_name}-img\");\n    if !Command::new(\"docker\")\n      .arg(\"container\")\n      .arg(\"inspect\")\n      .arg(&docker_name)\n      // Use null for all IO to silence 'container does not exist'\n      .stdin(Stdio::null())\n      .stdout(Stdio::null())\n      .stderr(Stdio::null())\n      .status()\n      .unwrap()\n      .success()\n    {\n      // Create the docker container\n      println!(\"Creating new container for {service}\");\n      let volume = format!(\"serai-{}-{name}-volume:/volume\", network.label());\n      let mut command = Command::new(\"docker\");\n      let command = 
command.arg(\"create\").arg(\"--name\").arg(&docker_name);\n      let command = command.arg(\"--network\").arg(\"serai\");\n      let command = command.arg(\"--restart\").arg(\"always\");\n      let command = command.arg(\"--log-opt\").arg(\"max-size=100m\");\n      let command = command.arg(\"--log-opt\").arg(\"max-file=3\");\n      let command = if network == Network::Dev {\n        command\n      } else {\n        // Assign a persistent volume if this isn't for Dev\n        command.arg(\"--volume\").arg(volume)\n      };\n      let command = match name {\n        \"bitcoin\" => {\n          // Expose the RPC for tests\n          if network == Network::Dev {\n            command.arg(\"-p\").arg(\"8332:8332\")\n          } else {\n            command\n          }\n        }\n        \"ethereum-relayer\" => {\n          // Expose the router command fetch server\n          command.arg(\"-p\").arg(\"20831:20831\")\n        }\n        \"monero\" => {\n          // Expose the RPC for tests\n          if network == Network::Dev {\n            command.arg(\"-p\").arg(\"18081:18081\")\n          } else {\n            command\n          }\n        }\n        \"monero-wallet-rpc\" => {\n          assert_eq!(network, Network::Dev, \"monero-wallet-rpc is only for dev\");\n          // Expose the RPC for tests\n          command.arg(\"-p\").arg(\"18082:18082\")\n        }\n        \"coordinator\" => {\n          if network == Network::Dev {\n            command\n          } else {\n            // Publish the port\n            command.arg(\"-p\").arg(\"30563:30563\")\n          }\n        }\n        \"serai\" => {\n          let command = command.arg(\"--volume\").arg(format!(\"{serai_runtime_volume}:/runtime\"));\n          if network == Network::Dev {\n            command\n          } else {\n            // Publish the port\n            command.arg(\"-p\").arg(\"30333:30333\")\n          }\n        }\n        _ => command,\n      };\n      assert!(\n        
command.arg(docker_image).status().unwrap().success(),\n        \"couldn't create the container\"\n      );\n    }\n\n    // Start it\n    // TODO: Check it successfully started\n    println!(\"Starting existing container for {service}\");\n    let _ = Command::new(\"docker\").arg(\"start\").arg(docker_name).output();\n  }\n}\n\nfn main() {\n  let help = || -> ! {\n    println!(\n      r#\"\nSerai Orchestrator v0.0.1\n\nCommands:\n  key_gen *network*\n    Generate a key for the validator.\n\n  setup *network*\n    Generate the Dockerfiles for every Serai service.\n\n  start *network* [service1, service2...]\n    Start the specified services for the specified network (\"dev\" or \"testnet\").\n\n    - `serai`\n    - `coordinator`\n    - `message-queue`\n    - `bitcoin-daemon`\n    - `bitcoin-processor`\n    - `ethereum-daemon`\n    - `ethereum-processor`\n    - `ethereum-relayer`\n    - `monero-daemon`\n    - `monero-processor`\n    - `monero-wallet-rpc` (if \"dev\")\n\n    are valid services.\n\n    `*network*-processor` will automatically start `*network*-daemon`.\n\"#\n    );\n    std::process::exit(1);\n  };\n\n  let mut args = env::args();\n  args.next();\n  let command = args.next();\n  let network = match args.next().as_ref().map(AsRef::as_ref) {\n    Some(\"dev\") => Network::Dev,\n    Some(\"testnet\") => Network::Testnet,\n    Some(_) => panic!(r#\"unrecognized network. 
only \"dev\" and \"testnet\" are recognized\"#),\n    None => help(),\n  };\n\n  match command.as_ref().map(AsRef::as_ref) {\n    Some(\"key_gen\") => {\n      key_gen(network);\n    }\n    Some(\"setup\") => {\n      dockerfiles(network);\n    }\n    Some(\"start\") => {\n      let mut services = HashSet::new();\n      for arg in args {\n        if arg == \"ethereum-processor\" {\n          services.insert(\"ethereum-relayer\".to_string());\n        }\n        if let Some(ext_network) = arg.strip_suffix(\"-processor\") {\n          services.insert(ext_network.to_string() + \"-daemon\");\n        }\n        services.insert(arg);\n      }\n\n      start(network, services);\n    }\n    _ => help(),\n  }\n}\n"
  },
  {
    "path": "orchestration/src/message_queue.rs",
    "content": "use std::path::Path;\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::{group::GroupEncoding, Ciphersuite};\n\nuse crate::{Network, Os, mimalloc, os, build_serai_service, write_dockerfile};\n\npub fn message_queue(\n  orchestration_path: &Path,\n  network: Network,\n  coordinator_key: <Ristretto as Ciphersuite>::G,\n  bitcoin_key: <Ristretto as Ciphersuite>::G,\n  ethereum_key: <Ristretto as Ciphersuite>::G,\n  monero_key: <Ristretto as Ciphersuite>::G,\n) {\n  let setup = mimalloc(Os::Debian).to_string() +\n    &build_serai_service(\"\", network.release(), network.db(), \"serai-message-queue\");\n\n  let env_vars = [\n    (\"COORDINATOR_KEY\", hex::encode(coordinator_key.to_bytes())),\n    (\"BITCOIN_KEY\", hex::encode(bitcoin_key.to_bytes())),\n    (\"ETHEREUM_KEY\", hex::encode(ethereum_key.to_bytes())),\n    (\"MONERO_KEY\", hex::encode(monero_key.to_bytes())),\n    (\"DB_PATH\", \"/volume/message-queue-db\".to_string()),\n    (\"RUST_LOG\", \"info,serai_message_queue=trace\".to_string()),\n  ];\n  let mut env_vars_str = String::new();\n  for (env_var, value) in env_vars {\n    env_vars_str += &format!(r#\"{env_var}=${{{env_var}:=\"{value}\"}} \"#);\n  }\n\n  let run_message_queue = format!(\n    r#\"\n# Copy the Message Queue binary and relevant license\nCOPY --from=builder --chown=messagequeue /serai/bin/serai-message-queue /bin\nCOPY --from=builder --chown=messagequeue /serai/AGPL-3.0 .\n\n# Run message-queue\nEXPOSE 2287\nCMD {env_vars_str} serai-message-queue\n\"#\n  );\n\n  let run = os(Os::Debian, \"\", \"messagequeue\") + &run_message_queue;\n  let res = setup + &run;\n\n  let mut message_queue_path = orchestration_path.to_path_buf();\n  message_queue_path.push(\"message-queue\");\n  message_queue_path.push(\"Dockerfile\");\n\n  write_dockerfile(message_queue_path, &res);\n}\n"
  },
  {
    "path": "orchestration/src/mimalloc.rs",
    "content": "use crate::Os;\n\npub fn mimalloc(os: Os) -> &'static str {\n  const ALPINE_MIMALLOC: &str = r#\"\nFROM alpine:latest AS mimalloc-alpine\n\nRUN apk update && apk upgrade && apk --no-cache add gcc g++ libc-dev make cmake git\nRUN git clone https://github.com/microsoft/mimalloc && \\\n  cd mimalloc && \\\n  git checkout 43ce4bd7fd34bcc730c1c7471c99995597415488 && \\\n  mkdir -p out/secure && \\\n  cd out/secure && \\\n  cmake -DMI_SECURE=ON ../.. && \\\n  make && \\\n  cp ./libmimalloc-secure.so ../../../libmimalloc.so\n\"#;\n\n  const DEBIAN_MIMALLOC: &str = r#\"\nFROM debian:trixie-slim AS mimalloc-debian\n\nRUN apt update && apt upgrade -y && apt install -y gcc g++ make cmake git\nRUN git clone https://github.com/microsoft/mimalloc && \\\n  cd mimalloc && \\\n  git checkout 43ce4bd7fd34bcc730c1c7471c99995597415488 && \\\n  mkdir -p out/secure && \\\n  cd out/secure && \\\n  cmake -DMI_SECURE=ON ../.. && \\\n  make && \\\n  cp ./libmimalloc-secure.so ../../../libmimalloc.so\n\"#;\n\n  match os {\n    Os::Alpine => ALPINE_MIMALLOC,\n    Os::Debian => DEBIAN_MIMALLOC,\n  }\n}\n"
  },
  {
    "path": "orchestration/src/networks/bitcoin.rs",
    "content": "use std::path::Path;\n\nuse crate::{Network, Os, mimalloc, os, write_dockerfile};\n\npub fn bitcoin(orchestration_path: &Path, network: Network) {\n  #[rustfmt::skip]\n  const DOWNLOAD_BITCOIN: &str = r#\"\nFROM alpine:latest AS bitcoin\n\nENV BITCOIN_VERSION=27.1\n\nRUN apk --no-cache add wget git gnupg\n\n# Download Bitcoin\nRUN wget -4 https://bitcoincore.org/bin/bitcoin-core-${BITCOIN_VERSION}/bitcoin-${BITCOIN_VERSION}-$(uname -m)-linux-gnu.tar.gz\nRUN wget -4 https://bitcoincore.org/bin/bitcoin-core-${BITCOIN_VERSION}/SHA256SUMS\nRUN wget -4 https://bitcoincore.org/bin/bitcoin-core-${BITCOIN_VERSION}/SHA256SUMS.asc\n\n# Verify all sigs and check for a valid signature from laanwj -- 71A3\nRUN git clone https://github.com/bitcoin-core/guix.sigs && \\\n  cd guix.sigs/builder-keys && \\\n  find . -iname '*.gpg' -exec gpg --import {} \\; && \\\n  gpg --verify --status-fd 1 --verify ../../SHA256SUMS.asc ../../SHA256SUMS | grep \"^\\[GNUPG:\\] VALIDSIG.*71A3B16735405025D447E8F274810B012346C9A6\"\n\nRUN grep bitcoin-${BITCOIN_VERSION}-$(uname -m)-linux-gnu.tar.gz SHA256SUMS | sha256sum -c\n\n# Prepare Image\nRUN tar xzvf bitcoin-${BITCOIN_VERSION}-$(uname -m)-linux-gnu.tar.gz\nRUN mv bitcoin-${BITCOIN_VERSION}/bin/bitcoind .\n\"#;\n\n  let setup = mimalloc(Os::Debian).to_string() + DOWNLOAD_BITCOIN;\n\n  let run_bitcoin = format!(\n    r#\"\nCOPY --from=bitcoin --chown=bitcoin bitcoind /bin\n\nEXPOSE 8332 8333\n\nADD /orchestration/{}/networks/bitcoin/run.sh /\nCMD [\"/run.sh\"]\n\"#,\n    network.label()\n  );\n\n  let run = os(Os::Debian, \"\", \"bitcoin\") + &run_bitcoin;\n  let res = setup + &run;\n\n  let mut bitcoin_path = orchestration_path.to_path_buf();\n  bitcoin_path.push(\"networks\");\n  bitcoin_path.push(\"bitcoin\");\n  bitcoin_path.push(\"Dockerfile\");\n\n  write_dockerfile(bitcoin_path, &res);\n}\n"
  },
  {
    "path": "orchestration/src/networks/ethereum/consensus/lighthouse.rs",
    "content": "use crate::Network;\n\npub fn lighthouse(network: Network) -> (String, String, String) {\n  assert_ne!(network, Network::Dev);\n\n  #[rustfmt::skip]\n  const DOWNLOAD_LIGHTHOUSE: &str = r#\"\nFROM alpine:latest AS lighthouse\n\nENV LIGHTHOUSE_VERSION=5.1.3\n\nRUN apk --no-cache add wget git gnupg\n\n# Download lighthouse\nRUN wget -4 https://github.com/sigp/lighthouse/releases/download/v${LIGHTHOUSE_VERSION}/lighthouse-v${LIGHTHOUSE_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz\nRUN wget -4 https://github.com/sigp/lighthouse/releases/download/v${LIGHTHOUSE_VERSION}/lighthouse-v${LIGHTHOUSE_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz.asc\n\n# Verify the signature\ngpg --keyserver keyserver.ubuntu.com --recv-keys 15E66D941F697E28F49381F426416DC3F30674B0\ngpg --verify lighthouse-v${LIGHTHOUSE_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz.asc lighthouse-v${LIGHTHOUSE_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz\n\n# Extract lighthouse\nRUN tar xvf lighthouse-v${LIGHTHOUSE_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz\n\"#;\n\n  let run_lighthouse = format!(\n    r#\"\nCOPY --from=lighthouse --chown=ethereum lighthouse /bin\n\nADD /orchestration/{}/networks/ethereum/consensus/lighthouse/run.sh /consensus_layer.sh\n\"#,\n    network.label()\n  );\n\n  (DOWNLOAD_LIGHTHOUSE.to_string(), String::new(), run_lighthouse)\n}\n"
  },
  {
    "path": "orchestration/src/networks/ethereum/consensus/mod.rs",
    "content": "mod lighthouse;\n#[allow(unused)]\npub use lighthouse::lighthouse;\n\nmod nimbus;\npub use nimbus::nimbus;\n"
  },
  {
    "path": "orchestration/src/networks/ethereum/consensus/nimbus.rs",
    "content": "use crate::Network;\n\npub fn nimbus(network: Network) -> (String, String, String) {\n  assert_ne!(network, Network::Dev);\n\n  let platform = match std::env::consts::ARCH {\n    \"x86_64\" => \"amd64\",\n    \"arm\" => \"arm32v7\",\n    \"aarch64\" => \"arm64v8\",\n    _ => panic!(\"unsupported platform\"),\n  };\n\n  #[rustfmt::skip]\n  let checksum = match platform {\n    \"amd64\" => \"5da10222cfb555ce2e3820ece12e8e30318945e3ed4b2b88d295963c879daeee071623c47926f880f3db89ce537fd47c6b26fe37e47aafbae3222b58bcec2fba\",\n    \"arm32v7\" => \"7055da77bfa1186ee2e7ce2a48b923d45ccb039592f529c58d93d55a62bca46566ada451bd7497c3ae691260544f0faf303602afd85ccc18388fdfdac0bb2b45\",\n    \"arm64v8\" => \"1a68f44598462abfade0dbeb6adf10b52614ba03605a8bf487b99493deb41468317926ef2d657479fcc26fce640aeebdbd880956beec3fb110b5abc97bd83556\",\n    _ => panic!(\"unsupported platform\"),\n  };\n\n  #[rustfmt::skip]\n  let download_nimbus = format!(r#\"\nFROM alpine:latest AS nimbus\n\nENV NIMBUS_VERSION=24.3.0\nENV NIMBUS_COMMIT=dc19b082\n\nRUN apk --no-cache add wget\n\n# Download nimbus\nRUN wget -4 https://github.com/status-im/nimbus-eth2/releases/download/v${{NIMBUS_VERSION}}/nimbus-eth2_Linux_{platform}_${{NIMBUS_VERSION}}_${{NIMBUS_COMMIT}}.tar.gz\n\n# Extract nimbus\nRUN tar xvf nimbus-eth2_Linux_{platform}_${{NIMBUS_VERSION}}_${{NIMBUS_COMMIT}}.tar.gz\nRUN mv nimbus-eth2_Linux_{platform}_${{NIMBUS_VERSION}}_${{NIMBUS_COMMIT}}/build/nimbus_beacon_node ./nimbus\n\n# Verify the checksum\nRUN sha512sum nimbus | grep {checksum}\n\"#);\n\n  let run_nimbus = format!(\n    r#\"\nCOPY --from=nimbus --chown=ethereum nimbus /bin\n\nADD /orchestration/{}/networks/ethereum/consensus/nimbus/run.sh /consensus_layer.sh\n\"#,\n    network.label()\n  );\n\n  (download_nimbus, String::new(), run_nimbus)\n}\n"
  },
  {
    "path": "orchestration/src/networks/ethereum/execution/anvil.rs",
    "content": "use crate::Network;\n\npub fn anvil(network: Network) -> (String, String, String) {\n  assert_eq!(network, Network::Dev);\n\n  const ANVIL_SETUP: &str = r#\"\nRUN curl -L https://foundry.paradigm.xyz | bash || exit 0\nRUN ~/.foundry/bin/foundryup\n\nEXPOSE 8545\n\"#;\n\n  (String::new(), \"RUN apt install git curl -y\".to_string(), ANVIL_SETUP.to_string())\n}\n"
  },
  {
    "path": "orchestration/src/networks/ethereum/execution/mod.rs",
    "content": "mod reth;\npub use reth::reth;\n\nmod anvil;\npub use anvil::anvil;\n"
  },
  {
    "path": "orchestration/src/networks/ethereum/execution/reth.rs",
    "content": "use crate::Network;\n\npub fn reth(network: Network) -> (String, String, String) {\n  assert_ne!(network, Network::Dev);\n\n  #[rustfmt::skip]\n  const DOWNLOAD_RETH: &str = r#\"\nFROM alpine:latest AS reth\n\nENV RETH_VERSION=0.2.0-beta.6\n\nRUN apk --no-cache add wget git gnupg\n\n# Download reth\nRUN wget -4 https://github.com/paradigmxyz/reth/releases/download/v${RETH_VERSION}/reth-v${RETH_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz\nRUN wget -4 https://github.com/paradigmxyz/reth/releases/download/v${RETH_VERSION}/reth-v${RETH_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz.asc\n\n# Verify the signature\ngpg --keyserver keyserver.ubuntu.com --recv-keys A3AE097C89093A124049DF1F5391A3C4100530B4\ngpg --verify reth-v${RETH_VERSION}-$(uname -m).tar.gz.asc reth-v${RETH_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz\n\n# Extract reth\nRUN tar xvf reth-v${RETH_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz\n\"#;\n\n  let run_reth = format!(\n    r#\"\nCOPY --from=reth --chown=ethereum reth /bin\n\nEXPOSE 30303 9001 8545\n\nADD /orchestration/{}/networks/ethereum/execution/reth/run.sh /execution_layer.sh\n\"#,\n    network.label()\n  );\n\n  (DOWNLOAD_RETH.to_string(), String::new(), run_reth)\n}\n"
  },
  {
    "path": "orchestration/src/networks/ethereum/mod.rs",
    "content": "use std::path::Path;\n\nuse crate::{Network, Os, mimalloc, os, write_dockerfile};\n\nmod execution;\nuse execution::*;\n\nmod consensus;\nuse consensus::*;\n\npub fn ethereum(orchestration_path: &Path, network: Network) {\n  let ((el_download, el_run_as_root, el_run), (cl_download, cl_run_as_root, cl_run)) =\n    if network == Network::Dev {\n      (anvil(network), (String::new(), String::new(), String::new()))\n    } else {\n      // TODO: Select an EL/CL based off a RNG seeded from the public key\n      (reth(network), nimbus(network))\n    };\n\n  let download = mimalloc(Os::Alpine).to_string() + &el_download + &cl_download;\n\n  let run = format!(\n    r#\"\nADD /orchestration/{}/networks/ethereum/run.sh /run.sh\nCMD [\"/run.sh\"]\n\"#,\n    network.label()\n  );\n  let run = mimalloc(Os::Debian).to_string() +\n    &os(Os::Debian, &(el_run_as_root + \"\\r\\n\" + &cl_run_as_root), \"ethereum\") +\n    &el_run +\n    &cl_run +\n    &run;\n\n  let res = download + &run;\n\n  let mut ethereum_path = orchestration_path.to_path_buf();\n  ethereum_path.push(\"networks\");\n  ethereum_path.push(\"ethereum\");\n  ethereum_path.push(\"Dockerfile\");\n\n  write_dockerfile(ethereum_path, &res);\n}\n"
  },
  {
    "path": "orchestration/src/networks/mod.rs",
    "content": "mod bitcoin;\npub use bitcoin::*;\n\nmod ethereum;\npub use ethereum::*;\n\nmod monero;\npub use monero::*;\n"
  },
  {
    "path": "orchestration/src/networks/monero.rs",
    "content": "use std::path::Path;\n\nuse crate::{Network, Os, mimalloc, write_dockerfile};\n\nfn monero_internal(\n  network: Network,\n  os: Os,\n  orchestration_path: &Path,\n  folder: &str,\n  monero_binary: &str,\n  ports: &str,\n) {\n  const MONERO_VERSION: &str = \"0.18.3.4\";\n\n  let arch = match std::env::consts::ARCH {\n    // We probably would run this without issues yet it's not worth needing to provide support for\n    \"x86\" | \"arm\" => panic!(\"unsupported architecture, please download a 64-bit OS\"),\n    \"x86_64\" => \"x64\",\n    \"aarch64\" => \"armv8\",\n    _ => panic!(\"unsupported architecture\"),\n  };\n\n  #[rustfmt::skip]\n  let download_monero = format!(r#\"\nFROM alpine:latest AS monero\n\nRUN apk --no-cache add wget gnupg\n\n# Download Monero\nRUN wget -4 https://downloads.getmonero.org/cli/monero-linux-{arch}-v{MONERO_VERSION}.tar.bz2\n\n# Verify Binary -- fingerprint from https://github.com/monero-project/monero-site/issues/1949\nADD orchestration/{}/networks/monero/hashes-v{MONERO_VERSION}.txt .\nRUN gpg --keyserver hkp://keyserver.ubuntu.com:80 --keyserver-options no-self-sigs-only --receive-keys 81AC591FE9C4B65C5806AFC3F0AF4D462A0BDF92 && \\\n  gpg --verify hashes-v{MONERO_VERSION}.txt && \\\n  grep \"$(sha256sum monero-linux-{arch}-v{MONERO_VERSION}.tar.bz2 | cut -c 1-64)\" hashes-v{MONERO_VERSION}.txt\n\n# Extract it\nRUN tar -xvjf monero-linux-{arch}-v{MONERO_VERSION}.tar.bz2 --strip-components=1\n\"#,\n    network.label(),\n  );\n\n  let setup = mimalloc(os).to_string() + &download_monero;\n\n  let run_monero = format!(\n    r#\"\nCOPY --from=monero --chown=monero:nogroup {monero_binary} /bin\n\nEXPOSE {ports}\n\nADD /orchestration/{}/networks/{folder}/run.sh /\nCMD [\"/run.sh\"]\n\"#,\n    network.label(),\n  );\n\n  let run =\n    crate::os(os, if os == Os::Alpine { \"RUN apk --no-cache add gcompat\" } else { \"\" }, \"monero\") +\n      &run_monero;\n  let res = setup + &run;\n\n  let mut monero_path = 
orchestration_path.to_path_buf();\n  monero_path.push(\"networks\");\n  monero_path.push(folder);\n  monero_path.push(\"Dockerfile\");\n\n  write_dockerfile(monero_path, &res);\n}\n\npub fn monero(orchestration_path: &Path, network: Network) {\n  monero_internal(network, Os::Debian, orchestration_path, \"monero\", \"monerod\", \"18080 18081\")\n}\n\npub fn monero_wallet_rpc(orchestration_path: &Path) {\n  monero_internal(\n    Network::Dev,\n    Os::Debian,\n    orchestration_path,\n    \"monero-wallet-rpc\",\n    \"monero-wallet-rpc\",\n    \"18082\",\n  )\n}\n"
  },
  {
    "path": "orchestration/src/processor.rs",
    "content": "use std::path::Path;\n\nuse zeroize::Zeroizing;\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::{group::ff::PrimeField, Ciphersuite};\n\nuse crate::{Network, Os, mimalloc, os, build_serai_service, write_dockerfile};\n\n#[allow(clippy::needless_pass_by_value)]\npub fn processor(\n  orchestration_path: &Path,\n  network: Network,\n  coin: &'static str,\n  _coordinator_key: <Ristretto as Ciphersuite>::G,\n  coin_key: Zeroizing<<Ristretto as Ciphersuite>::F>,\n  entropy: Zeroizing<[u8; 32]>,\n) {\n  let setup = mimalloc(Os::Debian).to_string() +\n    &build_serai_service(\n      if coin == \"ethereum\" {\n        r#\"\nRUN cargo install svm-rs\nRUN svm install 0.8.26\nRUN svm use 0.8.26\n\"#\n      } else {\n        \"\"\n      },\n      network.release(),\n      &format!(\"binaries {} {coin}\", network.db()),\n      \"serai-processor\",\n    );\n\n  const ADDITIONAL_ROOT: &str = r#\"\n# Install ca-certificates\nRUN apt install -y ca-certificates\n\"#;\n\n  // TODO: Randomly generate these\n  const RPC_USER: &str = \"serai\";\n  const RPC_PASS: &str = \"seraidex\";\n  // TODO: Isolate networks\n  let hostname = format!(\"serai-{}-{coin}\", network.label());\n  let port = format!(\n    \"{}\",\n    match coin {\n      \"bitcoin\" => 8332,\n      \"ethereum\" => 8545,\n      \"monero\" => 18081,\n      _ => panic!(\"unrecognized external network\"),\n    }\n  );\n\n  let mut env_vars = vec![\n    (\"MESSAGE_QUEUE_RPC\", format!(\"serai-{}-message-queue\", network.label())),\n    (\"MESSAGE_QUEUE_KEY\", hex::encode(coin_key.to_repr())),\n    (\"ENTROPY\", hex::encode(entropy.as_ref())),\n    (\"NETWORK\", coin.to_string()),\n    (\"NETWORK_RPC_LOGIN\", format!(\"{RPC_USER}:{RPC_PASS}\")),\n    (\"NETWORK_RPC_HOSTNAME\", hostname),\n    (\"NETWORK_RPC_PORT\", port),\n    (\"DB_PATH\", \"/volume/processor-db\".to_string()),\n    (\"RUST_LOG\", \"info,serai_processor=debug\".to_string()),\n  ];\n  if coin == \"ethereum\" {\n    env_vars\n      
.push((\"ETHEREUM_RELAYER_HOSTNAME\", format!(\"serai-{}-ethereum-relayer\", network.label())));\n    env_vars.push((\"ETHEREUM_RELAYER_PORT\", \"20830\".to_string()));\n  }\n  let mut env_vars_str = String::new();\n  for (env_var, value) in env_vars {\n    env_vars_str += &format!(r#\"{env_var}=${{{env_var}:=\"{value}\"}} \"#);\n  }\n\n  let run_processor = format!(\n    r#\"\n# Copy the Processor binary and relevant license\nCOPY --from=builder --chown=processor /serai/bin/serai-processor /bin/\nCOPY --from=builder --chown=processor /serai/AGPL-3.0 .\n\n# Run processor\nCMD {env_vars_str} serai-processor\n\"#\n  );\n\n  let run = os(Os::Debian, ADDITIONAL_ROOT, \"processor\") + &run_processor;\n  let res = setup + &run;\n\n  let mut processor_path = orchestration_path.to_path_buf();\n  processor_path.push(\"processor\");\n  processor_path.push(coin);\n  processor_path.push(\"Dockerfile\");\n\n  write_dockerfile(processor_path, &res);\n}\n"
  },
  {
    "path": "orchestration/src/serai.rs",
    "content": "use std::path::Path;\n\nuse zeroize::Zeroizing;\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::{group::ff::PrimeField, Ciphersuite};\n\nuse crate::{Network, Os, mimalloc, os, build_serai_service, write_dockerfile};\n\npub fn serai(\n  orchestration_path: &Path,\n  network: Network,\n  serai_key: &Zeroizing<<Ristretto as Ciphersuite>::F>,\n) {\n  // Always builds in release for performance reasons\n  let setup = mimalloc(Os::Debian).to_string() + &build_serai_service(\"\", true, \"\", \"serai-node\");\n  let setup_fast_epoch =\n    mimalloc(Os::Debian).to_string() + &build_serai_service(\"\", true, \"fast-epoch\", \"serai-node\");\n\n  let env_vars = [(\"KEY\", hex::encode(serai_key.to_repr()))];\n  let mut env_vars_str = String::new();\n  for (env_var, value) in env_vars {\n    env_vars_str += &format!(r#\"{env_var}=${{{env_var}:=\"{value}\"}} \"#);\n  }\n\n  let run_serai = format!(\n    r#\"\n# Copy the Serai binary and relevant license\nCOPY --from=builder --chown=serai /serai/bin/serai-node /bin/\nCOPY --from=builder --chown=serai /serai/AGPL-3.0 .\n\n# Run the Serai node\nEXPOSE 30333 9944\n\nADD /orchestration/{}/serai/run.sh /\nCMD {env_vars_str} \"/run.sh\"\n\"#,\n    network.label(),\n  );\n\n  let run = os(Os::Debian, \"\", \"serai\") + &run_serai;\n  let res = setup + &run;\n  let res_fast_epoch = setup_fast_epoch + &run;\n\n  let mut serai_path = orchestration_path.to_path_buf();\n  serai_path.push(\"serai\");\n\n  let mut serai_fast_epoch_path = serai_path.clone();\n\n  serai_path.push(\"Dockerfile\");\n  serai_fast_epoch_path.push(\"Dockerfile.fast-epoch\");\n\n  write_dockerfile(serai_path, &res);\n  write_dockerfile(serai_fast_epoch_path, &res_fast_epoch);\n}\n"
  },
  {
    "path": "orchestration/testnet/coordinator/.folder",
    "content": ""
  },
  {
    "path": "orchestration/testnet/message-queue/.folder",
    "content": ""
  },
  {
    "path": "orchestration/testnet/networks/bitcoin/run.sh",
    "content": "#!/bin/sh\n\nRPC_USER=\"${RPC_USER:=serai}\"\nRPC_PASS=\"${RPC_PASS:=seraidex}\"\n\nbitcoind -txindex -testnet -port=8333 \\\n  -rpcuser=$RPC_USER -rpcpassword=$RPC_PASS \\\n  -rpcbind=0.0.0.0 -rpcallowip=0.0.0.0/0 -rpcport=8332 \\\n  --datadir=/volume\n"
  },
  {
    "path": "orchestration/testnet/networks/ethereum/consensus/lighthouse/run.sh",
    "content": "#!/bin/sh\n\nRUST_LOG=info lighthouse bn --execution-endpoint http://localhost:8551 --execution-jwt /home/ethereum/.jwt\n"
  },
  {
    "path": "orchestration/testnet/networks/ethereum/consensus/nimbus/run.sh",
    "content": "#!/bin/sh\n\nexit 1\n"
  },
  {
    "path": "orchestration/testnet/networks/ethereum/execution/geth/run.sh",
    "content": "#!/bin/sh\n\n#geth --dev --networkid 5208 \\\n#  --http --http.api \"web3,net,eth,miner\" \\\n#  --http.addr 0.0.0.0 --http.port 8545 \\\n#  --http.vhosts=\"*\" --http.corsdomain \"*\"\n\nexit 1\n"
  },
  {
    "path": "orchestration/testnet/networks/ethereum/execution/reth/run.sh",
    "content": "#!/bin/sh\n\nRUST_LOG=info reth node --authrpc.jwtsecret /home/ethereum/.jwt\n"
  },
  {
    "path": "orchestration/testnet/networks/ethereum/run.sh",
    "content": "/execution_layer.sh & /consensus_layer.sh\n"
  },
  {
    "path": "orchestration/testnet/networks/ethereum-relayer/.folder",
    "content": "#!/bin/sh\n\nRPC_USER=\"${RPC_USER:=serai}\"\nRPC_PASS=\"${RPC_PASS:=seraidex}\"\n\n# Run Monero\nmonerod --non-interactive --regtest --offline --fixed-difficulty=1 \\\n  --no-zmq --rpc-bind-ip=0.0.0.0 --rpc-bind-port=18081 --confirm-external-bind \\\n  --rpc-access-control-origins \"*\" --disable-rpc-ban \\\n  --rpc-login=$RPC_USER:$RPC_PASS \\\n  $1\n"
  },
  {
    "path": "orchestration/testnet/networks/monero/hashes-v0.18.3.4.txt",
    "content": "-----BEGIN PGP SIGNED MESSAGE-----\nHash: SHA256\n\n# This GPG-signed message exists to confirm the SHA256 sums of Monero binaries.\n#\n# Please verify the signature against the key for binaryFate in the\n# source code repository (/utils/gpg_keys).\n#\n#\n## CLI\n15e4d7dfc2f9261a0a452b0f8fd157c33cdbc8a896e23d883ddd13e2480a3800  monero-android-armv7-v0.18.3.4.tar.bz2\nd9c9249d1408822ce36b346c6b9fb6b896cda16714d62117fb1c588a5201763c  monero-android-armv8-v0.18.3.4.tar.bz2\n360a551388922c8991a9ba4abaa88676b0fc7ec1fa4d0f4b5c0500847e0b946c  monero-freebsd-x64-v0.18.3.4.tar.bz2\n354603c56446fb0551cdd6933bce5a13590b7881e05979b7ec25d89e7e59a0e2  monero-linux-armv7-v0.18.3.4.tar.bz2\n33ca2f0055529d225b61314c56370e35606b40edad61c91c859f873ed67a1ea7  monero-linux-armv8-v0.18.3.4.tar.bz2\n88739a1521b9fda3154540268e416c7af016ed7857041c76ab8ed7d7674c71ca  monero-linux-riscv64-v0.18.3.4.tar.bz2\n51ba03928d189c1c11b5379cab17dd9ae8d2230056dc05c872d0f8dba4a87f1d  monero-linux-x64-v0.18.3.4.tar.bz2\nd7ca0878abff2919a0104d7ed29d9c35df9ca0ea1b6fb4ebf6c8f7607ffb9e41  monero-linux-x86-v0.18.3.4.tar.bz2\n44520cb3a05c2518ca9aeae1b2e3080fe2bba1e3596d014ceff1090dfcba8ab4  monero-mac-armv8-v0.18.3.4.tar.bz2\n32c449f562216d3d83154e708471236d07db7477d6b67f1936a0a85a5005f2b8  monero-mac-x64-v0.18.3.4.tar.bz2\n54a66db6c892b2a0999754841f4ca68511741b88ea3ab20c7cd504a027f465f5  monero-win-x64-v0.18.3.4.zip\n1a9824742aa1587023c3bddea788c115940cfd49371c78a8dd62c40113132d01  monero-win-x86-v0.18.3.4.zip\n7d4845ec0a3b52404d41785da348ec33509f0a5981e8a27c5fa55b18d696e139  monero-source-v0.18.3.4.tar.bz2\n#\n## GUI\n63349d5a7637cd0c5d1693a1a2e910a92cbb123903d57667077a36454845d7bf  monero-gui-install-win-x64-v0.18.3.4.exe\n2866f3a2be30e4c4113e6274cad1d6698f81c37ceebc6e8f084c57230a0f70a6  monero-gui-linux-x64-v0.18.3.4.tar.bz2\needbf827513607a3ef579077dacd573e65892b199102effef97dff9d73138ca6  monero-gui-mac-armv8-v0.18.3.4.dmg\n54eb151d7511a9f26130864e2c02f258344803b2b68311c8be29850d7faef359  
monero-gui-mac-x64-v0.18.3.4.dmg\nb5d42dddd722e728e480337f89038c8ea606c6507bf0c88ddf2af25050c9b751  monero-gui-win-x64-v0.18.3.4.zip\n2f1d643bb2cc08e5eb334a6bfd649b0aa95ceb6178ff2f90448d5ef8d2a752a6  monero-gui-source-v0.18.3.4.tar.bz2\n#\n#\n# ~binaryFate\n-----BEGIN PGP SIGNATURE-----\n\niQIzBAEBCAAdFiEEgaxZH+nEtlxYBq/D8K9NRioL35IFAmbF8bAACgkQ8K9NRioL\n35KQAQ/7BP9j0Tx+zlFs3zbVIFXzfoPbGo2/uerM4xUWX/NUoI7XDTGWV2lpcR1x\no6eqstbuHciY0Aj2MsICsdqD+1PYW0EBZlfNLMrk161c3nQMJcjCE65uIhbLkOSs\n6SUakmpxkueQOE/Ug5Afaa/JBATVTxLTmqSCI7Ai9NplF+6KNauXQXNrlwO/gHcd\nwhYDmsqp2JyOtMpMlpOckzLgg7Oroj7B0LBf78Z13p1naUyPooBaIEXSdKm5g2HI\nvPd+z1bOVIluqPBnYWUwL7EmXy08/broejHGliQ+2iY9IsmDDx6rnSe/oprNEDic\nl+/w3KvPcTkBh8hJLVDyYieYdVYHqOktIPlR1dKV512CnuP1ljr/CXjJmkAkXHlg\nbObMUCIM9UYqp1I+KDaArjYNbzkHK02Lu6sak49GXgEuq66m9t4isF2GdcHrbERs\ncLGsnhkTO2LtnGcziOC2l9XSzL41swxe0GrkK0rdeiyDCGAlb7hllevFy7zlT90l\nJw670TyFVBs8fUFHk/tOtT0ivSDJJg8m9waBzi/46ksOvuid6p3P3a0agqu3uclj\nrscSpk0JS3E/3+A/N0IaiTmUO5zSjbsCrSnxQjcfrRRtERL+6JVHFVlW+nJzYWWH\nu0O7bNZSqEruR4aTEtsddLgs57I10thDR5SUONuAqbEq8EYN8OE=\n=aLFR\n-----END PGP SIGNATURE-----\n"
  },
  {
    "path": "orchestration/testnet/networks/monero/run.sh",
    "content": "#!/bin/sh\n\nRPC_USER=\"${RPC_USER:=serai}\"\nRPC_PASS=\"${RPC_PASS:=seraidex}\"\n\n# Run Monero\nmonerod --non-interactive --stagenet \\\n  --no-zmq --rpc-bind-ip=0.0.0.0 --rpc-bind-port=18081 --confirm-external-bind \\\n  --rpc-access-control-origins \"*\" --disable-rpc-ban \\\n  --rpc-login=$RPC_USER:$RPC_PASS \\\n  --data-dir=/volume\n"
  },
  {
    "path": "orchestration/testnet/processor/bitcoin/.folder",
    "content": ""
  },
  {
    "path": "orchestration/testnet/processor/ethereum/.folder",
    "content": ""
  },
  {
    "path": "orchestration/testnet/processor/monero/.folder",
    "content": ""
  },
  {
    "path": "orchestration/testnet/serai/run.sh",
    "content": "#!/bin/sh\n\nserai-node --base-path /volume --unsafe-rpc-external --rpc-cors all --chain testnet --validator\n"
  },
  {
    "path": "patches/directories-next/Cargo.toml",
    "content": "[package]\nname = \"directories-next\"\nversion = \"2.0.0\"\ndescription = \"Patch from directories-next back to directories\"\nlicense = \"MIT\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/patches/directories-next\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nkeywords = []\nedition = \"2021\"\nrust-version = \"1.74\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[dependencies]\ndirectories = \"5\"\n"
  },
  {
    "path": "patches/directories-next/src/lib.rs",
    "content": "pub use directories::*;\n"
  },
  {
    "path": "patches/home/Cargo.toml",
    "content": "[package]\nname = \"home\"\nversion = \"0.5.99\"\ndescription = \"Replacement for `home` which uses the `std` impl\"\nlicense = \"MIT\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/patches/home\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nkeywords = []\nedition = \"2024\"\nrust-version = \"1.85\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[workspace]\n"
  },
  {
    "path": "patches/home/src/lib.rs",
    "content": "pub use std::env::home_dir;\n"
  },
  {
    "path": "patches/matches/Cargo.toml",
    "content": "[package]\nname = \"matches\"\nversion = \"0.1.10\"\ndescription = \"Replacement for the matches polyfill which uses the std impl\"\nlicense = \"MIT\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/patches/matches\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nkeywords = []\nedition = \"2021\"\nrust-version = \"1.56\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n"
  },
  {
    "path": "patches/matches/src/lib.rs",
    "content": "pub use std::matches;\n"
  },
  {
    "path": "patches/option-ext/Cargo.toml",
    "content": "[package]\nname = \"option-ext\"\nversion = \"0.2.0\"\ndescription = \"Non-MPL option-ext with the exactly needed API for directories\"\nlicense = \"MIT\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/patches/option-ext\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nkeywords = []\nedition = \"2021\"\nrust-version = \"1.74\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n"
  },
  {
    "path": "patches/option-ext/src/lib.rs",
    "content": "pub trait OptionExt<T: PartialEq> {\n  fn contains(&self, x: &T) -> bool;\n}\nimpl<T: PartialEq> OptionExt<T> for Option<T> {\n  fn contains(&self, x: &T) -> bool {\n    self.as_ref() == Some(x)\n  }\n}\n"
  },
  {
    "path": "processor/Cargo.toml",
    "content": "[package]\nname = \"serai-processor\"\nversion = \"0.1.0\"\ndescription = \"Multichain processor premised on canonicity to reach distributed consensus automatically\"\nlicense = \"AGPL-3.0-only\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/processor\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nkeywords = []\nedition = \"2021\"\npublish = false\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\n# Macros\nasync-trait = { version = \"0.1\", default-features = false }\nzeroize = { version = \"1\", default-features = false, features = [\"std\"] }\nthiserror = { version = \"1\", default-features = false }\n\n# Libs\nrand_core = { version = \"0.6\", default-features = false, features = [\"std\", \"getrandom\"] }\nrand_chacha = { version = \"0.3\", default-features = false, features = [\"std\"] }\n\n# Encoders\nconst-hex = { version = \"1\", default-features = false }\nhex = { version = \"0.4\", default-features = false, features = [\"std\"] }\nscale = { package = \"parity-scale-codec\", version = \"3\", default-features = false, features = [\"std\"] }\nborsh = { version = \"1\", default-features = false, features = [\"std\", \"derive\", \"de_strict_order\"] }\nserde_json = { version = \"1\", default-features = false, features = [\"std\"] }\n\n# Cryptography\nblake2 = { version = \"0.10\", default-features = false, features = [\"std\"] }\ndalek-ff-group = { path = \"../crypto/dalek-ff-group\", default-features = false, features = [\"std\"] }\nciphersuite = { path = \"../crypto/ciphersuite\", default-features = false, features = [\"std\"] }\n\ntranscript = { package = \"flexible-transcript\", path = \"../crypto/transcript\", default-features = false, features = [\"std\"] }\ndkg-pedpop = { path = \"../crypto/dkg/pedpop\", default-features = false }\nfrost = { package = \"modular-frost\", path = \"../crypto/frost\", default-features = 
false, features = [\"ristretto\"] }\nfrost-schnorrkel = { path = \"../crypto/schnorrkel\", default-features = false }\n\n# Bitcoin/Ethereum\nk256 = { version = \"^0.13.1\", default-features = false, features = [\"std\"], optional = true }\n\n# Bitcoin\nsecp256k1 = { version = \"0.29\", default-features = false, features = [\"std\", \"global-context\", \"rand-std\"], optional = true }\nbitcoin-serai = { path = \"../networks/bitcoin\", default-features = false, features = [\"std\"], optional = true }\n\n# Ethereum\nciphersuite-kp256 = { path = \"../crypto/ciphersuite/kp256\", default-features = false, features = [\"std\"], optional = true }\nethereum-serai = { path = \"../networks/ethereum\", default-features = false, optional = true }\n\n# Monero\nmonero-simple-request-rpc = { git = \"https://github.com/monero-oxide/monero-oxide\", rev = \"32e6b5fe5ba9e1ea3e68da882550005122a11d22\", default-features = false, optional = true }\nmonero-wallet = { git = \"https://github.com/monero-oxide/monero-oxide\", rev = \"32e6b5fe5ba9e1ea3e68da882550005122a11d22\", default-features = false, features = [\"std\", \"multisig\", \"compile-time-generators\"], optional = true }\n\n# Application\nlog = { version = \"0.4\", default-features = false, features = [\"std\"] }\nenv_logger = { version = \"0.10\", default-features = false, features = [\"humantime\"], optional = true }\ntokio = { version = \"1\", default-features = false, features = [\"rt-multi-thread\", \"sync\", \"time\", \"macros\"] }\n\nzalloc = { path = \"../common/zalloc\" }\nserai-db = { path = \"../common/db\" }\nserai-env = { path = \"../common/env\", optional = true }\n# TODO: Replace with direct usage of primitives\nserai-client = { path = \"../substrate/client\", default-features = false, features = [\"serai\"] }\n\nmessages = { package = \"serai-processor-messages\", path = \"./messages\" }\n\nmessage-queue = { package = \"serai-message-queue\", path = \"../message-queue\", optional = true 
}\n\n[dev-dependencies]\nfrost = { package = \"modular-frost\", path = \"../crypto/frost\", features = [\"tests\"] }\n\nsp-application-crypto = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false, features = [\"std\"] }\n\nethereum-serai = { path = \"../networks/ethereum\", default-features = false, features = [\"tests\"] }\n\ndockertest = \"0.5\"\nserai-docker-tests = { path = \"../tests/docker\" }\n\n[features]\nsecp256k1 = [\"k256\", \"frost/secp256k1\"]\nbitcoin = [\"dep:secp256k1\", \"secp256k1\", \"bitcoin-serai\", \"serai-client/bitcoin\"]\n\nethereum = [\"secp256k1\", \"ciphersuite-kp256\", \"ethereum-serai/tests\"]\n\ned25519 = [\"frost/ed25519\"]\nmonero = [\"ed25519\", \"monero-simple-request-rpc\", \"monero-wallet\", \"serai-client/monero\"]\n\nbinaries = [\"env_logger\", \"serai-env\", \"message-queue\"]\nparity-db = [\"serai-db/parity-db\"]\nrocksdb = [\"serai-db/rocksdb\"]\n"
  },
  {
    "path": "processor/LICENSE",
    "content": "AGPL-3.0-only license\n\nCopyright (c) 2022-2023 Luke Parker\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License Version 3 as\npublished by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n"
  },
  {
    "path": "processor/README.md",
    "content": "# Processor\n\nThe Serai processor scans a specified external network, communicating with the\ncoordinator. For details on its exact messaging flow, and overall policies,\nplease view `docs/processor`.\n"
  },
  {
    "path": "processor/messages/Cargo.toml",
    "content": "[package]\nname = \"serai-processor-messages\"\nversion = \"0.1.0\"\ndescription = \"Messages sent and received by the processor\"\nlicense = \"AGPL-3.0-only\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/processor/messages\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nkeywords = []\nedition = \"2021\"\npublish = false\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nscale = { package = \"parity-scale-codec\", version = \"3\", default-features = false, features = [\"std\"] }\nborsh = { version = \"1\", default-features = false, features = [\"std\", \"derive\", \"de_strict_order\"] }\n\ndkg = { path = \"../../crypto/dkg\", default-features = false, features = [\"std\", \"borsh\"] }\n\nserai-primitives = { path = \"../../substrate/primitives\", default-features = false, features = [\"std\", \"borsh\"] }\nin-instructions-primitives = { package = \"serai-in-instructions-primitives\", path = \"../../substrate/in-instructions/primitives\", default-features = false, features = [\"std\", \"borsh\"] }\ncoins-primitives = { package = \"serai-coins-primitives\", path = \"../../substrate/coins/primitives\", default-features = false, features = [\"std\", \"borsh\"] }\nvalidator-sets-primitives = { package = \"serai-validator-sets-primitives\", path = \"../../substrate/validator-sets/primitives\", default-features = false, features = [\"std\", \"borsh\"] }\n"
  },
  {
    "path": "processor/messages/LICENSE",
    "content": "AGPL-3.0-only license\n\nCopyright (c) 2023 Luke Parker\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License Version 3 as\npublished by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n"
  },
  {
    "path": "processor/messages/src/lib.rs",
    "content": "#![expect(clippy::cast_possible_truncation)]\n\nuse std::collections::HashMap;\n\nuse scale::{Encode, Decode};\nuse borsh::{BorshSerialize, BorshDeserialize};\n\nuse dkg::{Participant, ThresholdParams};\n\nuse serai_primitives::BlockHash;\nuse in_instructions_primitives::{Batch, SignedBatch};\nuse coins_primitives::OutInstructionWithBalance;\nuse validator_sets_primitives::{Session, KeyPair};\n\n#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]\npub struct SubstrateContext {\n  pub serai_time: u64,\n  pub network_latest_finalized_block: BlockHash,\n}\n\npub mod key_gen {\n  use super::*;\n\n  #[derive(\n    Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode, BorshSerialize, BorshDeserialize,\n  )]\n  pub struct KeyGenId {\n    pub session: Session,\n    pub attempt: u32,\n  }\n\n  #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]\n  pub enum CoordinatorMessage {\n    // Instructs the Processor to begin the key generation process.\n    // TODO: Should this be moved under Substrate?\n    GenerateKey {\n      id: KeyGenId,\n      params: ThresholdParams,\n      shares: u16,\n    },\n    // Received commitments for the specified key generation protocol.\n    Commitments {\n      id: KeyGenId,\n      commitments: HashMap<Participant, Vec<u8>>,\n    },\n    // Received shares for the specified key generation protocol.\n    Shares {\n      id: KeyGenId,\n      shares: Vec<HashMap<Participant, Vec<u8>>>,\n    },\n    /// Instruction to verify a blame accusation.\n    VerifyBlame {\n      id: KeyGenId,\n      accuser: Participant,\n      accused: Participant,\n      share: Vec<u8>,\n      blame: Option<Vec<u8>>,\n    },\n  }\n\n  impl CoordinatorMessage {\n    pub fn required_block(&self) -> Option<BlockHash> {\n      None\n    }\n  }\n\n  #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]\n  pub enum ProcessorMessage {\n    // Created commitments for the specified key generation 
protocol.\n    Commitments {\n      id: KeyGenId,\n      commitments: Vec<Vec<u8>>,\n    },\n    // Participant published invalid commitments.\n    InvalidCommitments {\n      id: KeyGenId,\n      faulty: Participant,\n    },\n    // Created shares for the specified key generation protocol.\n    Shares {\n      id: KeyGenId,\n      shares: Vec<HashMap<Participant, Vec<u8>>>,\n    },\n    // Participant published an invalid share.\n    #[rustfmt::skip]\n    InvalidShare {\n      id: KeyGenId,\n      accuser: Participant,\n      faulty: Participant,\n      blame: Option<Vec<u8>>,\n    },\n    // Resulting keys from the specified key generation protocol.\n    GeneratedKeyPair {\n      id: KeyGenId,\n      substrate_key: [u8; 32],\n      network_key: Vec<u8>,\n    },\n    // Blame this participant.\n    Blame {\n      id: KeyGenId,\n      participant: Participant,\n    },\n  }\n}\n\npub mod sign {\n  use super::*;\n\n  #[derive(Clone, PartialEq, Eq, Hash, Debug, Encode, Decode, BorshSerialize, BorshDeserialize)]\n  pub struct SignId {\n    pub session: Session,\n    pub id: [u8; 32],\n    pub attempt: u32,\n  }\n\n  #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]\n  pub enum CoordinatorMessage {\n    // Received preprocesses for the specified signing protocol.\n    Preprocesses { id: SignId, preprocesses: HashMap<Participant, Vec<u8>> },\n    // Received shares for the specified signing protocol.\n    Shares { id: SignId, shares: HashMap<Participant, Vec<u8>> },\n    // Re-attempt a signing protocol.\n    Reattempt { id: SignId },\n    // Completed a signing protocol already.\n    Completed { session: Session, id: [u8; 32], tx: Vec<u8> },\n  }\n\n  impl CoordinatorMessage {\n    pub fn required_block(&self) -> Option<BlockHash> {\n      None\n    }\n\n    pub fn session(&self) -> Session {\n      match self {\n        CoordinatorMessage::Preprocesses { id, .. } |\n        CoordinatorMessage::Shares { id, .. 
} |\n        CoordinatorMessage::Reattempt { id } => id.session,\n        CoordinatorMessage::Completed { session, .. } => *session,\n      }\n    }\n  }\n\n  #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]\n  pub enum ProcessorMessage {\n    // Participant sent an invalid message during the sign protocol.\n    InvalidParticipant { id: SignId, participant: Participant },\n    // Created preprocess for the specified signing protocol.\n    Preprocess { id: SignId, preprocesses: Vec<Vec<u8>> },\n    // Signed share for the specified signing protocol.\n    Share { id: SignId, shares: Vec<Vec<u8>> },\n    // Completed a signing protocol already.\n    Completed { session: Session, id: [u8; 32], tx: Vec<u8> },\n  }\n}\n\npub mod coordinator {\n  use super::*;\n\n  pub fn cosign_block_msg(block_number: u64, block: [u8; 32]) -> Vec<u8> {\n    const DST: &[u8] = b\"Cosign\";\n    let mut res = vec![u8::try_from(DST.len()).unwrap()];\n    res.extend(DST);\n    res.extend(block_number.to_le_bytes());\n    res.extend(block);\n    res\n  }\n\n  #[derive(\n    Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode, BorshSerialize, BorshDeserialize,\n  )]\n  pub enum SubstrateSignableId {\n    CosigningSubstrateBlock([u8; 32]),\n    Batch(u32),\n    SlashReport,\n  }\n\n  #[derive(Clone, PartialEq, Eq, Hash, Debug, Encode, Decode, BorshSerialize, BorshDeserialize)]\n  pub struct SubstrateSignId {\n    pub session: Session,\n    pub id: SubstrateSignableId,\n    pub attempt: u32,\n  }\n\n  #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]\n  pub enum CoordinatorMessage {\n    CosignSubstrateBlock { id: SubstrateSignId, block_number: u64 },\n    SignSlashReport { id: SubstrateSignId, report: Vec<([u8; 32], u32)> },\n    SubstratePreprocesses { id: SubstrateSignId, preprocesses: HashMap<Participant, [u8; 64]> },\n    SubstrateShares { id: SubstrateSignId, shares: HashMap<Participant, [u8; 32]> },\n    // Re-attempt a batch signing 
protocol.\n    BatchReattempt { id: SubstrateSignId },\n  }\n\n  impl CoordinatorMessage {\n    // The Coordinator will only send Batch messages once the Batch ID has been recognized\n    // The ID will only be recognized when the block is acknowledged by a super-majority of the\n    // network *and the local node*\n    // This synchrony obtained lets us ignore the synchrony requirement offered here\n    pub fn required_block(&self) -> Option<BlockHash> {\n      None\n    }\n  }\n\n  #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]\n  pub struct PlanMeta {\n    pub session: Session,\n    pub id: [u8; 32],\n  }\n\n  #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]\n  pub enum ProcessorMessage {\n    SubstrateBlockAck { block: u64, plans: Vec<PlanMeta> },\n    InvalidParticipant { id: SubstrateSignId, participant: Participant },\n    CosignPreprocess { id: SubstrateSignId, preprocesses: Vec<[u8; 64]> },\n    BatchPreprocess { id: SubstrateSignId, block: BlockHash, preprocesses: Vec<[u8; 64]> },\n    SlashReportPreprocess { id: SubstrateSignId, preprocesses: Vec<[u8; 64]> },\n    SubstrateShare { id: SubstrateSignId, shares: Vec<[u8; 32]> },\n    // TODO: Make these signatures [u8; 64]?\n    CosignedBlock { block_number: u64, block: [u8; 32], signature: Vec<u8> },\n    SignedSlashReport { session: Session, signature: Vec<u8> },\n  }\n}\n\npub mod substrate {\n  use super::*;\n\n  #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]\n  pub enum CoordinatorMessage {\n    ConfirmKeyPair {\n      context: SubstrateContext,\n      session: Session,\n      key_pair: KeyPair,\n    },\n    SubstrateBlock {\n      context: SubstrateContext,\n      block: u64,\n      burns: Vec<OutInstructionWithBalance>,\n      batches: Vec<u32>,\n    },\n  }\n\n  impl CoordinatorMessage {\n    pub fn required_block(&self) -> Option<BlockHash> {\n      let context = match self {\n        CoordinatorMessage::ConfirmKeyPair { 
context, .. } |\n        CoordinatorMessage::SubstrateBlock { context, .. } => context,\n      };\n      Some(context.network_latest_finalized_block)\n    }\n  }\n\n  #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]\n  pub enum ProcessorMessage {\n    Batch { batch: Batch },\n    SignedBatch { batch: SignedBatch },\n  }\n}\n\nmacro_rules! impl_from {\n  ($from: ident, $to: ident, $via: ident) => {\n    impl From<$from::$to> for $to {\n      fn from(msg: $from::$to) -> $to {\n        $to::$via(msg)\n      }\n    }\n  };\n}\n\n#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]\npub enum CoordinatorMessage {\n  KeyGen(key_gen::CoordinatorMessage),\n  Sign(sign::CoordinatorMessage),\n  Coordinator(coordinator::CoordinatorMessage),\n  Substrate(substrate::CoordinatorMessage),\n}\n\nimpl_from!(key_gen, CoordinatorMessage, KeyGen);\nimpl_from!(sign, CoordinatorMessage, Sign);\nimpl_from!(coordinator, CoordinatorMessage, Coordinator);\nimpl_from!(substrate, CoordinatorMessage, Substrate);\n\nimpl CoordinatorMessage {\n  pub fn required_block(&self) -> Option<BlockHash> {\n    let required = match self {\n      CoordinatorMessage::KeyGen(msg) => msg.required_block(),\n      CoordinatorMessage::Sign(msg) => msg.required_block(),\n      CoordinatorMessage::Coordinator(msg) => msg.required_block(),\n      CoordinatorMessage::Substrate(msg) => msg.required_block(),\n    };\n\n    // 0 is used when Serai hasn't acknowledged *any* block for this network, which also means\n    // there's no need to wait for the block in question\n    if required == Some(BlockHash([0; 32])) {\n      return None;\n    }\n    required\n  }\n}\n\n#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]\npub enum ProcessorMessage {\n  KeyGen(key_gen::ProcessorMessage),\n  Sign(sign::ProcessorMessage),\n  Coordinator(coordinator::ProcessorMessage),\n  Substrate(substrate::ProcessorMessage),\n}\n\nimpl_from!(key_gen, ProcessorMessage, 
KeyGen);\nimpl_from!(sign, ProcessorMessage, Sign);\nimpl_from!(coordinator, ProcessorMessage, Coordinator);\nimpl_from!(substrate, ProcessorMessage, Substrate);\n\n// Intent generation code\n\nconst COORDINATOR_UID: u8 = 0;\nconst PROCESSOR_UID: u8 = 1;\n\nconst TYPE_KEY_GEN_UID: u8 = 2;\nconst TYPE_SIGN_UID: u8 = 3;\nconst TYPE_COORDINATOR_UID: u8 = 4;\nconst TYPE_SUBSTRATE_UID: u8 = 5;\n\nimpl CoordinatorMessage {\n  /// The intent for this message, which should be unique across the validator's entire system,\n  /// including all of its processors.\n  ///\n  /// This doesn't use H(msg.serialize()) as it's meant to be unique to intent, not unique to\n  /// values. While the values should be consistent per intent, that assumption isn't required\n  /// here.\n  pub fn intent(&self) -> Vec<u8> {\n    match self {\n      CoordinatorMessage::KeyGen(msg) => {\n        // Unique since key gen ID embeds the session and attempt\n        let (sub, id) = match msg {\n          key_gen::CoordinatorMessage::GenerateKey { id, .. } => (0, id),\n          key_gen::CoordinatorMessage::Commitments { id, .. } => (1, id),\n          key_gen::CoordinatorMessage::Shares { id, .. } => (2, id),\n          key_gen::CoordinatorMessage::VerifyBlame { id, .. } => (3, id),\n        };\n\n        let mut res = vec![COORDINATOR_UID, TYPE_KEY_GEN_UID, sub];\n        res.extend(&id.encode());\n        res\n      }\n      CoordinatorMessage::Sign(msg) => {\n        let (sub, id) = match msg {\n          // Unique since SignId includes a hash of the network, and specific transaction info\n          sign::CoordinatorMessage::Preprocesses { id, .. } => (0, id.encode()),\n          sign::CoordinatorMessage::Shares { id, .. 
} => (1, id.encode()),\n          sign::CoordinatorMessage::Reattempt { id } => (2, id.encode()),\n          // The coordinator should report all reported completions to the processor\n          // Accordingly, the intent is a combination of plan ID and actual TX\n          // While transaction alone may suffice, that doesn't cover cross-chain TX ID conflicts,\n          // which are possible\n          sign::CoordinatorMessage::Completed { id, tx, .. } => (3, (id, tx).encode()),\n        };\n\n        let mut res = vec![COORDINATOR_UID, TYPE_SIGN_UID, sub];\n        res.extend(&id);\n        res\n      }\n      CoordinatorMessage::Coordinator(msg) => {\n        let (sub, id) = match msg {\n          // Unique since this ID contains the hash of the block being cosigned\n          coordinator::CoordinatorMessage::CosignSubstrateBlock { id, .. } => (0, id.encode()),\n          // Unique since there's only one of these per session/attempt, and ID is inclusive to\n          // both\n          coordinator::CoordinatorMessage::SignSlashReport { id, .. } => (1, id.encode()),\n          // Unique since this embeds the batch ID (including its network) and attempt\n          coordinator::CoordinatorMessage::SubstratePreprocesses { id, .. } => (2, id.encode()),\n          coordinator::CoordinatorMessage::SubstrateShares { id, .. } => (3, id.encode()),\n          coordinator::CoordinatorMessage::BatchReattempt { id, .. } => (4, id.encode()),\n        };\n\n        let mut res = vec![COORDINATOR_UID, TYPE_COORDINATOR_UID, sub];\n        res.extend(&id);\n        res\n      }\n      CoordinatorMessage::Substrate(msg) => {\n        let (sub, id) = match msg {\n          // Unique since there's only one key pair for a session\n          substrate::CoordinatorMessage::ConfirmKeyPair { session, .. } => (0, session.encode()),\n          substrate::CoordinatorMessage::SubstrateBlock { block, .. 
} => (1, block.encode()),\n        };\n\n        let mut res = vec![COORDINATOR_UID, TYPE_SUBSTRATE_UID, sub];\n        res.extend(&id);\n        res\n      }\n    }\n  }\n}\n\nimpl ProcessorMessage {\n  /// The intent for this message, which should be unique across the validator's entire system,\n  /// including all of its processors.\n  ///\n  /// This doesn't use H(msg.serialize()) as it's meant to be unique to intent, not unique to\n  /// values. While the values should be consistent per intent, that assumption isn't required\n  /// here.\n  pub fn intent(&self) -> Vec<u8> {\n    match self {\n      ProcessorMessage::KeyGen(msg) => {\n        let (sub, id) = match msg {\n          // Unique since KeyGenId\n          key_gen::ProcessorMessage::Commitments { id, .. } => (0, id),\n          key_gen::ProcessorMessage::InvalidCommitments { id, .. } => (1, id),\n          key_gen::ProcessorMessage::Shares { id, .. } => (2, id),\n          key_gen::ProcessorMessage::InvalidShare { id, .. } => (3, id),\n          key_gen::ProcessorMessage::GeneratedKeyPair { id, .. } => (4, id),\n          key_gen::ProcessorMessage::Blame { id, .. } => (5, id),\n        };\n\n        let mut res = vec![PROCESSOR_UID, TYPE_KEY_GEN_UID, sub];\n        res.extend(&id.encode());\n        res\n      }\n      ProcessorMessage::Sign(msg) => {\n        let (sub, id) = match msg {\n          // Unique since SignId\n          sign::ProcessorMessage::InvalidParticipant { id, .. } => (0, id.encode()),\n          sign::ProcessorMessage::Preprocess { id, .. } => (1, id.encode()),\n          sign::ProcessorMessage::Share { id, .. } => (2, id.encode()),\n          // Unique since a processor will only sign a TX once\n          sign::ProcessorMessage::Completed { id, .. 
} => (3, id.to_vec()),\n        };\n\n        let mut res = vec![PROCESSOR_UID, TYPE_SIGN_UID, sub];\n        res.extend(&id);\n        res\n      }\n      ProcessorMessage::Coordinator(msg) => {\n        let (sub, id) = match msg {\n          coordinator::ProcessorMessage::SubstrateBlockAck { block, .. } => (0, block.encode()),\n          // Unique since SubstrateSignId\n          coordinator::ProcessorMessage::InvalidParticipant { id, .. } => (1, id.encode()),\n          coordinator::ProcessorMessage::CosignPreprocess { id, .. } => (2, id.encode()),\n          coordinator::ProcessorMessage::BatchPreprocess { id, .. } => (3, id.encode()),\n          coordinator::ProcessorMessage::SlashReportPreprocess { id, .. } => (4, id.encode()),\n          coordinator::ProcessorMessage::SubstrateShare { id, .. } => (5, id.encode()),\n          // Unique since only one instance of a signature matters\n          coordinator::ProcessorMessage::CosignedBlock { block, .. } => (6, block.encode()),\n          coordinator::ProcessorMessage::SignedSlashReport { .. } => (7, vec![]),\n        };\n\n        let mut res = vec![PROCESSOR_UID, TYPE_COORDINATOR_UID, sub];\n        res.extend(&id);\n        res\n      }\n      ProcessorMessage::Substrate(msg) => {\n        let (sub, id) = match msg {\n          // Unique since network and ID binding\n          substrate::ProcessorMessage::Batch { batch } => (0, (batch.network, batch.id).encode()),\n          substrate::ProcessorMessage::SignedBatch { batch, .. } => {\n            (1, (batch.batch.network, batch.batch.id).encode())\n          }\n        };\n\n        let mut res = vec![PROCESSOR_UID, TYPE_SUBSTRATE_UID, sub];\n        res.extend(&id);\n        res\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "processor/src/additional_key.rs",
    "content": "use ciphersuite::Ciphersuite;\n\nuse crate::networks::Network;\n\n// Generate a static additional key for a given chain in a globally consistent manner\n// Doesn't consider the current group key to increase the simplicity of verifying Serai's status\n// Takes an index, k, to support protocols which use multiple secondary keys\n// Presumably a view key\npub fn additional_key<N: Network>(k: u64) -> <N::Curve as Ciphersuite>::F {\n  <N::Curve as Ciphersuite>::hash_to_F(\n    b\"Serai DEX Additional Key\",\n    &[N::ID.as_bytes(), &k.to_le_bytes()].concat(),\n  )\n}\n"
  },
  {
    "path": "processor/src/batch_signer.rs",
    "content": "use core::{marker::PhantomData, fmt};\nuse std::collections::HashMap;\n\nuse rand_core::OsRng;\n\nuse frost::{\n  curve::Ristretto,\n  ThresholdKeys, FrostError,\n  algorithm::Algorithm,\n  sign::{\n    Writable, PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine,\n    AlgorithmSignMachine, AlgorithmSignatureMachine,\n  },\n};\nuse frost_schnorrkel::Schnorrkel;\n\nuse log::{info, debug, warn};\n\nuse serai_client::{\n  primitives::{ExternalNetworkId, BlockHash},\n  in_instructions::primitives::{Batch, SignedBatch, batch_message},\n  validator_sets::primitives::Session,\n};\n\nuse messages::coordinator::*;\nuse crate::{Get, DbTxn, Db, create_db};\n\ncreate_db!(\n  BatchSignerDb {\n    CompletedDb: (id: u32) -> (),\n    AttemptDb: (id: u32, attempt: u32) -> (),\n    BatchDb: (block: BlockHash) -> SignedBatch\n  }\n);\n\ntype Preprocess = <AlgorithmMachine<Ristretto, Schnorrkel> as PreprocessMachine>::Preprocess;\ntype SignatureShare = <AlgorithmSignMachine<Ristretto, Schnorrkel> as SignMachine<\n  <Schnorrkel as Algorithm<Ristretto>>::Signature,\n>>::SignatureShare;\n\npub struct BatchSigner<D: Db> {\n  db: PhantomData<D>,\n\n  network: ExternalNetworkId,\n  session: Session,\n  keys: Vec<ThresholdKeys<Ristretto>>,\n\n  signable: HashMap<u32, Batch>,\n  attempt: HashMap<u32, u32>,\n  #[allow(clippy::type_complexity)]\n  preprocessing: HashMap<u32, (Vec<AlgorithmSignMachine<Ristretto, Schnorrkel>>, Vec<Preprocess>)>,\n  #[allow(clippy::type_complexity)]\n  signing: HashMap<u32, (AlgorithmSignatureMachine<Ristretto, Schnorrkel>, Vec<SignatureShare>)>,\n}\n\nimpl<D: Db> fmt::Debug for BatchSigner<D> {\n  fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {\n    fmt\n      .debug_struct(\"BatchSigner\")\n      .field(\"signable\", &self.signable)\n      .field(\"attempt\", &self.attempt)\n      .finish_non_exhaustive()\n  }\n}\n\nimpl<D: Db> BatchSigner<D> {\n  pub fn new(\n    network: ExternalNetworkId,\n    session: Session,\n   
 keys: Vec<ThresholdKeys<Ristretto>>,\n  ) -> BatchSigner<D> {\n    assert!(!keys.is_empty());\n    BatchSigner {\n      db: PhantomData,\n\n      network,\n      session,\n      keys,\n\n      signable: HashMap::new(),\n      attempt: HashMap::new(),\n      preprocessing: HashMap::new(),\n      signing: HashMap::new(),\n    }\n  }\n\n  fn verify_id(&self, id: &SubstrateSignId) -> Result<(Session, u32, u32), ()> {\n    let SubstrateSignId { session, id, attempt } = id;\n    let SubstrateSignableId::Batch(id) = id else { panic!(\"BatchSigner handed non-Batch\") };\n\n    assert_eq!(session, &self.session);\n\n    // Check the attempt lines up\n    match self.attempt.get(id) {\n      // If we don't have an attempt logged, it's because the coordinator is faulty OR because we\n      // rebooted OR we detected the signed batch on chain\n      // The latter is the expected flow for batches not actively being participated in\n      None => {\n        warn!(\"not attempting batch {id} #{attempt}\");\n        Err(())?;\n      }\n      Some(our_attempt) => {\n        if attempt != our_attempt {\n          warn!(\"sent signing data for batch {id} #{attempt} yet we have attempt #{our_attempt}\");\n          Err(())?;\n        }\n      }\n    }\n\n    Ok((*session, *id, *attempt))\n  }\n\n  #[must_use]\n  fn attempt(\n    &mut self,\n    txn: &mut D::Transaction<'_>,\n    id: u32,\n    attempt: u32,\n  ) -> Option<ProcessorMessage> {\n    // See above commentary for why this doesn't emit SignedBatch\n    if CompletedDb::get(txn, id).is_some() {\n      return None;\n    }\n\n    // Check if we're already working on this attempt\n    if let Some(curr_attempt) = self.attempt.get(&id) {\n      if curr_attempt >= &attempt {\n        warn!(\"told to attempt {id} #{attempt} yet we're already working on {curr_attempt}\");\n        return None;\n      }\n    }\n\n    // Start this attempt\n    let block = if let Some(batch) = self.signable.get(&id) {\n      batch.block\n    } else {\n   
   warn!(\"told to attempt signing a batch we aren't currently signing for\");\n      return None;\n    };\n\n    // Delete any existing machines\n    self.preprocessing.remove(&id);\n    self.signing.remove(&id);\n\n    // Update the attempt number\n    self.attempt.insert(id, attempt);\n\n    info!(\"signing batch {id} #{attempt}\");\n\n    // If we reboot mid-sign, the current design has us abort all signs and wait for latter\n    // attempts/new signing protocols\n    // This is distinct from the DKG which will continue DKG sessions, even on reboot\n    // This is because signing is tolerant of failures of up to 1/3rd of the group\n    // The DKG requires 100% participation\n    // While we could apply similar tricks as the DKG (a seeded RNG) to achieve support for\n    // reboots, it's not worth the complexity when messing up here leaks our secret share\n    //\n    // Despite this, on reboot, we'll get told of active signing items, and may be in this\n    // branch again for something we've already attempted\n    //\n    // Only run if this hasn't already been attempted\n    // TODO: This isn't complete as this txn may not be committed with the expected timing\n    if AttemptDb::get(txn, id, attempt).is_some() {\n      warn!(\n        \"already attempted batch {id}, attempt #{attempt}. 
this is an error if we didn't reboot\"\n      );\n      return None;\n    }\n    AttemptDb::set(txn, id, attempt, &());\n\n    let mut machines = vec![];\n    let mut preprocesses = vec![];\n    let mut serialized_preprocesses = vec![];\n    for keys in &self.keys {\n      // b\"substrate\" is a literal from sp-core\n      let machine = AlgorithmMachine::new(Schnorrkel::new(b\"substrate\"), keys.clone());\n\n      let (machine, preprocess) = machine.preprocess(&mut OsRng);\n      machines.push(machine);\n      serialized_preprocesses.push(preprocess.serialize().try_into().unwrap());\n      preprocesses.push(preprocess);\n    }\n    self.preprocessing.insert(id, (machines, preprocesses));\n\n    let id = SubstrateSignId { session: self.session, id: SubstrateSignableId::Batch(id), attempt };\n\n    // Broadcast our preprocesses\n    Some(ProcessorMessage::BatchPreprocess { id, block, preprocesses: serialized_preprocesses })\n  }\n\n  #[must_use]\n  pub fn sign(&mut self, txn: &mut D::Transaction<'_>, batch: Batch) -> Option<ProcessorMessage> {\n    debug_assert_eq!(self.network, batch.network);\n    let id = batch.id;\n    if CompletedDb::get(txn, id).is_some() {\n      debug!(\"Sign batch order for ID we've already completed signing\");\n      // See batch_signed for commentary on why this simply returns\n      return None;\n    }\n\n    self.signable.insert(id, batch);\n    self.attempt(txn, id, 0)\n  }\n\n  #[must_use]\n  pub fn handle(\n    &mut self,\n    txn: &mut D::Transaction<'_>,\n    msg: CoordinatorMessage,\n  ) -> Option<messages::ProcessorMessage> {\n    match msg {\n      CoordinatorMessage::CosignSubstrateBlock { .. } => {\n        panic!(\"BatchSigner passed CosignSubstrateBlock\")\n      }\n\n      CoordinatorMessage::SignSlashReport { .. 
} => {\n        panic!(\"Cosigner passed SignSlashReport\")\n      }\n\n      CoordinatorMessage::SubstratePreprocesses { id, preprocesses } => {\n        let (session, id, attempt) = self.verify_id(&id).ok()?;\n\n        let substrate_sign_id =\n          SubstrateSignId { session, id: SubstrateSignableId::Batch(id), attempt };\n\n        let (machines, our_preprocesses) = match self.preprocessing.remove(&id) {\n          // Either rebooted or RPC error, or some invariant\n          None => {\n            warn!(\"not preprocessing for {id}. this is an error if we didn't reboot\");\n            return None;\n          }\n          Some(preprocess) => preprocess,\n        };\n\n        let mut parsed = HashMap::new();\n        for l in {\n          let mut keys = preprocesses.keys().copied().collect::<Vec<_>>();\n          keys.sort();\n          keys\n        } {\n          let mut preprocess_ref = preprocesses.get(&l).unwrap().as_slice();\n          let Ok(res) = machines[0].read_preprocess(&mut preprocess_ref) else {\n            return Some(\n              (ProcessorMessage::InvalidParticipant { id: substrate_sign_id, participant: l })\n                .into(),\n            );\n          };\n          if !preprocess_ref.is_empty() {\n            return Some(\n              (ProcessorMessage::InvalidParticipant { id: substrate_sign_id, participant: l })\n                .into(),\n            );\n          }\n          parsed.insert(l, res);\n        }\n        let preprocesses = parsed;\n\n        // Only keep a single machine as we only need one to get the signature\n        let mut signature_machine = None;\n        let mut shares = vec![];\n        let mut serialized_shares = vec![];\n        for (m, machine) in machines.into_iter().enumerate() {\n          let mut preprocesses = preprocesses.clone();\n          for (i, our_preprocess) in our_preprocesses.clone().into_iter().enumerate() {\n            if i != m {\n              
assert!(preprocesses.insert(self.keys[i].params().i(), our_preprocess).is_none());\n            }\n          }\n\n          let (machine, share) = match machine\n            .sign(preprocesses, &batch_message(&self.signable[&id]))\n          {\n            Ok(res) => res,\n            Err(e) => match e {\n              FrostError::InternalError(_) |\n              FrostError::InvalidParticipant(_, _) |\n              FrostError::InvalidSigningSet(_) |\n              FrostError::InvalidParticipantQuantity(_, _) |\n              FrostError::DuplicatedParticipant(_) |\n              FrostError::MissingParticipant(_) => unreachable!(),\n\n              FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => {\n                return Some(\n                  (ProcessorMessage::InvalidParticipant { id: substrate_sign_id, participant: l })\n                    .into(),\n                )\n              }\n            },\n          };\n          if m == 0 {\n            signature_machine = Some(machine);\n          }\n\n          let mut share_bytes = [0; 32];\n          share_bytes.copy_from_slice(&share.serialize());\n          serialized_shares.push(share_bytes);\n\n          shares.push(share);\n        }\n        self.signing.insert(id, (signature_machine.unwrap(), shares));\n\n        // Broadcast our shares\n        Some(\n          (ProcessorMessage::SubstrateShare { id: substrate_sign_id, shares: serialized_shares })\n            .into(),\n        )\n      }\n\n      CoordinatorMessage::SubstrateShares { id, shares } => {\n        let (session, id, attempt) = self.verify_id(&id).ok()?;\n\n        let substrate_sign_id =\n          SubstrateSignId { session, id: SubstrateSignableId::Batch(id), attempt };\n\n        let (machine, our_shares) = match self.signing.remove(&id) {\n          // Rebooted, RPC error, or some invariant\n          None => {\n            // If preprocessing has this ID, it means we were never sent the preprocess by the\n            
// coordinator\n            if self.preprocessing.contains_key(&id) {\n              panic!(\"never preprocessed yet signing?\");\n            }\n\n            warn!(\"not preprocessing for {id}. this is an error if we didn't reboot\");\n            return None;\n          }\n          Some(signing) => signing,\n        };\n\n        let mut parsed = HashMap::new();\n        for l in {\n          let mut keys = shares.keys().copied().collect::<Vec<_>>();\n          keys.sort();\n          keys\n        } {\n          let mut share_ref = shares.get(&l).unwrap().as_slice();\n          let Ok(res) = machine.read_share(&mut share_ref) else {\n            return Some(\n              (ProcessorMessage::InvalidParticipant { id: substrate_sign_id, participant: l })\n                .into(),\n            );\n          };\n          if !share_ref.is_empty() {\n            return Some(\n              (ProcessorMessage::InvalidParticipant { id: substrate_sign_id, participant: l })\n                .into(),\n            );\n          }\n          parsed.insert(l, res);\n        }\n        let mut shares = parsed;\n\n        for (i, our_share) in our_shares.into_iter().enumerate().skip(1) {\n          assert!(shares.insert(self.keys[i].params().i(), our_share).is_none());\n        }\n\n        let sig = match machine.complete(shares) {\n          Ok(res) => res,\n          Err(e) => match e {\n            FrostError::InternalError(_) |\n            FrostError::InvalidParticipant(_, _) |\n            FrostError::InvalidSigningSet(_) |\n            FrostError::InvalidParticipantQuantity(_, _) |\n            FrostError::DuplicatedParticipant(_) |\n            FrostError::MissingParticipant(_) => unreachable!(),\n\n            FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => {\n              return Some(\n                (ProcessorMessage::InvalidParticipant { id: substrate_sign_id, participant: l })\n                  .into(),\n              )\n            }\n      
    },\n        };\n\n        info!(\"signed batch {id} with attempt #{attempt}\");\n\n        let batch =\n          SignedBatch { batch: self.signable.remove(&id).unwrap(), signature: sig.into() };\n\n        // Save the batch in case it's needed for recovery\n        BatchDb::set(txn, batch.batch.block, &batch);\n        CompletedDb::set(txn, id, &());\n\n        // Stop trying to sign for this batch\n        assert!(self.attempt.remove(&id).is_some());\n        assert!(self.preprocessing.remove(&id).is_none());\n        assert!(self.signing.remove(&id).is_none());\n\n        Some((messages::substrate::ProcessorMessage::SignedBatch { batch }).into())\n      }\n\n      CoordinatorMessage::BatchReattempt { id } => {\n        let SubstrateSignableId::Batch(batch_id) = id.id else {\n          panic!(\"BatchReattempt passed non-Batch ID\")\n        };\n        self.attempt(txn, batch_id, id.attempt).map(Into::into)\n      }\n    }\n  }\n\n  pub fn batch_signed(&mut self, txn: &mut D::Transaction<'_>, id: u32) {\n    // Stop trying to sign for this batch\n    CompletedDb::set(txn, id, &());\n\n    self.signable.remove(&id);\n    self.attempt.remove(&id);\n    self.preprocessing.remove(&id);\n    self.signing.remove(&id);\n\n    // This doesn't emit SignedBatch because it doesn't have access to the SignedBatch\n    // This function is expected to only be called once Substrate acknowledges this block,\n    // which means its batch must have been signed\n    // While a successive batch's signing would also cause this block to be acknowledged, Substrate\n    // guarantees a batch's ordered inclusion\n\n    // This also doesn't return any messages since all mutation from the Batch being signed happens\n    // on the substrate::CoordinatorMessage::SubstrateBlock message (which SignedBatch is meant to\n    // end up triggering)\n  }\n}\n"
  },
  {
    "path": "processor/src/coordinator.rs",
    "content": "use messages::{ProcessorMessage, CoordinatorMessage};\n\nuse message_queue::{Service, Metadata, client::MessageQueue};\n\n#[derive(Clone, PartialEq, Eq, Debug)]\npub struct Message {\n  pub id: u64,\n  pub msg: CoordinatorMessage,\n}\n\n#[async_trait::async_trait]\npub trait Coordinator {\n  async fn send(&mut self, msg: impl Send + Into<ProcessorMessage>);\n  async fn recv(&mut self) -> Message;\n  async fn ack(&mut self, msg: Message);\n}\n\n#[async_trait::async_trait]\nimpl Coordinator for MessageQueue {\n  async fn send(&mut self, msg: impl Send + Into<ProcessorMessage>) {\n    let msg: ProcessorMessage = msg.into();\n    let metadata = Metadata { from: self.service, to: Service::Coordinator, intent: msg.intent() };\n    let msg = borsh::to_vec(&msg).unwrap();\n\n    self.queue(metadata, msg).await;\n  }\n\n  async fn recv(&mut self) -> Message {\n    let msg = self.next(Service::Coordinator).await;\n\n    let id = msg.id;\n\n    // Deserialize it into a CoordinatorMessage\n    let msg: CoordinatorMessage =\n      borsh::from_slice(&msg.msg).expect(\"message wasn't a borsh-encoded CoordinatorMessage\");\n\n    return Message { id, msg };\n  }\n\n  async fn ack(&mut self, msg: Message) {\n    MessageQueue::ack(self, Service::Coordinator, msg.id).await\n  }\n}\n"
  },
  {
    "path": "processor/src/cosigner.rs",
    "content": "use core::fmt;\nuse std::collections::HashMap;\n\nuse rand_core::OsRng;\n\nuse frost::{\n  curve::Ristretto,\n  ThresholdKeys, FrostError,\n  algorithm::Algorithm,\n  sign::{\n    Writable, PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine,\n    AlgorithmSignMachine, AlgorithmSignatureMachine,\n  },\n};\nuse frost_schnorrkel::Schnorrkel;\n\nuse log::{info, warn};\n\nuse serai_client::validator_sets::primitives::Session;\n\nuse messages::coordinator::*;\nuse crate::{Get, DbTxn, create_db};\n\ncreate_db! {\n  CosignerDb {\n    Completed: (id: [u8; 32]) -> (),\n    Attempt: (id: [u8; 32], attempt: u32) -> (),\n  }\n}\n\ntype Preprocess = <AlgorithmMachine<Ristretto, Schnorrkel> as PreprocessMachine>::Preprocess;\ntype SignatureShare = <AlgorithmSignMachine<Ristretto, Schnorrkel> as SignMachine<\n  <Schnorrkel as Algorithm<Ristretto>>::Signature,\n>>::SignatureShare;\n\npub struct Cosigner {\n  session: Session,\n  keys: Vec<ThresholdKeys<Ristretto>>,\n\n  block_number: u64,\n  id: [u8; 32],\n  attempt: u32,\n  #[allow(clippy::type_complexity)]\n  preprocessing: Option<(Vec<AlgorithmSignMachine<Ristretto, Schnorrkel>>, Vec<Preprocess>)>,\n  #[allow(clippy::type_complexity)]\n  signing: Option<(AlgorithmSignatureMachine<Ristretto, Schnorrkel>, Vec<SignatureShare>)>,\n}\n\nimpl fmt::Debug for Cosigner {\n  fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {\n    fmt\n      .debug_struct(\"Cosigner\")\n      .field(\"session\", &self.session)\n      .field(\"block_number\", &self.block_number)\n      .field(\"id\", &self.id)\n      .field(\"attempt\", &self.attempt)\n      .field(\"preprocessing\", &self.preprocessing.is_some())\n      .field(\"signing\", &self.signing.is_some())\n      .finish_non_exhaustive()\n  }\n}\n\nimpl Cosigner {\n  pub fn new(\n    txn: &mut impl DbTxn,\n    session: Session,\n    keys: Vec<ThresholdKeys<Ristretto>>,\n    block_number: u64,\n    id: [u8; 32],\n    attempt: u32,\n  ) -> Option<(Cosigner, 
ProcessorMessage)> {\n    assert!(!keys.is_empty());\n\n    if Completed::get(txn, id).is_some() {\n      return None;\n    }\n\n    if Attempt::get(txn, id, attempt).is_some() {\n      warn!(\n        \"already attempted cosigning {}, attempt #{}. this is an error if we didn't reboot\",\n        hex::encode(id),\n        attempt,\n      );\n      return None;\n    }\n    Attempt::set(txn, id, attempt, &());\n\n    info!(\"cosigning block {} with attempt #{}\", hex::encode(id), attempt);\n\n    let mut machines = vec![];\n    let mut preprocesses = vec![];\n    let mut serialized_preprocesses = vec![];\n    for keys in &keys {\n      // b\"substrate\" is a literal from sp-core\n      let machine = AlgorithmMachine::new(Schnorrkel::new(b\"substrate\"), keys.clone());\n\n      let (machine, preprocess) = machine.preprocess(&mut OsRng);\n      machines.push(machine);\n      serialized_preprocesses.push(preprocess.serialize().try_into().unwrap());\n      preprocesses.push(preprocess);\n    }\n    let preprocessing = Some((machines, preprocesses));\n\n    let substrate_sign_id =\n      SubstrateSignId { session, id: SubstrateSignableId::CosigningSubstrateBlock(id), attempt };\n\n    Some((\n      Cosigner { session, keys, block_number, id, attempt, preprocessing, signing: None },\n      ProcessorMessage::CosignPreprocess {\n        id: substrate_sign_id,\n        preprocesses: serialized_preprocesses,\n      },\n    ))\n  }\n\n  #[must_use]\n  pub fn handle(\n    &mut self,\n    txn: &mut impl DbTxn,\n    msg: CoordinatorMessage,\n  ) -> Option<ProcessorMessage> {\n    match msg {\n      CoordinatorMessage::CosignSubstrateBlock { .. } => {\n        panic!(\"Cosigner passed CosignSubstrateBlock\")\n      }\n\n      CoordinatorMessage::SignSlashReport { .. 
} => {\n        panic!(\"Cosigner passed SignSlashReport\")\n      }\n\n      CoordinatorMessage::SubstratePreprocesses { id, preprocesses } => {\n        assert_eq!(id.session, self.session);\n        let SubstrateSignableId::CosigningSubstrateBlock(block) = id.id else {\n          panic!(\"cosigner passed Batch\")\n        };\n        if block != self.id {\n          panic!(\"given preprocesses for a distinct block than cosigner is signing\")\n        }\n        if id.attempt != self.attempt {\n          panic!(\"given preprocesses for a distinct attempt than cosigner is signing\")\n        }\n\n        let (machines, our_preprocesses) = match self.preprocessing.take() {\n          // Either rebooted or RPC error, or some invariant\n          None => {\n            warn!(\n              \"not preprocessing for {}. this is an error if we didn't reboot\",\n              hex::encode(block),\n            );\n            return None;\n          }\n          Some(preprocess) => preprocess,\n        };\n\n        let mut parsed = HashMap::new();\n        for l in {\n          let mut keys = preprocesses.keys().copied().collect::<Vec<_>>();\n          keys.sort();\n          keys\n        } {\n          let mut preprocess_ref = preprocesses.get(&l).unwrap().as_slice();\n          let Ok(res) = machines[0].read_preprocess(&mut preprocess_ref) else {\n            return Some(ProcessorMessage::InvalidParticipant { id, participant: l });\n          };\n          if !preprocess_ref.is_empty() {\n            return Some(ProcessorMessage::InvalidParticipant { id, participant: l });\n          }\n          parsed.insert(l, res);\n        }\n        let preprocesses = parsed;\n\n        // Only keep a single machine as we only need one to get the signature\n        let mut signature_machine = None;\n        let mut shares = vec![];\n        let mut serialized_shares = vec![];\n        for (m, machine) in machines.into_iter().enumerate() {\n          let mut preprocesses = 
preprocesses.clone();\n          for (i, our_preprocess) in our_preprocesses.clone().into_iter().enumerate() {\n            if i != m {\n              assert!(preprocesses.insert(self.keys[i].params().i(), our_preprocess).is_none());\n            }\n          }\n\n          let (machine, share) =\n            match machine.sign(preprocesses, &cosign_block_msg(self.block_number, self.id)) {\n              Ok(res) => res,\n              Err(e) => match e {\n                FrostError::InternalError(_) |\n                FrostError::InvalidParticipant(_, _) |\n                FrostError::InvalidSigningSet(_) |\n                FrostError::InvalidParticipantQuantity(_, _) |\n                FrostError::DuplicatedParticipant(_) |\n                FrostError::MissingParticipant(_) => unreachable!(),\n\n                FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => {\n                  return Some(ProcessorMessage::InvalidParticipant { id, participant: l })\n                }\n              },\n            };\n          if m == 0 {\n            signature_machine = Some(machine);\n          }\n\n          let mut share_bytes = [0; 32];\n          share_bytes.copy_from_slice(&share.serialize());\n          serialized_shares.push(share_bytes);\n\n          shares.push(share);\n        }\n        self.signing = Some((signature_machine.unwrap(), shares));\n\n        // Broadcast our shares\n        Some(ProcessorMessage::SubstrateShare { id, shares: serialized_shares })\n      }\n\n      CoordinatorMessage::SubstrateShares { id, shares } => {\n        assert_eq!(id.session, self.session);\n        let SubstrateSignableId::CosigningSubstrateBlock(block) = id.id else {\n          panic!(\"cosigner passed Batch\")\n        };\n        if block != self.id {\n          panic!(\"given preprocesses for a distinct block than cosigner is signing\")\n        }\n        if id.attempt != self.attempt {\n          panic!(\"given preprocesses for a distinct attempt than 
cosigner is signing\")\n        }\n\n        let (machine, our_shares) = match self.signing.take() {\n          // Rebooted, RPC error, or some invariant\n          None => {\n            // If preprocessing has this ID, it means we were never sent the preprocess by the\n            // coordinator\n            if self.preprocessing.is_some() {\n              panic!(\"never preprocessed yet signing?\");\n            }\n\n            warn!(\n              \"not preprocessing for {}. this is an error if we didn't reboot\",\n              hex::encode(block)\n            );\n            return None;\n          }\n          Some(signing) => signing,\n        };\n\n        let mut parsed = HashMap::new();\n        for l in {\n          let mut keys = shares.keys().copied().collect::<Vec<_>>();\n          keys.sort();\n          keys\n        } {\n          let mut share_ref = shares.get(&l).unwrap().as_slice();\n          let Ok(res) = machine.read_share(&mut share_ref) else {\n            return Some(ProcessorMessage::InvalidParticipant { id, participant: l });\n          };\n          if !share_ref.is_empty() {\n            return Some(ProcessorMessage::InvalidParticipant { id, participant: l });\n          }\n          parsed.insert(l, res);\n        }\n        let mut shares = parsed;\n\n        for (i, our_share) in our_shares.into_iter().enumerate().skip(1) {\n          assert!(shares.insert(self.keys[i].params().i(), our_share).is_none());\n        }\n\n        let sig = match machine.complete(shares) {\n          Ok(res) => res,\n          Err(e) => match e {\n            FrostError::InternalError(_) |\n            FrostError::InvalidParticipant(_, _) |\n            FrostError::InvalidSigningSet(_) |\n            FrostError::InvalidParticipantQuantity(_, _) |\n            FrostError::DuplicatedParticipant(_) |\n            FrostError::MissingParticipant(_) => unreachable!(),\n\n            FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => {\n       
       return Some(ProcessorMessage::InvalidParticipant { id, participant: l })\n            }\n          },\n        };\n\n        info!(\"cosigned {} with attempt #{}\", hex::encode(block), id.attempt);\n\n        Completed::set(txn, block, &());\n\n        Some(ProcessorMessage::CosignedBlock {\n          block_number: self.block_number,\n          block,\n          signature: sig.to_bytes().to_vec(),\n        })\n      }\n      CoordinatorMessage::BatchReattempt { .. } => panic!(\"BatchReattempt passed to Cosigner\"),\n    }\n  }\n}\n"
  },
  {
    "path": "processor/src/db.rs",
    "content": "use std::io::Read;\n\nuse scale::{Encode, Decode};\nuse serai_client::validator_sets::primitives::{Session, KeyPair};\n\npub use serai_db::*;\n\nuse crate::networks::{Block, Network};\n\ncreate_db!(\n  MainDb {\n    HandledMessageDb: (id: u64) -> (),\n    PendingActivationsDb: () -> Vec<u8>\n  }\n);\n\nimpl PendingActivationsDb {\n  pub fn pending_activation<N: Network>(\n    getter: &impl Get,\n  ) -> Option<(<N::Block as Block<N>>::Id, Session, KeyPair)> {\n    if let Some(bytes) = Self::get(getter) {\n      if !bytes.is_empty() {\n        let mut slice = bytes.as_slice();\n        let (session, key_pair) = <(Session, KeyPair)>::decode(&mut slice).unwrap();\n        let mut block_before_queue_block = <N::Block as Block<N>>::Id::default();\n        slice.read_exact(block_before_queue_block.as_mut()).unwrap();\n        assert!(slice.is_empty());\n        return Some((block_before_queue_block, session, key_pair));\n      }\n    }\n    None\n  }\n  pub fn set_pending_activation<N: Network>(\n    txn: &mut impl DbTxn,\n    block_before_queue_block: &<N::Block as Block<N>>::Id,\n    session: Session,\n    key_pair: KeyPair,\n  ) {\n    let mut buf = (session, key_pair).encode();\n    buf.extend(block_before_queue_block.as_ref());\n    Self::set(txn, &buf);\n  }\n}\n"
  },
  {
    "path": "processor/src/key_gen.rs",
    "content": "use std::collections::HashMap;\n\nuse zeroize::Zeroizing;\n\nuse rand_core::SeedableRng;\nuse rand_chacha::ChaCha20Rng;\n\nuse transcript::{Transcript, RecommendedTranscript};\nuse ciphersuite::group::GroupEncoding;\nuse dkg_pedpop::*;\nuse frost::{\n  curve::{Ciphersuite, Ristretto},\n  dkg::{Participant, ThresholdParams, ThresholdKeys},\n};\n\nuse log::info;\n\nuse serai_client::validator_sets::primitives::{Session, KeyPair};\nuse messages::key_gen::*;\n\nuse crate::{Get, DbTxn, Db, create_db, networks::Network};\n\n#[derive(Debug)]\npub struct KeyConfirmed<C: Ciphersuite> {\n  pub substrate_keys: Vec<ThresholdKeys<Ristretto>>,\n  pub network_keys: Vec<ThresholdKeys<C>>,\n}\n\ncreate_db!(\n  KeyGenDb {\n    ParamsDb: (session: &Session, attempt: u32) -> (ThresholdParams, u16),\n    // Not scoped to the set since that'd have latter attempts overwrite former\n    // A former attempt may become the finalized attempt, even if it doesn't in a timely manner\n    // Overwriting its commitments would be accordingly poor\n    CommitmentsDb: (key: &KeyGenId) -> HashMap<Participant, Vec<u8>>,\n    GeneratedKeysDb: (session: &Session, substrate_key: &[u8; 32], network_key: &[u8]) -> Vec<u8>,\n    // These do assume a key is only used once across sets, which holds true so long as a single\n    // participant is honest in their execution of the protocol\n    KeysDb: (network_key: &[u8]) -> Vec<u8>,\n    SessionDb: (network_key: &[u8]) -> Session,\n    NetworkKeyDb: (session: Session) -> Vec<u8>,\n  }\n);\n\nimpl GeneratedKeysDb {\n  #[allow(clippy::type_complexity)]\n  fn read_keys<N: Network>(\n    getter: &impl Get,\n    key: &[u8],\n  ) -> Option<(Vec<u8>, (Vec<ThresholdKeys<Ristretto>>, Vec<ThresholdKeys<N::Curve>>))> {\n    let keys_vec = getter.get(key)?;\n    let mut keys_ref: &[u8] = keys_vec.as_ref();\n\n    let mut substrate_keys = vec![];\n    let mut network_keys = vec![];\n    while !keys_ref.is_empty() {\n      
substrate_keys.push(ThresholdKeys::read(&mut keys_ref).unwrap());\n      let mut these_network_keys = ThresholdKeys::read(&mut keys_ref).unwrap();\n      N::tweak_keys(&mut these_network_keys);\n      network_keys.push(these_network_keys);\n    }\n    Some((keys_vec, (substrate_keys, network_keys)))\n  }\n\n  fn save_keys<N: Network>(\n    txn: &mut impl DbTxn,\n    id: &KeyGenId,\n    substrate_keys: &[ThresholdKeys<Ristretto>],\n    network_keys: &[ThresholdKeys<N::Curve>],\n  ) {\n    let mut keys = Zeroizing::new(vec![]);\n    for (substrate_keys, network_keys) in substrate_keys.iter().zip(network_keys) {\n      keys.extend(substrate_keys.serialize().as_slice());\n      keys.extend(network_keys.serialize().as_slice());\n    }\n    txn.put(\n      Self::key(\n        &id.session,\n        &substrate_keys[0].group_key().to_bytes(),\n        network_keys[0].group_key().to_bytes().as_ref(),\n      ),\n      keys,\n    );\n  }\n}\n\nimpl KeysDb {\n  fn confirm_keys<N: Network>(\n    txn: &mut impl DbTxn,\n    session: Session,\n    key_pair: &KeyPair,\n  ) -> (Vec<ThresholdKeys<Ristretto>>, Vec<ThresholdKeys<N::Curve>>) {\n    let (keys_vec, keys) = GeneratedKeysDb::read_keys::<N>(\n      txn,\n      &GeneratedKeysDb::key(&session, &key_pair.0 .0, key_pair.1.as_ref()),\n    )\n    .unwrap();\n    assert_eq!(key_pair.0 .0, keys.0[0].group_key().to_bytes());\n    assert_eq!(\n      {\n        let network_key: &[u8] = key_pair.1.as_ref();\n        network_key\n      },\n      keys.1[0].group_key().to_bytes().as_ref(),\n    );\n    txn.put(Self::key(key_pair.1.as_ref()), keys_vec);\n    NetworkKeyDb::set(txn, session, &key_pair.1.clone().into_inner());\n    SessionDb::set(txn, key_pair.1.as_ref(), &session);\n    keys\n  }\n\n  #[allow(clippy::type_complexity)]\n  fn keys<N: Network>(\n    getter: &impl Get,\n    network_key: &<N::Curve as Ciphersuite>::G,\n  ) -> Option<(Session, (Vec<ThresholdKeys<Ristretto>>, Vec<ThresholdKeys<N::Curve>>))> {\n    let res =\n      
GeneratedKeysDb::read_keys::<N>(getter, &Self::key(network_key.to_bytes().as_ref()))?.1;\n    assert_eq!(&res.1[0].group_key(), network_key);\n    Some((SessionDb::get(getter, network_key.to_bytes().as_ref()).unwrap(), res))\n  }\n\n  pub fn substrate_keys_by_session<N: Network>(\n    getter: &impl Get,\n    session: Session,\n  ) -> Option<Vec<ThresholdKeys<Ristretto>>> {\n    let network_key = NetworkKeyDb::get(getter, session)?;\n    Some(GeneratedKeysDb::read_keys::<N>(getter, &Self::key(&network_key))?.1 .0)\n  }\n}\n\ntype SecretShareMachines<N> =\n  Vec<(SecretShareMachine<Ristretto>, SecretShareMachine<<N as Network>::Curve>)>;\ntype KeyMachines<N> = Vec<(KeyMachine<Ristretto>, KeyMachine<<N as Network>::Curve>)>;\n\n#[derive(Debug)]\npub struct KeyGen<N: Network, D: Db> {\n  db: D,\n  entropy: Zeroizing<[u8; 32]>,\n\n  active_commit: HashMap<Session, (SecretShareMachines<N>, Vec<Vec<u8>>)>,\n  #[allow(clippy::type_complexity)]\n  active_share: HashMap<Session, (KeyMachines<N>, Vec<HashMap<Participant, Vec<u8>>>)>,\n}\n\nimpl<N: Network, D: Db> KeyGen<N, D> {\n  #[allow(clippy::new_ret_no_self)]\n  pub fn new(db: D, entropy: Zeroizing<[u8; 32]>) -> KeyGen<N, D> {\n    KeyGen { db, entropy, active_commit: HashMap::new(), active_share: HashMap::new() }\n  }\n\n  pub fn in_set(&self, session: &Session) -> bool {\n    // We determine if we're in set using if we have the parameters for a session's key generation\n    // The usage of 0 for the attempt is valid so long as we aren't malicious and accordingly\n    // aren't fatally slashed\n    // TODO: Revisit once we do DKG removals for being offline\n    ParamsDb::get(&self.db, session, 0).is_some()\n  }\n\n  #[allow(clippy::type_complexity)]\n  pub fn keys(\n    &self,\n    key: &<N::Curve as Ciphersuite>::G,\n  ) -> Option<(Session, (Vec<ThresholdKeys<Ristretto>>, Vec<ThresholdKeys<N::Curve>>))> {\n    // This is safe, despite not having a txn, since it's a static value\n    // It doesn't change over time/in 
relation to other operations\n    KeysDb::keys::<N>(&self.db, key)\n  }\n\n  pub fn substrate_keys_by_session(\n    &self,\n    session: Session,\n  ) -> Option<Vec<ThresholdKeys<Ristretto>>> {\n    KeysDb::substrate_keys_by_session::<N>(&self.db, session)\n  }\n\n  pub fn handle(\n    &mut self,\n    txn: &mut D::Transaction<'_>,\n    msg: CoordinatorMessage,\n  ) -> ProcessorMessage {\n    const SUBSTRATE_KEY_CONTEXT: &str = \"substrate\";\n    const NETWORK_KEY_CONTEXT: &str = \"network\";\n    let context = |id: &KeyGenId, key| -> [u8; 32] {\n      // TODO2: Also embed the chain ID/genesis block\n      <blake2::Blake2s256 as blake2::digest::Digest>::digest(\n        format!(\n          \"Serai Key Gen. Session: {:?}, Network: {:?}, Attempt: {}, Key: {}\",\n          id.session,\n          N::NETWORK,\n          id.attempt,\n          key,\n        )\n        .as_bytes(),\n      )\n      .into()\n    };\n\n    let rng = |label, id: KeyGenId| {\n      let mut transcript = RecommendedTranscript::new(label);\n      transcript.append_message(b\"entropy\", &self.entropy);\n      transcript.append_message(b\"context\", context(&id, \"rng\"));\n      ChaCha20Rng::from_seed(transcript.rng_seed(b\"rng\"))\n    };\n    let coefficients_rng = |id| rng(b\"Key Gen Coefficients\", id);\n    let secret_shares_rng = |id| rng(b\"Key Gen Secret Shares\", id);\n    let share_rng = |id| rng(b\"Key Gen Share\", id);\n\n    let key_gen_machines = |id, params: ThresholdParams, shares| {\n      let mut rng = coefficients_rng(id);\n      let mut machines = vec![];\n      let mut commitments = vec![];\n      for s in 0 .. 
shares {\n        let params = ThresholdParams::new(\n          params.t(),\n          params.n(),\n          Participant::new(u16::from(params.i()) + s).unwrap(),\n        )\n        .unwrap();\n        let substrate = KeyGenMachine::new(params, context(&id, SUBSTRATE_KEY_CONTEXT))\n          .generate_coefficients(&mut rng);\n        let network = KeyGenMachine::new(params, context(&id, NETWORK_KEY_CONTEXT))\n          .generate_coefficients(&mut rng);\n        machines.push((substrate.0, network.0));\n        let mut serialized = vec![];\n        substrate.1.write(&mut serialized).unwrap();\n        network.1.write(&mut serialized).unwrap();\n        commitments.push(serialized);\n      }\n      (machines, commitments)\n    };\n\n    let secret_share_machines = |id,\n                                 params: ThresholdParams,\n                                 machines: SecretShareMachines<N>,\n                                 commitments: HashMap<Participant, Vec<u8>>|\n     -> Result<_, ProcessorMessage> {\n      let mut rng = secret_shares_rng(id);\n\n      #[allow(clippy::type_complexity)]\n      fn handle_machine<C: Ciphersuite>(\n        rng: &mut ChaCha20Rng,\n        id: KeyGenId,\n        machine: SecretShareMachine<C>,\n        commitments: HashMap<Participant, EncryptionKeyMessage<C, Commitments<C>>>,\n      ) -> Result<\n        (KeyMachine<C>, HashMap<Participant, EncryptedMessage<C, SecretShare<C::F>>>),\n        ProcessorMessage,\n      > {\n        match machine.generate_secret_shares(rng, commitments) {\n          Ok(res) => Ok(res),\n          Err(e) => match e {\n            PedPoPError::InvalidCommitments(i) => {\n              Err(ProcessorMessage::InvalidCommitments { id, faulty: i })?\n            }\n            _ => panic!(\"unknown error: {e:?}\"),\n          },\n        }\n      }\n\n      let mut substrate_commitments = HashMap::new();\n      let mut network_commitments = HashMap::new();\n      for i in 1 ..= params.n() {\n        let i = 
Participant::new(i).unwrap();\n        let mut commitments = commitments[&i].as_slice();\n        substrate_commitments.insert(\n          i,\n          EncryptionKeyMessage::<Ristretto, Commitments<Ristretto>>::read(&mut commitments, params)\n            .map_err(|_| ProcessorMessage::InvalidCommitments { id, faulty: i })?,\n        );\n        network_commitments.insert(\n          i,\n          EncryptionKeyMessage::<N::Curve, Commitments<N::Curve>>::read(&mut commitments, params)\n            .map_err(|_| ProcessorMessage::InvalidCommitments { id, faulty: i })?,\n        );\n        if !commitments.is_empty() {\n          // Malicious Participant included extra bytes in their commitments\n          // (a potential DoS attack)\n          Err(ProcessorMessage::InvalidCommitments { id, faulty: i })?;\n        }\n      }\n\n      let mut key_machines = vec![];\n      let mut shares = vec![];\n      for (m, (substrate_machine, network_machine)) in machines.into_iter().enumerate() {\n        let actual_i = Participant::new(u16::from(params.i()) + u16::try_from(m).unwrap()).unwrap();\n\n        let mut substrate_commitments = substrate_commitments.clone();\n        substrate_commitments.remove(&actual_i);\n        let (substrate_machine, mut substrate_shares) =\n          handle_machine::<Ristretto>(&mut rng, id, substrate_machine, substrate_commitments)?;\n\n        let mut network_commitments = network_commitments.clone();\n        network_commitments.remove(&actual_i);\n        let (network_machine, network_shares) =\n          handle_machine(&mut rng, id, network_machine, network_commitments.clone())?;\n\n        key_machines.push((substrate_machine, network_machine));\n\n        let mut these_shares: HashMap<_, _> =\n          substrate_shares.drain().map(|(i, share)| (i, share.serialize())).collect();\n        for (i, share) in &mut these_shares {\n          share.extend(network_shares[i].serialize());\n        }\n        shares.push(these_shares);\n      }\n    
  Ok((key_machines, shares))\n    };\n\n    match msg {\n      CoordinatorMessage::GenerateKey { id, params, shares } => {\n        info!(\"Generating new key. ID: {id:?} Params: {params:?} Shares: {shares}\");\n\n        // Remove old attempts\n        if self.active_commit.remove(&id.session).is_none() &&\n          self.active_share.remove(&id.session).is_none()\n        {\n          // If we haven't handled this session before, save the params\n          ParamsDb::set(txn, &id.session, id.attempt, &(params, shares));\n        }\n\n        let (machines, commitments) = key_gen_machines(id, params, shares);\n        self.active_commit.insert(id.session, (machines, commitments.clone()));\n\n        ProcessorMessage::Commitments { id, commitments }\n      }\n\n      CoordinatorMessage::Commitments { id, mut commitments } => {\n        info!(\"Received commitments for {:?}\", id);\n\n        if self.active_share.contains_key(&id.session) {\n          // We should've been told of a new attempt before receiving commitments again\n          // The coordinator is either missing messages or repeating itself\n          // Either way, it's faulty\n          panic!(\"commitments when already handled commitments\");\n        }\n\n        let (params, share_quantity) = ParamsDb::get(txn, &id.session, id.attempt).unwrap();\n\n        // Unwrap the machines, rebuilding them if we didn't have them in our cache\n        // We won't if the processor rebooted\n        // This *may* be inconsistent if we receive a KeyGen for attempt x, then commitments for\n        // attempt y\n        // The coordinator is trusted to be proper in this regard\n        let (prior, our_commitments) = self\n          .active_commit\n          .remove(&id.session)\n          .unwrap_or_else(|| key_gen_machines(id, params, share_quantity));\n\n        for (i, our_commitments) in our_commitments.into_iter().enumerate() {\n          assert!(commitments\n            .insert(\n              
Participant::new(u16::from(params.i()) + u16::try_from(i).unwrap()).unwrap(),\n              our_commitments,\n            )\n            .is_none());\n        }\n\n        CommitmentsDb::set(txn, &id, &commitments);\n\n        match secret_share_machines(id, params, prior, commitments) {\n          Ok((machines, shares)) => {\n            self.active_share.insert(id.session, (machines, shares.clone()));\n            ProcessorMessage::Shares { id, shares }\n          }\n          Err(e) => e,\n        }\n      }\n\n      CoordinatorMessage::Shares { id, shares } => {\n        info!(\"Received shares for {:?}\", id);\n\n        let (params, share_quantity) = ParamsDb::get(txn, &id.session, id.attempt).unwrap();\n\n        // Same commentary on inconsistency as above exists\n        let (machines, our_shares) = self.active_share.remove(&id.session).unwrap_or_else(|| {\n          let prior = key_gen_machines(id, params, share_quantity).0;\n          let (machines, shares) =\n            secret_share_machines(id, params, prior, CommitmentsDb::get(txn, &id).unwrap())\n              .expect(\"got Shares for a key gen which faulted\");\n          (machines, shares)\n        });\n\n        let mut rng = share_rng(id);\n\n        fn handle_machine<C: Ciphersuite>(\n          rng: &mut ChaCha20Rng,\n          id: KeyGenId,\n          // These are the params of our first share, not this machine's shares\n          params: ThresholdParams,\n          m: usize,\n          machine: KeyMachine<C>,\n          shares_ref: &mut HashMap<Participant, &[u8]>,\n        ) -> Result<ThresholdKeys<C>, ProcessorMessage> {\n          let params = ThresholdParams::new(\n            params.t(),\n            params.n(),\n            Participant::new(u16::from(params.i()) + u16::try_from(m).unwrap()).unwrap(),\n          )\n          .unwrap();\n\n          // Parse the shares\n          let mut shares = HashMap::new();\n          for i in 1 ..= params.n() {\n            let i = 
Participant::new(i).unwrap();\n            let Some(share) = shares_ref.get_mut(&i) else { continue };\n            shares.insert(\n              i,\n              EncryptedMessage::<C, SecretShare<C::F>>::read(share, params).map_err(|_| {\n                ProcessorMessage::InvalidShare { id, accuser: params.i(), faulty: i, blame: None }\n              })?,\n            );\n          }\n\n          Ok(\n            (match machine.calculate_share(rng, shares) {\n              Ok(res) => res,\n              Err(e) => match e {\n                PedPoPError::InvalidShare { participant, blame } => {\n                  Err(ProcessorMessage::InvalidShare {\n                    id,\n                    accuser: params.i(),\n                    faulty: participant,\n                    blame: Some(blame.map(|blame| blame.serialize())).flatten(),\n                  })?\n                }\n                _ => panic!(\"unknown error: {e:?}\"),\n              },\n            })\n            .complete(),\n          )\n        }\n\n        let mut substrate_keys = vec![];\n        let mut network_keys = vec![];\n        for (m, machines) in machines.into_iter().enumerate() {\n          let mut shares_ref: HashMap<Participant, &[u8]> =\n            shares[m].iter().map(|(i, shares)| (*i, shares.as_ref())).collect();\n          for (i, our_shares) in our_shares.iter().enumerate() {\n            if m != i {\n              assert!(shares_ref\n                .insert(\n                  Participant::new(u16::from(params.i()) + u16::try_from(i).unwrap()).unwrap(),\n                  our_shares\n                    [&Participant::new(u16::from(params.i()) + u16::try_from(m).unwrap()).unwrap()]\n                    .as_ref(),\n                )\n                .is_none());\n            }\n          }\n\n          let these_substrate_keys =\n            match handle_machine(&mut rng, id, params, m, machines.0, &mut shares_ref) {\n              Ok(keys) => keys,\n              Err(msg) 
=> return msg,\n            };\n          let mut these_network_keys =\n            match handle_machine(&mut rng, id, params, m, machines.1, &mut shares_ref) {\n              Ok(keys) => keys,\n              Err(msg) => return msg,\n            };\n\n          for i in 1 ..= params.n() {\n            let i = Participant::new(i).unwrap();\n            let Some(shares) = shares_ref.get(&i) else { continue };\n            if !shares.is_empty() {\n              return ProcessorMessage::InvalidShare {\n                id,\n                accuser: these_substrate_keys.params().i(),\n                faulty: i,\n                blame: None,\n              };\n            }\n          }\n\n          N::tweak_keys(&mut these_network_keys);\n\n          substrate_keys.push(these_substrate_keys);\n          network_keys.push(these_network_keys);\n        }\n\n        let mut generated_substrate_key = None;\n        let mut generated_network_key = None;\n        for keys in substrate_keys.iter().zip(&network_keys) {\n          if generated_substrate_key.is_none() {\n            generated_substrate_key = Some(keys.0.group_key());\n            generated_network_key = Some(keys.1.group_key());\n          } else {\n            assert_eq!(generated_substrate_key, Some(keys.0.group_key()));\n            assert_eq!(generated_network_key, Some(keys.1.group_key()));\n          }\n        }\n\n        GeneratedKeysDb::save_keys::<N>(txn, &id, &substrate_keys, &network_keys);\n\n        ProcessorMessage::GeneratedKeyPair {\n          id,\n          substrate_key: generated_substrate_key.unwrap().to_bytes(),\n          // TODO: This can be made more efficient since tweaked keys may be a subset of keys\n          network_key: generated_network_key.unwrap().to_bytes().as_ref().to_vec(),\n        }\n      }\n\n      CoordinatorMessage::VerifyBlame { id, accuser, accused, share, blame } => {\n        let params = ParamsDb::get(txn, &id.session, id.attempt).unwrap().0;\n\n        let mut 
share_ref = share.as_slice();\n        let Ok(substrate_share) = EncryptedMessage::<\n          Ristretto,\n          SecretShare<<Ristretto as Ciphersuite>::F>,\n        >::read(&mut share_ref, params) else {\n          return ProcessorMessage::Blame { id, participant: accused };\n        };\n        let Ok(network_share) = EncryptedMessage::<\n          N::Curve,\n          SecretShare<<N::Curve as Ciphersuite>::F>,\n        >::read(&mut share_ref, params) else {\n          return ProcessorMessage::Blame { id, participant: accused };\n        };\n        if !share_ref.is_empty() {\n          return ProcessorMessage::Blame { id, participant: accused };\n        }\n\n        let mut substrate_commitment_msgs = HashMap::new();\n        let mut network_commitment_msgs = HashMap::new();\n        let commitments = CommitmentsDb::get(txn, &id).unwrap();\n        for (i, commitments) in commitments {\n          let mut commitments = commitments.as_slice();\n          substrate_commitment_msgs\n            .insert(i, EncryptionKeyMessage::<_, _>::read(&mut commitments, params).unwrap());\n          network_commitment_msgs\n            .insert(i, EncryptionKeyMessage::<_, _>::read(&mut commitments, params).unwrap());\n        }\n\n        // There is a mild DoS here where someone with a valid blame bloats it to the maximum size\n        // Given the ambiguity, and limited potential to DoS (this being called means *someone* is\n        // getting fatally slashed) voids the need to ensure blame is minimal\n        let substrate_blame =\n          blame.clone().and_then(|blame| EncryptionKeyProof::read(&mut blame.as_slice()).ok());\n        let network_blame =\n          blame.clone().and_then(|blame| EncryptionKeyProof::read(&mut blame.as_slice()).ok());\n\n        let substrate_blame = AdditionalBlameMachine::new(\n          context(&id, SUBSTRATE_KEY_CONTEXT),\n          params.n(),\n          substrate_commitment_msgs,\n        )\n        .unwrap()\n        
.blame(accuser, accused, substrate_share, substrate_blame);\n        let network_blame = AdditionalBlameMachine::new(\n          context(&id, NETWORK_KEY_CONTEXT),\n          params.n(),\n          network_commitment_msgs,\n        )\n        .unwrap()\n        .blame(accuser, accused, network_share, network_blame);\n\n        // If the accused was blamed for either, mark them as at fault\n        if (substrate_blame == accused) || (network_blame == accused) {\n          return ProcessorMessage::Blame { id, participant: accused };\n        }\n\n        ProcessorMessage::Blame { id, participant: accuser }\n      }\n    }\n  }\n\n  // This should only be called if we're participating, hence taking our instance\n  #[allow(clippy::unused_self)]\n  pub fn confirm(\n    &mut self,\n    txn: &mut D::Transaction<'_>,\n    session: Session,\n    key_pair: &KeyPair,\n  ) -> KeyConfirmed<N::Curve> {\n    info!(\n      \"Confirmed key pair {} {} for {:?}\",\n      hex::encode(key_pair.0),\n      hex::encode(&key_pair.1),\n      session,\n    );\n\n    let (substrate_keys, network_keys) = KeysDb::confirm_keys::<N>(txn, session, key_pair);\n\n    KeyConfirmed { substrate_keys, network_keys }\n  }\n}\n"
  },
  {
    "path": "processor/src/lib.rs",
    "content": "#![allow(dead_code)]\n\nmod plan;\npub use plan::*;\n\nmod db;\npub(crate) use db::*;\n\nmod key_gen;\n\npub mod networks;\npub(crate) mod multisigs;\n\nmod additional_key;\npub use additional_key::additional_key;\n"
  },
  {
    "path": "processor/src/main.rs",
    "content": "use std::{time::Duration, collections::HashMap};\n\nuse zeroize::{Zeroize, Zeroizing};\n\nuse transcript::{Transcript, RecommendedTranscript};\nuse ciphersuite::{group::GroupEncoding, Ciphersuite};\n\nuse log::{info, warn};\nuse tokio::time::sleep;\n\nuse serai_client::{\n  primitives::{BlockHash, ExternalNetworkId},\n  validator_sets::primitives::{Session, KeyPair},\n};\n\nuse messages::{\n  coordinator::{\n    SubstrateSignableId, PlanMeta, CoordinatorMessage as CoordinatorCoordinatorMessage,\n  },\n  CoordinatorMessage,\n};\n\nuse serai_env as env;\n\nuse message_queue::{Service, client::MessageQueue};\n\nmod plan;\npub use plan::*;\n\nmod networks;\nuse networks::{Block, Network};\n#[cfg(feature = \"bitcoin\")]\nuse networks::Bitcoin;\n#[cfg(feature = \"ethereum\")]\nuse networks::Ethereum;\n#[cfg(feature = \"monero\")]\nuse networks::Monero;\n\nmod additional_key;\npub use additional_key::additional_key;\n\nmod db;\npub use db::*;\n\nmod coordinator;\npub use coordinator::*;\n\nmod key_gen;\nuse key_gen::{SessionDb, KeyConfirmed, KeyGen};\n\nmod signer;\nuse signer::Signer;\n\nmod cosigner;\nuse cosigner::Cosigner;\n\nmod batch_signer;\nuse batch_signer::BatchSigner;\n\nmod slash_report_signer;\nuse slash_report_signer::SlashReportSigner;\n\nmod multisigs;\nuse multisigs::{MultisigEvent, MultisigManager};\n\n#[cfg(test)]\nmod tests;\n\n#[global_allocator]\nstatic ALLOCATOR: zalloc::ZeroizingAlloc<std::alloc::System> =\n  zalloc::ZeroizingAlloc(std::alloc::System);\n\n// Items which are mutably borrowed by Tributary.\n// Any exceptions to this have to be carefully monitored in order to ensure consistency isn't\n// violated.\nstruct TributaryMutable<N: Network, D: Db> {\n  // The following are actually mutably borrowed by Substrate as well.\n  // - Substrate triggers key gens, and determines which to use.\n  // - SubstrateBlock events cause scheduling which causes signing.\n  //\n  // This is still considered Tributary-mutable as most mutation 
(preprocesses/shares) happens by\n  // the Tributary.\n  //\n  // Creation of tasks is by Substrate, yet this is safe since the mutable borrow is transferred to\n  // Tributary.\n  //\n  // Tributary stops mutating a key gen attempt before Substrate is made aware of it, ensuring\n  // Tributary drops its mutable borrow before Substrate acquires it. Tributary will maintain a\n  // mutable borrow on the *key gen task*, yet the finalization code can successfully run for any\n  // attempt.\n  //\n  // The only other note is how the scanner may cause a signer task to be dropped, effectively\n  // invalidating the Tributary's mutable borrow. The signer is coded to allow for attempted usage\n  // of a dropped task.\n  key_gen: KeyGen<N, D>,\n  signers: HashMap<Session, Signer<N, D>>,\n\n  // This is also mutably borrowed by the Scanner.\n  // The Scanner starts new sign tasks.\n  // The Tributary mutates already-created signed tasks, potentially completing them.\n  // Substrate may mark tasks as completed, invalidating any existing mutable borrows.\n  // The safety of this follows as written above.\n\n  // There should only be one BatchSigner at a time (see #277)\n  batch_signer: Option<BatchSigner<D>>,\n\n  // Solely mutated by the tributary.\n  cosigner: Option<Cosigner>,\n  slash_report_signer: Option<SlashReportSigner>,\n}\n\n// Items which are mutably borrowed by Substrate.\n// Any exceptions to this have to be carefully monitored in order to ensure consistency isn't\n// violated.\n\n/*\n  The MultisigManager contains the Scanner and Schedulers.\n\n  The scanner is expected to autonomously operate, scanning blocks as they appear. When a block is\n  sufficiently confirmed, the scanner causes the Substrate signer to sign a batch. 
It itself only\n  mutates its list of finalized blocks, to protect against re-orgs, and its in-memory state though.\n\n  Disk mutations to the scan-state only happen once the relevant `Batch` is included on Substrate.\n  It can't be mutated as soon as the `Batch` is signed as we need to know the order of `Batch`s\n  relevant to `Burn`s.\n\n  Schedulers take in new outputs, confirmed in `Batch`s, and outbound payments, triggered by\n  `Burn`s.\n\n  Substrate also decides when to move to a new multisig, hence why this entire object is\n  Substrate-mutable.\n\n  Since MultisigManager should always be verifiable, and the Tributary is temporal, MultisigManager\n  being entirely SubstrateMutable shows proper data pipe-lining.\n*/\n\ntype SubstrateMutable<N, D> = MultisigManager<D, N>;\n\nasync fn handle_coordinator_msg<D: Db, N: Network, Co: Coordinator>(\n  txn: &mut D::Transaction<'_>,\n  network: &N,\n  coordinator: &mut Co,\n  tributary_mutable: &mut TributaryMutable<N, D>,\n  substrate_mutable: &mut SubstrateMutable<N, D>,\n  msg: &Message,\n) {\n  // If this message expects a higher block number than we have, halt until synced\n  async fn wait<N: Network, D: Db>(\n    txn: &D::Transaction<'_>,\n    substrate_mutable: &SubstrateMutable<N, D>,\n    block_hash: &BlockHash,\n  ) {\n    let mut needed_hash = <N::Block as Block<N>>::Id::default();\n    needed_hash.as_mut().copy_from_slice(&block_hash.0);\n\n    loop {\n      // Ensure our scanner has scanned this block, which means our daemon has this block at\n      // a sufficient depth\n      if substrate_mutable.block_number(txn, &needed_hash).await.is_none() {\n        warn!(\n          \"node is desynced. 
we haven't scanned {} which should happen after {} confirms\",\n          hex::encode(&needed_hash),\n          N::CONFIRMATIONS,\n        );\n        sleep(Duration::from_secs(10)).await;\n        continue;\n      };\n      break;\n    }\n\n    // TODO2: Sanity check we got an AckBlock (or this is the AckBlock) for the block in question\n\n    /*\n    let synced = |context: &SubstrateContext, key| -> Result<(), ()> {\n      // Check that we've synced this block and can actually operate on it ourselves\n      let latest = scanner.latest_scanned(key);\n      if usize::try_from(context.network_latest_finalized_block).unwrap() < latest {\n        log::warn!(\n          \"external network node disconnected/desynced from rest of the network. \\\n          our block: {latest:?}, network's acknowledged: {}\",\n          context.network_latest_finalized_block,\n        );\n        Err(())?;\n      }\n      Ok(())\n    };\n    */\n  }\n\n  if let Some(required) = msg.msg.required_block() {\n    // wait only reads from, it doesn't mutate, substrate_mutable\n    wait(txn, substrate_mutable, &required).await;\n  }\n\n  async fn activate_key<N: Network, D: Db>(\n    network: &N,\n    substrate_mutable: &mut SubstrateMutable<N, D>,\n    tributary_mutable: &mut TributaryMutable<N, D>,\n    txn: &mut D::Transaction<'_>,\n    session: Session,\n    key_pair: KeyPair,\n    activation_number: usize,\n  ) {\n    info!(\"activating {session:?}'s keys at {activation_number}\");\n\n    let network_key = <N as Network>::Curve::read_G::<&[u8]>(&mut key_pair.1.as_ref())\n      .expect(\"Substrate finalized invalid point as a network's key\");\n\n    if tributary_mutable.key_gen.in_set(&session) {\n      // See TributaryMutable's struct definition for why this block is safe\n      let KeyConfirmed { substrate_keys, network_keys } =\n        tributary_mutable.key_gen.confirm(txn, session, &key_pair);\n      if session.0 == 0 {\n        tributary_mutable.batch_signer =\n          
Some(BatchSigner::new(N::NETWORK, session, substrate_keys));\n      }\n      tributary_mutable\n        .signers\n        .insert(session, Signer::new(network.clone(), session, network_keys));\n    }\n\n    substrate_mutable.add_key(txn, activation_number, network_key).await;\n  }\n\n  match msg.msg.clone() {\n    CoordinatorMessage::KeyGen(msg) => {\n      coordinator.send(tributary_mutable.key_gen.handle(txn, msg)).await;\n    }\n\n    CoordinatorMessage::Sign(msg) => {\n      if let Some(msg) = tributary_mutable\n        .signers\n        .get_mut(&msg.session())\n        .expect(\"coordinator told us to sign with a signer we don't have\")\n        .handle(txn, msg)\n        .await\n      {\n        coordinator.send(msg).await;\n      }\n    }\n\n    CoordinatorMessage::Coordinator(msg) => match msg {\n      CoordinatorCoordinatorMessage::CosignSubstrateBlock { id, block_number } => {\n        let SubstrateSignableId::CosigningSubstrateBlock(block) = id.id else {\n          panic!(\"CosignSubstrateBlock id didn't have a CosigningSubstrateBlock\")\n        };\n        let Some(keys) = tributary_mutable.key_gen.substrate_keys_by_session(id.session) else {\n          panic!(\"didn't have key shares for the key we were told to cosign with\");\n        };\n        if let Some((cosigner, msg)) =\n          Cosigner::new(txn, id.session, keys, block_number, block, id.attempt)\n        {\n          tributary_mutable.cosigner = Some(cosigner);\n          coordinator.send(msg).await;\n        } else {\n          log::warn!(\"Cosigner::new returned None\");\n        }\n      }\n      CoordinatorCoordinatorMessage::SignSlashReport { id, report } => {\n        assert_eq!(id.id, SubstrateSignableId::SlashReport);\n        let Some(keys) = tributary_mutable.key_gen.substrate_keys_by_session(id.session) else {\n          panic!(\"didn't have key shares for the key we were told to perform a slash report with\");\n        };\n        if let Some((slash_report_signer, msg)) =\n    
      SlashReportSigner::new(txn, N::NETWORK, id.session, keys, report, id.attempt)\n        {\n          tributary_mutable.slash_report_signer = Some(slash_report_signer);\n          coordinator.send(msg).await;\n        } else {\n          log::warn!(\"SlashReportSigner::new returned None\");\n        }\n      }\n      _ => {\n        let (is_cosign, is_batch, is_slash_report) = match msg {\n          CoordinatorCoordinatorMessage::CosignSubstrateBlock { .. } |\n          CoordinatorCoordinatorMessage::SignSlashReport { .. } => (false, false, false),\n          CoordinatorCoordinatorMessage::SubstratePreprocesses { ref id, .. } |\n          CoordinatorCoordinatorMessage::SubstrateShares { ref id, .. } => (\n            matches!(&id.id, SubstrateSignableId::CosigningSubstrateBlock(_)),\n            matches!(&id.id, SubstrateSignableId::Batch(_)),\n            matches!(&id.id, SubstrateSignableId::SlashReport),\n          ),\n          CoordinatorCoordinatorMessage::BatchReattempt { .. } => (false, true, false),\n        };\n\n        if is_cosign {\n          if let Some(cosigner) = tributary_mutable.cosigner.as_mut() {\n            if let Some(msg) = cosigner.handle(txn, msg) {\n              coordinator.send(msg).await;\n            }\n          } else {\n            log::warn!(\n              \"received message for cosigner yet didn't have a cosigner. 
{}\",\n              \"this is an error if we didn't reboot\",\n            );\n          }\n        } else if is_batch {\n          if let Some(msg) = tributary_mutable\n            .batch_signer\n            .as_mut()\n            .expect(\n              \"coordinator told us to sign a batch when we don't currently have a Substrate signer\",\n            )\n            .handle(txn, msg)\n          {\n            coordinator.send(msg).await;\n          }\n        } else if is_slash_report {\n          if let Some(slash_report_signer) = tributary_mutable.slash_report_signer.as_mut() {\n            if let Some(msg) = slash_report_signer.handle(txn, msg) {\n              coordinator.send(msg).await;\n            }\n          } else {\n            log::warn!(\n              \"received message for slash report signer yet didn't have {}\",\n              \"a slash report signer. this is an error if we didn't reboot\",\n            );\n          }\n        }\n      }\n    },\n\n    CoordinatorMessage::Substrate(msg) => {\n      match msg {\n        messages::substrate::CoordinatorMessage::ConfirmKeyPair { context, session, key_pair } => {\n          // This is the first key pair for this network so no block has been finalized yet\n          // TODO: Write documentation for this in docs/\n          // TODO: Use an Option instead of a magic?\n          if context.network_latest_finalized_block.0 == [0; 32] {\n            assert!(tributary_mutable.signers.is_empty());\n            assert!(tributary_mutable.batch_signer.is_none());\n            assert!(tributary_mutable.cosigner.is_none());\n            // We can't check this as existing is no longer pub\n            // assert!(substrate_mutable.existing.as_ref().is_none());\n\n            // Wait until a network's block's time exceeds Serai's time\n            // These time calls are extremely expensive for what they do, yet they only run when\n            // confirming the first key pair, before any network activity has 
occurred, so they\n            // should be fine\n\n            // If the latest block number is 10, then the block indexed by 1 has 10 confirms\n            // 10 + 1 - 10 = 1\n            let mut block_i;\n            while {\n              block_i = (network.get_latest_block_number_with_retries().await + 1)\n                .saturating_sub(N::CONFIRMATIONS);\n              network.get_block_with_retries(block_i).await.time(network).await < context.serai_time\n            } {\n              info!(\n                \"serai confirmed the first key pair for a set. {} {}\",\n                \"we're waiting for a network's finalized block's time to exceed unix time \",\n                context.serai_time,\n              );\n              sleep(Duration::from_secs(5)).await;\n            }\n\n            // Find the first block to do so\n            let mut earliest = block_i;\n            // earliest > 0 prevents a panic if Serai creates keys before the genesis block\n            // which... 
should be impossible\n            // Yet a prevented panic is a prevented panic\n            while (earliest > 0) &&\n              (network.get_block_with_retries(earliest - 1).await.time(network).await >=\n                context.serai_time)\n            {\n              earliest -= 1;\n            }\n\n            // Use this as the activation block\n            let activation_number = earliest;\n\n            activate_key(\n              network,\n              substrate_mutable,\n              tributary_mutable,\n              txn,\n              session,\n              key_pair,\n              activation_number,\n            )\n            .await;\n          } else {\n            let mut block_before_queue_block = <N::Block as Block<N>>::Id::default();\n            block_before_queue_block\n              .as_mut()\n              .copy_from_slice(&context.network_latest_finalized_block.0);\n            // We can't set these keys for activation until we know their queue block, which we\n            // won't until the next Batch is confirmed\n            // Set this variable so when we get the next Batch event, we can handle it\n            PendingActivationsDb::set_pending_activation::<N>(\n              txn,\n              &block_before_queue_block,\n              session,\n              key_pair,\n            );\n          }\n        }\n\n        messages::substrate::CoordinatorMessage::SubstrateBlock {\n          context,\n          block: substrate_block,\n          burns,\n          batches,\n        } => {\n          if let Some((block, session, key_pair)) =\n            PendingActivationsDb::pending_activation::<N>(txn)\n          {\n            // Only run if this is a Batch belonging to a distinct block\n            if context.network_latest_finalized_block.as_ref() != block.as_ref() {\n              let mut queue_block = <N::Block as Block<N>>::Id::default();\n              
queue_block.as_mut().copy_from_slice(context.network_latest_finalized_block.as_ref());\n\n              let activation_number = substrate_mutable\n                .block_number(txn, &queue_block)\n                .await\n                .expect(\"KeyConfirmed from context we haven't synced\") +\n                N::CONFIRMATIONS;\n\n              activate_key(\n                network,\n                substrate_mutable,\n                tributary_mutable,\n                txn,\n                session,\n                key_pair,\n                activation_number,\n              )\n              .await;\n              //clear pending activation\n              txn.del(PendingActivationsDb::key());\n            }\n          }\n\n          // Since this block was acknowledged, we no longer have to sign the batches within it\n          if let Some(batch_signer) = tributary_mutable.batch_signer.as_mut() {\n            for batch_id in batches {\n              batch_signer.batch_signed(txn, batch_id);\n            }\n          }\n\n          let (acquired_lock, to_sign) =\n            substrate_mutable.substrate_block(txn, network, context, burns).await;\n\n          // Send SubstrateBlockAck, with relevant plan IDs, before we trigger the signing of these\n          // plans\n          if !tributary_mutable.signers.is_empty() {\n            coordinator\n              .send(messages::coordinator::ProcessorMessage::SubstrateBlockAck {\n                block: substrate_block,\n                plans: to_sign\n                  .iter()\n                  .filter_map(|signable| {\n                    SessionDb::get(txn, signable.0.to_bytes().as_ref())\n                      .map(|session| PlanMeta { session, id: signable.1 })\n                  })\n                  .collect(),\n              })\n              .await;\n          }\n\n          // See commentary in TributaryMutable for why this is safe\n          let signers = &mut tributary_mutable.signers;\n          for (key, 
id, tx, eventuality) in to_sign {\n            if let Some(session) = SessionDb::get(txn, key.to_bytes().as_ref()) {\n              let signer = signers.get_mut(&session).unwrap();\n              if let Some(msg) = signer.sign_transaction(txn, id, tx, &eventuality).await {\n                coordinator.send(msg).await;\n              }\n            }\n          }\n\n          // This is not premature, even if this block had multiple `Batch`s created, as the first\n          // `Batch` alone will trigger all Plans/Eventualities/Signs\n          if acquired_lock {\n            substrate_mutable.release_scanner_lock().await;\n          }\n        }\n      }\n    }\n  }\n}\n\nasync fn boot<N: Network, D: Db, Co: Coordinator>(\n  raw_db: &mut D,\n  network: &N,\n  coordinator: &mut Co,\n) -> (D, TributaryMutable<N, D>, SubstrateMutable<N, D>) {\n  let mut entropy_transcript = {\n    let entropy = Zeroizing::new(env::var(\"ENTROPY\").expect(\"entropy wasn't specified\"));\n    if entropy.len() != 64 {\n      panic!(\"entropy isn't the right length\");\n    }\n    let mut bytes =\n      Zeroizing::new(hex::decode(entropy).map_err(|_| ()).expect(\"entropy wasn't hex-formatted\"));\n    if bytes.len() != 32 {\n      bytes.zeroize();\n      panic!(\"entropy wasn't 32 bytes\");\n    }\n    let mut entropy = Zeroizing::new([0; 32]);\n    let entropy_mut: &mut [u8] = entropy.as_mut();\n    entropy_mut.copy_from_slice(bytes.as_ref());\n\n    let mut transcript = RecommendedTranscript::new(b\"Serai Processor Entropy\");\n    transcript.append_message(b\"entropy\", entropy);\n    transcript\n  };\n\n  // TODO: Save a hash of the entropy to the DB and make sure the entropy didn't change\n\n  let mut entropy = |label| {\n    let mut challenge = entropy_transcript.challenge(label);\n    let mut res = Zeroizing::new([0; 32]);\n    let res_mut: &mut [u8] = res.as_mut();\n    res_mut.copy_from_slice(&challenge[.. 
32]);\n    challenge.zeroize();\n    res\n  };\n\n  // We don't need to re-issue GenerateKey orders because the coordinator is expected to\n  // schedule/notify us of new attempts\n  // TODO: Is this above comment still true? Not at all due to the planned lack of DKG timeouts?\n  let key_gen = KeyGen::<N, _>::new(raw_db.clone(), entropy(b\"key-gen_entropy\"));\n\n  let (multisig_manager, current_keys, actively_signing) =\n    MultisigManager::new(raw_db, network).await;\n\n  let mut batch_signer = None;\n  let mut signers = HashMap::new();\n\n  for (i, key) in current_keys.iter().enumerate() {\n    let Some((session, (substrate_keys, network_keys))) = key_gen.keys(key) else { continue };\n    let network_key = network_keys[0].group_key();\n\n    // If this is the oldest key, load the BatchSigner for it as the active BatchSigner\n    // The new key only takes responsibility once the old key is fully deprecated\n    //\n    // We don't have to load any state for this since the Scanner will re-fire any events\n    // necessary, only no longer scanning old blocks once Substrate acks them\n    if i == 0 {\n      batch_signer = Some(BatchSigner::new(N::NETWORK, session, substrate_keys));\n    }\n\n    // The Scanner re-fires events as needed for batch_signer yet not signer\n    // This is due to the transactions which we start signing from due to a block not being\n    // guaranteed to be signed before we stop scanning the block on reboot\n    // We could simplify the Signer flow by delaying when it acks a block, yet that'd:\n    // 1) Increase the startup time\n    // 2) Cause re-emission of Batch events, which we'd need to check the safety of\n    //    (TODO: Do anyways?)\n    // 3) Violate the attempt counter (TODO: Is this already being violated?)\n    let mut signer = Signer::new(network.clone(), session, network_keys);\n\n    // Sign any TXs being actively signed\n    for (plan, tx, eventuality) in &actively_signing {\n      if plan.key == network_key {\n        
let mut txn = raw_db.txn();\n        if let Some(msg) =\n          signer.sign_transaction(&mut txn, plan.id(), tx.clone(), eventuality).await\n        {\n          coordinator.send(msg).await;\n        }\n        // This should only have re-writes of existing data\n        drop(txn);\n      }\n    }\n\n    signers.insert(session, signer);\n  }\n\n  // Spawn a task to rebroadcast signed TXs yet to be mined into a finalized block\n  // This hedges against being dropped due to full mempools, temporarily too low of a fee...\n  tokio::spawn(Signer::<N, D>::rebroadcast_task(raw_db.clone(), network.clone()));\n\n  (\n    raw_db.clone(),\n    TributaryMutable { key_gen, batch_signer, cosigner: None, slash_report_signer: None, signers },\n    multisig_manager,\n  )\n}\n\n#[allow(clippy::await_holding_lock)] // Needed for txn, unfortunately can't be down-scoped\nasync fn run<N: Network, D: Db, Co: Coordinator>(mut raw_db: D, network: N, mut coordinator: Co) {\n  // We currently expect a contextless bidirectional mapping between these two values\n  // (which is that any value of A can be interpreted as B and vice versa)\n  // While we can write a contextual mapping, we have yet to do so\n  // This check ensures no network which doesn't have a bidirectional mapping is defined\n  assert_eq!(<N::Block as Block<N>>::Id::default().as_ref().len(), BlockHash([0u8; 32]).0.len());\n\n  let (main_db, mut tributary_mutable, mut substrate_mutable) =\n    boot(&mut raw_db, &network, &mut coordinator).await;\n\n  // We can't load this from the DB as we can't guarantee atomic increments with the ack function\n  // TODO: Load with a slight tolerance\n  let mut last_coordinator_msg = None;\n\n  loop {\n    let mut txn = raw_db.txn();\n\n    log::trace!(\"new db txn in run\");\n\n    let mut outer_msg = None;\n\n    tokio::select! 
{\n      // This blocks the entire processor until it finishes handling this message\n      // KeyGen specifically may take a notable amount of processing time\n      // While that shouldn't be an issue in practice, as after processing an attempt it'll handle\n      // the other messages in the queue, it may be beneficial to parallelize these\n      // They could potentially be parallelized by type (KeyGen, Sign, Substrate) without issue\n      msg = coordinator.recv() => {\n        if let Some(last_coordinator_msg) = last_coordinator_msg {\n          assert_eq!(msg.id, last_coordinator_msg + 1);\n        }\n        last_coordinator_msg = Some(msg.id);\n\n        // Only handle this if we haven't already\n        if HandledMessageDb::get(&main_db, msg.id).is_none() {\n          HandledMessageDb::set(&mut txn, msg.id, &());\n\n          // This is isolated to better think about how its ordered, or rather, about how the other\n          // cases aren't ordered\n          //\n          // While the coordinator messages are ordered, they're not deterministically ordered\n          // Tributary-caused messages are deterministically ordered, and Substrate-caused messages\n          // are deterministically-ordered, yet they're both shoved into a singular queue\n          // The order at which they're shoved in together isn't deterministic\n          //\n          // This is safe so long as Tributary and Substrate messages don't both expect mutable\n          // references over the same data\n          handle_coordinator_msg(\n            &mut txn,\n            &network,\n            &mut coordinator,\n            &mut tributary_mutable,\n            &mut substrate_mutable,\n            &msg,\n          ).await;\n        }\n\n        outer_msg = Some(msg);\n      },\n\n      scanner_event = substrate_mutable.next_scanner_event() => {\n        let msg = substrate_mutable.scanner_event_to_multisig_event(\n          &mut txn,\n          &network,\n          scanner_event\n   
     ).await;\n\n        match msg {\n          MultisigEvent::Batches(retired_key_new_key, batches) => {\n            // Start signing this batch\n            for batch in batches {\n              info!(\"created batch {} ({} instructions)\", batch.id, batch.instructions.len());\n\n              // The coordinator expects BatchPreprocess to immediately follow Batch\n              coordinator.send(\n                messages::substrate::ProcessorMessage::Batch { batch: batch.clone() }\n              ).await;\n\n              if let Some(batch_signer) = tributary_mutable.batch_signer.as_mut() {\n                if let Some(msg) = batch_signer.sign(&mut txn, batch) {\n                  coordinator.send(msg).await;\n                }\n              }\n            }\n\n            if let Some((retired_key, new_key)) = retired_key_new_key {\n              // Safe to mutate since all signing operations are done and no more will be added\n              if let Some(retired_session) = SessionDb::get(&txn, retired_key.to_bytes().as_ref()) {\n                tributary_mutable.signers.remove(&retired_session);\n              }\n              tributary_mutable.batch_signer.take();\n              let keys = tributary_mutable.key_gen.keys(&new_key);\n              if let Some((session, (substrate_keys, _))) = keys {\n                tributary_mutable.batch_signer =\n                  Some(BatchSigner::new(N::NETWORK, session, substrate_keys));\n              }\n            }\n          },\n          MultisigEvent::Completed(key, id, tx) => {\n            if let Some(session) = SessionDb::get(&txn, &key) {\n              let signer = tributary_mutable.signers.get_mut(&session).unwrap();\n              if let Some(msg) = signer.completed(&mut txn, id, &tx) {\n                coordinator.send(msg).await;\n              }\n            }\n          }\n        }\n      },\n    }\n\n    txn.commit();\n    if let Some(msg) = outer_msg {\n      coordinator.ack(msg).await;\n    }\n  
}\n}\n\n#[tokio::main]\nasync fn main() {\n  // Override the panic handler with one which will panic if any tokio task panics\n  {\n    let existing = std::panic::take_hook();\n    std::panic::set_hook(Box::new(move |panic| {\n      existing(panic);\n      const MSG: &str = \"exiting the process due to a task panicking\";\n      println!(\"{MSG}\");\n      log::error!(\"{MSG}\");\n      std::process::exit(1);\n    }));\n  }\n\n  if std::env::var(\"RUST_LOG\").is_err() {\n    std::env::set_var(\"RUST_LOG\", serai_env::var(\"RUST_LOG\").unwrap_or_else(|| \"info\".to_string()));\n  }\n  env_logger::init();\n\n  #[allow(unused_variables, unreachable_code)]\n  let db = {\n    #[cfg(all(feature = \"parity-db\", feature = \"rocksdb\"))]\n    panic!(\"built with parity-db and rocksdb\");\n    #[cfg(all(feature = \"parity-db\", not(feature = \"rocksdb\")))]\n    let db =\n      serai_db::new_parity_db(&serai_env::var(\"DB_PATH\").expect(\"path to DB wasn't specified\"));\n    #[cfg(feature = \"rocksdb\")]\n    let db =\n      serai_db::new_rocksdb(&serai_env::var(\"DB_PATH\").expect(\"path to DB wasn't specified\"));\n    db\n  };\n\n  // Network configuration\n  let url = {\n    let login = env::var(\"NETWORK_RPC_LOGIN\").expect(\"network RPC login wasn't specified\");\n    let hostname = env::var(\"NETWORK_RPC_HOSTNAME\").expect(\"network RPC hostname wasn't specified\");\n    let port = env::var(\"NETWORK_RPC_PORT\").expect(\"network port domain wasn't specified\");\n    \"http://\".to_string() + &login + \"@\" + &hostname + \":\" + &port\n  };\n  let network_id = match env::var(\"NETWORK\").expect(\"network wasn't specified\").as_str() {\n    \"bitcoin\" => ExternalNetworkId::Bitcoin,\n    \"ethereum\" => ExternalNetworkId::Ethereum,\n    \"monero\" => ExternalNetworkId::Monero,\n    _ => panic!(\"unrecognized network\"),\n  };\n\n  let coordinator = MessageQueue::from_env(Service::Processor(network_id));\n\n  // This allow is necessary since each configuration deletes 
the other networks from the following\n  // match arms. So we match all cases but since all cases already there according to the compiler\n  // we put this to allow clippy to get pass this.\n  #[allow(unreachable_patterns)]\n  match network_id {\n    #[cfg(feature = \"bitcoin\")]\n    ExternalNetworkId::Bitcoin => run(db, Bitcoin::new(url).await, coordinator).await,\n    #[cfg(feature = \"ethereum\")]\n    ExternalNetworkId::Ethereum => {\n      let relayer_hostname = env::var(\"ETHEREUM_RELAYER_HOSTNAME\")\n        .expect(\"ethereum relayer hostname wasn't specified\")\n        .clone();\n      let relayer_port =\n        env::var(\"ETHEREUM_RELAYER_PORT\").expect(\"ethereum relayer port wasn't specified\");\n      let relayer_url = relayer_hostname + \":\" + &relayer_port;\n      run(db.clone(), Ethereum::new(db, url, relayer_url).await, coordinator).await\n    }\n    #[cfg(feature = \"monero\")]\n    ExternalNetworkId::Monero => run(db, Monero::new(url).await, coordinator).await,\n    _ => panic!(\"spawning a processor for an unsupported network\"),\n  }\n}\n"
  },
  {
    "path": "processor/src/multisigs/db.rs",
    "content": "use std::io;\n\nuse ciphersuite::Ciphersuite;\npub use serai_db::*;\n\nuse scale::{Encode, Decode};\n#[rustfmt::skip]\nuse serai_client::{\n  in_instructions::primitives::InInstructionWithBalance,\n  primitives::ExternalBalance\n};\n\nuse crate::{\n  Get, Plan,\n  networks::{Output, Transaction, Network},\n};\n\n#[derive(Clone, PartialEq, Eq, Debug)]\npub enum PlanFromScanning<N: Network> {\n  Refund(N::Output, N::Address),\n  Forward(N::Output),\n}\n\nimpl<N: Network> PlanFromScanning<N> {\n  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {\n    let mut kind = [0xff];\n    reader.read_exact(&mut kind)?;\n    match kind[0] {\n      0 => {\n        let output = N::Output::read(reader)?;\n\n        let mut address_vec_len = [0; 4];\n        reader.read_exact(&mut address_vec_len)?;\n        let mut address_vec =\n          vec![0; usize::try_from(u32::from_le_bytes(address_vec_len)).unwrap()];\n        reader.read_exact(&mut address_vec)?;\n        let address =\n          N::Address::try_from(address_vec).map_err(|_| \"invalid address saved to disk\").unwrap();\n\n        Ok(PlanFromScanning::Refund(output, address))\n      }\n      1 => {\n        let output = N::Output::read(reader)?;\n        Ok(PlanFromScanning::Forward(output))\n      }\n      _ => panic!(\"reading unrecognized PlanFromScanning\"),\n    }\n  }\n  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {\n    match self {\n      PlanFromScanning::Refund(output, address) => {\n        writer.write_all(&[0])?;\n        output.write(writer)?;\n\n        let address_vec: Vec<u8> =\n          address.clone().try_into().map_err(|_| \"invalid address being refunded to\").unwrap();\n        writer.write_all(&u32::try_from(address_vec.len()).unwrap().to_le_bytes())?;\n        writer.write_all(&address_vec)\n      }\n      PlanFromScanning::Forward(output) => {\n        writer.write_all(&[1])?;\n        output.write(writer)\n      }\n    }\n  }\n}\n\ncreate_db!(\n  
MultisigsDb {\n    NextBatchDb: () -> u32,\n    PlanDb: (id: &[u8]) -> Vec<u8>,\n    PlansFromScanningDb: (block_number: u64) -> Vec<u8>,\n    OperatingCostsDb: () -> u64,\n    ResolvedDb: (tx: &[u8]) -> [u8; 32],\n    SigningDb: (key: &[u8]) -> Vec<u8>,\n    ForwardedOutputDb: (balance: ExternalBalance) -> Vec<u8>,\n    DelayedOutputDb: () -> Vec<u8>\n  }\n);\n\nimpl PlanDb {\n  pub fn save_active_plan<N: Network>(\n    txn: &mut impl DbTxn,\n    key: &[u8],\n    block_number: usize,\n    plan: &Plan<N>,\n    operating_costs_at_time: u64,\n  ) {\n    let id = plan.id();\n\n    {\n      let mut signing = SigningDb::get(txn, key).unwrap_or_default();\n\n      // If we've already noted we're signing this, return\n      assert_eq!(signing.len() % 32, 0);\n      for i in 0 .. (signing.len() / 32) {\n        if signing[(i * 32) .. ((i + 1) * 32)] == id {\n          return;\n        }\n      }\n\n      signing.extend(&id);\n      SigningDb::set(txn, key, &signing);\n    }\n\n    {\n      let mut buf = block_number.to_le_bytes().to_vec();\n      plan.write(&mut buf).unwrap();\n      buf.extend(&operating_costs_at_time.to_le_bytes());\n      Self::set(txn, &id, &buf);\n    }\n  }\n\n  pub fn active_plans<N: Network>(getter: &impl Get, key: &[u8]) -> Vec<(u64, Plan<N>, u64)> {\n    let signing = SigningDb::get(getter, key).unwrap_or_default();\n    let mut res = vec![];\n\n    assert_eq!(signing.len() % 32, 0);\n    for i in 0 .. (signing.len() / 32) {\n      let id = &signing[(i * 32) .. ((i + 1) * 32)];\n      let buf = Self::get(getter, id).unwrap();\n\n      let block_number = u64::from_le_bytes(buf[.. 
8].try_into().unwrap());\n      let plan = Plan::<N>::read::<&[u8]>(&mut &buf[8 ..]).unwrap();\n      assert_eq!(id, &plan.id());\n      let operating_costs = u64::from_le_bytes(buf[(buf.len() - 8) ..].try_into().unwrap());\n      res.push((block_number, plan, operating_costs));\n    }\n    res\n  }\n\n  pub fn plan_by_key_with_self_change<N: Network>(\n    getter: &impl Get,\n    key: <N::Curve as Ciphersuite>::G,\n    id: [u8; 32],\n  ) -> bool {\n    let plan = Plan::<N>::read::<&[u8]>(&mut &Self::get(getter, &id).unwrap()[8 ..]).unwrap();\n    assert_eq!(plan.id(), id);\n    if let Some(change) = N::change_address(plan.key) {\n      (key == plan.key) && (Some(change) == plan.change)\n    } else {\n      false\n    }\n  }\n}\n\nimpl OperatingCostsDb {\n  pub fn take_operating_costs(txn: &mut impl DbTxn) -> u64 {\n    let existing = Self::get(txn).unwrap_or_default();\n    txn.del(Self::key());\n    existing\n  }\n  pub fn set_operating_costs(txn: &mut impl DbTxn, amount: u64) {\n    if amount != 0 {\n      Self::set(txn, &amount);\n    }\n  }\n}\n\nimpl ResolvedDb {\n  pub fn resolve_plan<N: Network>(\n    txn: &mut impl DbTxn,\n    key: &[u8],\n    plan: [u8; 32],\n    resolution: &<N::Transaction as Transaction<N>>::Id,\n  ) {\n    let mut signing = SigningDb::get(txn, key).unwrap_or_default();\n    assert_eq!(signing.len() % 32, 0);\n\n    let mut found = false;\n    for i in 0 .. (signing.len() / 32) {\n      let start = i * 32;\n      let end = start + 32;\n      if signing[start .. end] == plan {\n        found = true;\n        signing = [&signing[.. 
start], &signing[end ..]].concat();\n        break;\n      }\n    }\n\n    if !found {\n      log::warn!(\"told to finish signing {} yet wasn't actively signing it\", hex::encode(plan));\n    }\n    SigningDb::set(txn, key, &signing);\n    Self::set(txn, resolution.as_ref(), &plan);\n  }\n}\n\nimpl PlansFromScanningDb {\n  pub fn set_plans_from_scanning<N: Network>(\n    txn: &mut impl DbTxn,\n    block_number: usize,\n    plans: Vec<PlanFromScanning<N>>,\n  ) {\n    let mut buf = vec![];\n    for plan in plans {\n      plan.write(&mut buf).unwrap();\n    }\n    Self::set(txn, block_number.try_into().unwrap(), &buf);\n  }\n\n  pub fn take_plans_from_scanning<N: Network>(\n    txn: &mut impl DbTxn,\n    block_number: usize,\n  ) -> Option<Vec<PlanFromScanning<N>>> {\n    let block_number = u64::try_from(block_number).unwrap();\n    let res = Self::get(txn, block_number).map(|plans| {\n      let mut plans_ref = plans.as_slice();\n      let mut res = vec![];\n      while !plans_ref.is_empty() {\n        res.push(PlanFromScanning::<N>::read(&mut plans_ref).unwrap());\n      }\n      res\n    });\n    if res.is_some() {\n      txn.del(Self::key(block_number));\n    }\n    res\n  }\n}\n\nimpl ForwardedOutputDb {\n  pub fn save_forwarded_output(txn: &mut impl DbTxn, instruction: &InInstructionWithBalance) {\n    let mut existing = Self::get(txn, instruction.balance).unwrap_or_default();\n    existing.extend(instruction.encode());\n    Self::set(txn, instruction.balance, &existing);\n  }\n\n  pub fn take_forwarded_output(\n    txn: &mut impl DbTxn,\n    balance: ExternalBalance,\n  ) -> Option<InInstructionWithBalance> {\n    let outputs = Self::get(txn, balance)?;\n    let mut outputs_ref = outputs.as_slice();\n    let res = InInstructionWithBalance::decode(&mut outputs_ref).unwrap();\n    assert!(outputs_ref.len() < outputs.len());\n    if outputs_ref.is_empty() {\n      txn.del(Self::key(balance));\n    } else {\n      Self::set(txn, balance, &outputs);\n    }\n    
Some(res)\n  }\n}\n\nimpl DelayedOutputDb {\n  pub fn save_delayed_output(txn: &mut impl DbTxn, instruction: &InInstructionWithBalance) {\n    let mut existing = Self::get(txn).unwrap_or_default();\n    existing.extend(instruction.encode());\n    Self::set(txn, &existing);\n  }\n\n  pub fn take_delayed_outputs(txn: &mut impl DbTxn) -> Vec<InInstructionWithBalance> {\n    let Some(outputs) = Self::get(txn) else { return vec![] };\n    txn.del(Self::key());\n\n    let mut outputs_ref = outputs.as_slice();\n    let mut res = vec![];\n    while !outputs_ref.is_empty() {\n      res.push(InInstructionWithBalance::decode(&mut outputs_ref).unwrap());\n    }\n    res\n  }\n}\n"
  },
  {
    "path": "processor/src/multisigs/mod.rs",
    "content": "use core::time::Duration;\nuse std::collections::HashSet;\n\nuse ciphersuite::{group::GroupEncoding, Ciphersuite};\n\nuse scale::{Encode, Decode};\nuse messages::SubstrateContext;\n\nuse serai_client::{\n  primitives::{MAX_DATA_LEN, ExternalAddress, BlockHash, Data},\n  in_instructions::primitives::{\n    InInstructionWithBalance, Batch, RefundableInInstruction, Shorthand, MAX_BATCH_SIZE,\n  },\n  coins::primitives::{OutInstruction, OutInstructionWithBalance},\n};\n\nuse log::{info, error};\n\nuse tokio::time::sleep;\n\n#[cfg(not(test))]\nmod scanner;\n#[cfg(test)]\npub mod scanner;\n\nuse scanner::{ScannerEvent, ScannerHandle, Scanner};\n\nmod db;\nuse db::*;\n\npub(crate) mod scheduler;\nuse scheduler::Scheduler;\n\nuse crate::{\n  Get, Db, Payment, Plan,\n  networks::{OutputType, Output, SignableTransaction, Eventuality, Block, PreparedSend, Network},\n};\n\n// InInstructionWithBalance from an external output\nfn instruction_from_output<N: Network>(\n  output: &N::Output,\n) -> (Option<ExternalAddress>, Option<InInstructionWithBalance>) {\n  assert_eq!(output.kind(), OutputType::External);\n\n  let presumed_origin = output.presumed_origin().map(|address| {\n    ExternalAddress::new(\n      address\n        .try_into()\n        .map_err(|_| ())\n        .expect(\"presumed origin couldn't be converted to a Vec<u8>\"),\n    )\n    .expect(\"presumed origin exceeded address limits\")\n  });\n\n  let mut data = output.data();\n  let max_data_len = usize::try_from(MAX_DATA_LEN).unwrap();\n  if data.len() > max_data_len {\n    error!(\n      \"data in output {} exceeded MAX_DATA_LEN ({MAX_DATA_LEN}): {}. 
skipping\",\n      hex::encode(output.id()),\n      data.len(),\n    );\n    return (presumed_origin, None);\n  }\n\n  let shorthand = match Shorthand::decode(&mut data) {\n    Ok(shorthand) => shorthand,\n    Err(e) => {\n      info!(\"data in output {} wasn't valid shorthand: {e:?}\", hex::encode(output.id()));\n      return (presumed_origin, None);\n    }\n  };\n  let instruction = match RefundableInInstruction::try_from(shorthand) {\n    Ok(instruction) => instruction,\n    Err(e) => {\n      info!(\n        \"shorthand in output {} wasn't convertible to a RefundableInInstruction: {e:?}\",\n        hex::encode(output.id())\n      );\n      return (presumed_origin, None);\n    }\n  };\n\n  let mut balance = output.balance();\n  // Deduct twice the cost to aggregate to prevent economic attacks by malicious miners against\n  // other users\n  balance.amount.0 -= 2 * N::COST_TO_AGGREGATE;\n\n  (\n    instruction.origin.or(presumed_origin),\n    Some(InInstructionWithBalance { instruction: instruction.instruction, balance }),\n  )\n}\n\n#[derive(Clone, Copy, PartialEq, Eq, Debug)]\nenum RotationStep {\n  // Use the existing multisig for all actions (steps 1-3)\n  UseExisting,\n  // Use the new multisig as change (step 4)\n  NewAsChange,\n  // The existing multisig is expected to solely forward transactions at this point (step 5)\n  ForwardFromExisting,\n  // The existing multisig is expected to finish its own transactions and do nothing more\n  // (step 6)\n  ClosingExisting,\n}\n\n// This explicitly shouldn't take the database as we prepare Plans we won't execute for fee\n// estimates\nasync fn prepare_send<N: Network>(\n  network: &N,\n  block_number: usize,\n  plan: Plan<N>,\n  operating_costs: u64,\n) -> PreparedSend<N> {\n  loop {\n    match network.prepare_send(block_number, plan.clone(), operating_costs).await {\n      Ok(prepared) => {\n        return prepared;\n      }\n      Err(e) => {\n        error!(\"couldn't prepare a send for plan {}: {e}\", 
hex::encode(plan.id()));\n        // The processor is either trying to create an invalid TX (fatal) or the node went\n        // offline\n        // The former requires a patch, the latter is a connection issue\n        // If the latter, this is an appropriate sleep. If the former, we should panic, yet\n        // this won't flood the console ad infinitum\n        sleep(Duration::from_secs(60)).await;\n      }\n    }\n  }\n}\n\npub struct MultisigViewer<N: Network> {\n  activation_block: usize,\n  key: <N::Curve as Ciphersuite>::G,\n  scheduler: N::Scheduler,\n}\n\n#[allow(clippy::type_complexity)]\n#[derive(Clone, Debug)]\npub enum MultisigEvent<N: Network> {\n  // Batches to publish\n  Batches(Option<(<N::Curve as Ciphersuite>::G, <N::Curve as Ciphersuite>::G)>, Vec<Batch>),\n  // Eventuality completion found on-chain\n  Completed(Vec<u8>, [u8; 32], <N::Eventuality as Eventuality>::Completion),\n}\n\npub struct MultisigManager<D: Db, N: Network> {\n  scanner: ScannerHandle<N, D>,\n  existing: Option<MultisigViewer<N>>,\n  new: Option<MultisigViewer<N>>,\n}\n\nimpl<D: Db, N: Network> MultisigManager<D, N> {\n  pub async fn new(\n    raw_db: &D,\n    network: &N,\n  ) -> (\n    Self,\n    Vec<<N::Curve as Ciphersuite>::G>,\n    Vec<(Plan<N>, N::SignableTransaction, N::Eventuality)>,\n  ) {\n    // The scanner has no long-standing orders to re-issue\n    let (mut scanner, current_keys) = Scanner::new(network.clone(), raw_db.clone());\n\n    let mut schedulers = vec![];\n\n    assert!(current_keys.len() <= 2);\n    let mut actively_signing = vec![];\n    for (_, key) in &current_keys {\n      schedulers.push(N::Scheduler::from_db(raw_db, *key, N::NETWORK).unwrap());\n\n      // Load any TXs being actively signed\n      let key = key.to_bytes();\n      for (block_number, plan, operating_costs) in PlanDb::active_plans::<N>(raw_db, key.as_ref()) {\n        let block_number = block_number.try_into().unwrap();\n\n        let id = plan.id();\n        info!(\"reloading plan 
{}: {:?}\", hex::encode(id), plan);\n\n        let key_bytes = plan.key.to_bytes();\n\n        let Some((tx, eventuality)) =\n          prepare_send(network, block_number, plan.clone(), operating_costs).await.tx\n        else {\n          panic!(\"previously created transaction is no longer being created\")\n        };\n\n        scanner\n          .register_eventuality(key_bytes.as_ref(), block_number, id, eventuality.clone())\n          .await;\n        actively_signing.push((plan, tx, eventuality));\n      }\n    }\n\n    (\n      MultisigManager {\n        scanner,\n        existing: current_keys.first().copied().map(|(activation_block, key)| MultisigViewer {\n          activation_block,\n          key,\n          scheduler: schedulers.remove(0),\n        }),\n        new: current_keys.get(1).copied().map(|(activation_block, key)| MultisigViewer {\n          activation_block,\n          key,\n          scheduler: schedulers.remove(0),\n        }),\n      },\n      current_keys.into_iter().map(|(_, key)| key).collect(),\n      actively_signing,\n    )\n  }\n\n  /// Returns the block number for a block hash, if it's known and all keys have scanned the block.\n  // This is guaranteed to atomically increment so long as no new keys are added to the scanner\n  // which activate at a block before the currently highest scanned block. 
This is prevented by\n  // the processor waiting for `Batch` inclusion before scanning too far ahead, and activation only\n  // happening after the \"too far ahead\" window.\n  pub async fn block_number<G: Get>(\n    &self,\n    getter: &G,\n    hash: &<N::Block as Block<N>>::Id,\n  ) -> Option<usize> {\n    let latest = ScannerHandle::<N, D>::block_number(getter, hash)?;\n\n    // While the scanner has cemented this block, that doesn't mean it's been scanned for all\n    // keys\n    // ram_scanned will return the lowest scanned block number out of all keys\n    if latest > self.scanner.ram_scanned().await {\n      return None;\n    }\n    Some(latest)\n  }\n\n  pub async fn add_key(\n    &mut self,\n    txn: &mut D::Transaction<'_>,\n    activation_block: usize,\n    external_key: <N::Curve as Ciphersuite>::G,\n  ) {\n    self.scanner.register_key(txn, activation_block, external_key).await;\n    let viewer = Some(MultisigViewer {\n      activation_block,\n      key: external_key,\n      scheduler: N::Scheduler::new::<D>(txn, external_key, N::NETWORK),\n    });\n\n    if self.existing.is_none() {\n      self.existing = viewer;\n      return;\n    }\n    self.new = viewer;\n  }\n\n  fn current_rotation_step(&self, block_number: usize) -> RotationStep {\n    let Some(new) = self.new.as_ref() else { return RotationStep::UseExisting };\n\n    // Period numbering here has no meaning other than these are the time values useful here, and\n    // the order they're calculated in. They have no reference/shared marker with anything else\n\n    // ESTIMATED_BLOCK_TIME_IN_SECONDS is fine to use here. While inaccurate, it shouldn't be\n    // drastically off, and even if it is, it's a hiccup to latency handling only possible when\n    // rotating. 
The error rate wouldn't be acceptable if it was allowed to accumulate over time,\n    // yet rotation occurs on Serai's clock, disconnecting any errors here from any prior.\n\n    // N::CONFIRMATIONS + 10 minutes\n    let period_1_start = new.activation_block +\n      N::CONFIRMATIONS +\n      (10usize * 60).div_ceil(N::ESTIMATED_BLOCK_TIME_IN_SECONDS);\n\n    // N::CONFIRMATIONS\n    let period_2_start = period_1_start + N::CONFIRMATIONS;\n\n    // 6 hours after period 2\n    // Also ensure 6 hours is greater than the amount of CONFIRMATIONS, for sanity purposes\n    let period_3_start =\n      period_2_start + ((6 * 60 * 60) / N::ESTIMATED_BLOCK_TIME_IN_SECONDS).max(N::CONFIRMATIONS);\n\n    if block_number < period_1_start {\n      RotationStep::UseExisting\n    } else if block_number < period_2_start {\n      RotationStep::NewAsChange\n    } else if block_number < period_3_start {\n      RotationStep::ForwardFromExisting\n    } else {\n      RotationStep::ClosingExisting\n    }\n  }\n\n  // Convert new Burns to Payments.\n  //\n  // Also moves payments from the old Scheduler to the new multisig if the step calls for it.\n  fn burns_to_payments(\n    &mut self,\n    txn: &mut D::Transaction<'_>,\n    step: RotationStep,\n    burns: Vec<OutInstructionWithBalance>,\n  ) -> (Vec<Payment<N>>, Vec<Payment<N>>) {\n    let mut payments = vec![];\n    for out in burns {\n      let OutInstructionWithBalance { instruction: OutInstruction { address, data }, balance } =\n        out;\n      assert_eq!(balance.coin.network(), N::NETWORK);\n\n      if let Ok(address) = N::Address::try_from(address.consume()) {\n        payments.push(Payment { address, data: data.map(Data::consume), balance });\n      }\n    }\n\n    let payments = payments;\n    match step {\n      RotationStep::UseExisting | RotationStep::NewAsChange => (payments, vec![]),\n      RotationStep::ForwardFromExisting | RotationStep::ClosingExisting => {\n        // Consume any payments the prior scheduler was 
unable to complete\n        // This should only actually matter once\n        let mut new_payments = self.existing.as_mut().unwrap().scheduler.consume_payments::<D>(txn);\n        // Add the new payments\n        new_payments.extend(payments);\n        (vec![], new_payments)\n      }\n    }\n  }\n\n  fn split_outputs_by_key(&self, outputs: Vec<N::Output>) -> (Vec<N::Output>, Vec<N::Output>) {\n    let mut existing_outputs = Vec::with_capacity(outputs.len());\n    let mut new_outputs = vec![];\n\n    let existing_key = self.existing.as_ref().unwrap().key;\n    let new_key = self.new.as_ref().map(|new| new.key);\n    for output in outputs {\n      if output.key() == existing_key {\n        existing_outputs.push(output);\n      } else {\n        assert_eq!(Some(output.key()), new_key);\n        new_outputs.push(output);\n      }\n    }\n\n    (existing_outputs, new_outputs)\n  }\n\n  fn refund_plan(\n    scheduler: &mut N::Scheduler,\n    txn: &mut D::Transaction<'_>,\n    output: N::Output,\n    refund_to: N::Address,\n  ) -> Plan<N> {\n    log::info!(\"creating refund plan for {}\", hex::encode(output.id()));\n    assert_eq!(output.kind(), OutputType::External);\n    scheduler.refund_plan::<D>(txn, output, refund_to)\n  }\n\n  // Returns the plan for forwarding if one is needed.\n  // Returns None if one is not needed to forward this output.\n  fn forward_plan(&mut self, txn: &mut D::Transaction<'_>, output: &N::Output) -> Option<Plan<N>> {\n    log::info!(\"creating forwarding plan for {}\", hex::encode(output.id()));\n    let res = self.existing.as_mut().unwrap().scheduler.forward_plan::<D>(\n      txn,\n      output.clone(),\n      self.new.as_ref().expect(\"forwarding plan yet no new multisig\").key,\n    );\n    if res.is_none() {\n      log::info!(\"no forwarding plan was necessary for {}\", hex::encode(output.id()));\n    }\n    res\n  }\n\n  // Filter newly received outputs due to the step being RotationStep::ClosingExisting.\n  //\n  // Returns the Plans 
for the `Branch`s which should be created off outputs which passed the\n  // filter.\n  fn filter_outputs_due_to_closing(\n    &mut self,\n    txn: &mut D::Transaction<'_>,\n    existing_outputs: &mut Vec<N::Output>,\n  ) -> Vec<Plan<N>> {\n    /*\n      The document says to only handle outputs we created. We don't know what outputs we\n      created. We do have an ordered view of equivalent outputs however, and can assume the\n      first (and likely only) ones are the ones we created.\n\n      Accordingly, only handling outputs we created should be definable as only handling\n      outputs from the resolution of Eventualities.\n\n      This isn't feasible. It requires knowing what Eventualities were completed in this block,\n      when we handle this block, which we don't know without fully serialized scanning + Batch\n      publication.\n\n      Take the following scenario:\n      1) A network uses 10 confirmations. Block x is scanned, meaning x+9a exists.\n      2) 67% of nodes process x, create, sign, and publish a TX, creating an Eventuality.\n      3) A reorganization to a shorter chain occurs, including the published TX in x+1b.\n      4) The 33% of nodes which are latent will be allowed to scan x+1b as soon as x+10b\n         exists. 
They won't wait for Serai to include the Batch for x until they try to scan\n         x+10b.\n      5) These latent nodes will handle x+1b, post-create an Eventuality, post-learn x+1b\n         contained resolutions, changing how x+1b should've been interpreted.\n\n      We either have to:\n      A) Fully serialize scanning (removing the ability to utilize throughput to allow higher\n         latency, at least while the step is `ClosingExisting`).\n      B) Create Eventualities immediately, which we can't do as then both the external\n         network's clock AND Serai's clock can trigger Eventualities, removing ordering.\n         We'd need to shift entirely to the external network's clock, only handling Burns\n         outside the parallelization window (which would be extremely latent).\n      C) Use a different mechanism to determine if we created an output.\n      D) Re-define which outputs are still to be handled after the 6 hour period expires, such\n         that the multisig's lifetime cannot be further extended yet it does fulfill its\n         responsibility.\n\n      External outputs to the existing multisig will be:\n      - Scanned before the rotation and unused (as used External outputs become Change)\n      - Forwarded immediately upon scanning\n      - Not scanned before the cut off time (and accordingly dropped)\n\n      For the first case, since they're scanned before the rotation and unused, they'll be\n      forwarded with all other available outputs (since they'll be available when scanned).\n\n      Change outputs will be:\n      - Scanned before the rotation and forwarded with all other available outputs\n      - Forwarded immediately upon scanning\n      - Not scanned before the cut off time, requiring an extension exclusive to these outputs\n\n      The important thing to note about honest Change outputs to the existing multisig is that\n      they'll only be created within `CONFIRMATIONS+1` blocks of the activation block. 
Also\n      important to note is that there's another explicit window of `CONFIRMATIONS` before the\n      6 hour window.\n\n      Eventualities are not guaranteed to be known before we scan the block containing their\n      resolution. They are guaranteed to be known within `CONFIRMATIONS-1` blocks however, due\n      to the limitation on how far we'll scan ahead.\n\n      This means we will know of all Eventualities related to Change outputs we need to forward\n      before the 6 hour period begins (as forwarding outputs will not create any Change outputs\n      to the existing multisig).\n\n      This means a definition of complete can be defined as:\n      1) Handled all Branch outputs\n      2) Forwarded all External outputs received before the end of 6 hour window\n      3) Forwarded the results of all Eventualities with Change, which will have been created\n         before the 6 hour window\n\n      How can we track and ensure this without needing to check if an output is from the\n      resolution of an Eventuality?\n\n      1) We only create Branch outputs before the 6 hour window starts. These are guaranteed\n         to appear within `CONFIRMATIONS` blocks. They will exist with arbitrary depth however,\n         meaning that upon completion they will spawn several more Eventualities. 
The further\n         created Eventualities re-risk being present after the 6 hour period ends.\n\n         We can:\n         1) Build a queue for Branch outputs, delaying their handling until relevant\n            Eventualities are guaranteed to be present.\n\n            This solution would theoretically work for all outputs and allow collapsing this\n            problem to simply:\n\n            > Accordingly, only handling outputs we created should be definable as only\n              handling outputs from the resolution of Eventualities.\n\n         2) Create all Eventualities under a Branch at time of Branch creation.\n            This idea fails as Plans are tightly bound to outputs.\n\n         3) Don't track Branch outputs by Eventualities, yet by the amount of Branch outputs\n            remaining. Any Branch output received, of a useful amount, is assumed to be our\n            own and handled. All other Branch outputs, even if they're the completion of some\n            Eventuality, are dropped.\n\n            This avoids needing any additional queue, avoiding additional pipelining/latency.\n\n      2) External outputs are self-evident. We simply stop handling them at the cut-off point,\n         and only start checking after `CONFIRMATIONS` blocks if all Eventualities are\n         complete.\n\n      3) Since all Change Eventualities will be known prior to the 6 hour window's beginning,\n         we can safely check if a received Change output is the resolution of an Eventuality.\n         We only need to forward it if so. 
Forwarding it simply requires only checking if\n         Eventualities are complete after `CONFIRMATIONS` blocks, same as for straggling\n         External outputs.\n    */\n\n    let mut plans = vec![];\n    existing_outputs.retain(|output| {\n      match output.kind() {\n        OutputType::External | OutputType::Forwarded => false,\n        OutputType::Branch => {\n          let scheduler = &mut self.existing.as_mut().unwrap().scheduler;\n          // There *would* be a race condition here due to the fact we only mark a `Branch` output\n          // as needed when we process the block (and handle scheduling), yet actual `Branch`\n          // outputs may appear as soon as the next block (and we scan the next block before we\n          // process the prior block)\n          //\n          // Unlike Eventuality checking, which happens on scanning and is therefore asynchronous,\n          // all scheduling (and this check against the scheduler) happens on processing, which is\n          // synchronous\n          //\n          // While we could move Eventuality checking into the block processing, removing its\n          // asynchonicity, we could only check data the Scanner deems important. The Scanner won't\n          // deem important Eventuality resolutions which don't create an output to Serai unless\n          // it knows of the Eventuality. 
Accordingly, at best we could have a split role (the\n          // Scanner noting completion of Eventualities which don't have relevant outputs, the\n          // processing noting completion of ones which do)\n          //\n          // This is unnecessary, due to the current flow around Eventuality resolutions and the\n          // current bounds naturally found being sufficiently amenable, yet notable for the future\n          if scheduler.can_use_branch(output.balance()) {\n            // We could simply call can_use_branch, yet it'd have an edge case where if we receive\n            // two outputs for 100, and we could use one such output, we'd handle both.\n            //\n            // Individually schedule each output once confirming they're usable in order to avoid\n            // this.\n            let mut plan = scheduler.schedule::<D>(\n              txn,\n              vec![output.clone()],\n              vec![],\n              self.new.as_ref().unwrap().key,\n              false,\n            );\n            assert_eq!(plan.len(), 1);\n            let plan = plan.remove(0);\n            plans.push(plan);\n          }\n          false\n        }\n        OutputType::Change => {\n          // If the TX containing this output resolved an Eventuality...\n          if let Some(plan) = ResolvedDb::get(txn, output.tx_id().as_ref()) {\n            // And the Eventuality had change...\n            // We need this check as Eventualities have a race condition and can't be relied\n            // on, as extensively detailed above. 
Eventualities explicitly with change do have\n            // a safe timing window however\n            if PlanDb::plan_by_key_with_self_change::<N>(\n              txn,\n              // Pass the key so the DB checks the Plan's key is this multisig's, preventing a\n              // potential issue where the new multisig creates a Plan with change *and a\n              // payment to the existing multisig's change address*\n              self.existing.as_ref().unwrap().key,\n              plan,\n            ) {\n              // Then this is an honest change output we need to forward\n              // (or it's a payment to the change address in the same transaction as an honest\n              // change output, which is fine to let slip in)\n              return true;\n            }\n          }\n          false\n        }\n      }\n    });\n    plans\n  }\n\n  // Returns the Plans caused from a block being acknowledged.\n  //\n  // Will rotate keys if the block acknowledged is the retirement block.\n  async fn plans_from_block(\n    &mut self,\n    txn: &mut D::Transaction<'_>,\n    block_number: usize,\n    block_id: <N::Block as Block<N>>::Id,\n    step: &mut RotationStep,\n    burns: Vec<OutInstructionWithBalance>,\n  ) -> (bool, Vec<Plan<N>>, HashSet<[u8; 32]>) {\n    let (mut existing_payments, mut new_payments) = self.burns_to_payments(txn, *step, burns);\n\n    let mut plans = vec![];\n    let mut plans_from_scanning = HashSet::new();\n\n    // We now have to acknowledge the acknowledged block, if it's new\n    // It won't be if this block's `InInstruction`s were split into multiple `Batch`s\n    let (acquired_lock, (mut existing_outputs, new_outputs)) = {\n      let (acquired_lock, mut outputs) = if ScannerHandle::<N, D>::db_scanned(txn)\n        .expect(\"published a Batch despite never scanning a block\") <\n        block_number\n      {\n        // Load plans crated when we scanned the block\n        let scanning_plans =\n          
PlansFromScanningDb::take_plans_from_scanning::<N>(txn, block_number).unwrap();\n        // Expand into actual plans\n        plans = scanning_plans\n          .into_iter()\n          .map(|plan| match plan {\n            PlanFromScanning::Refund(output, refund_to) => {\n              let existing = self.existing.as_mut().unwrap();\n              if output.key() == existing.key {\n                Self::refund_plan(&mut existing.scheduler, txn, output, refund_to)\n              } else {\n                let new = self\n                  .new\n                  .as_mut()\n                  .expect(\"new multisig didn't expect yet output wasn't for existing multisig\");\n                assert_eq!(output.key(), new.key, \"output wasn't for existing nor new multisig\");\n                Self::refund_plan(&mut new.scheduler, txn, output, refund_to)\n              }\n            }\n            PlanFromScanning::Forward(output) => self\n              .forward_plan(txn, &output)\n              .expect(\"supposed to forward an output yet no forwarding plan\"),\n          })\n          .collect();\n\n        for plan in &plans {\n          plans_from_scanning.insert(plan.id());\n        }\n\n        let (is_retirement_block, outputs) = self.scanner.ack_block(txn, block_id.clone()).await;\n        if is_retirement_block {\n          let existing = self.existing.take().unwrap();\n          assert!(existing.scheduler.empty());\n          self.existing = self.new.take();\n          *step = RotationStep::UseExisting;\n          assert!(existing_payments.is_empty());\n          existing_payments = new_payments;\n          new_payments = vec![];\n        }\n        (true, outputs)\n      } else {\n        (false, vec![])\n      };\n\n      // Remove all outputs already present in plans\n      let mut output_set = HashSet::new();\n      for plan in &plans {\n        for input in &plan.inputs {\n          output_set.insert(input.id().as_ref().to_vec());\n        }\n      }\n      
outputs.retain(|output| !output_set.remove(output.id().as_ref()));\n      assert_eq!(output_set.len(), 0);\n\n      (acquired_lock, self.split_outputs_by_key(outputs))\n    };\n\n    // If we're closing the existing multisig, filter its outputs down\n    if *step == RotationStep::ClosingExisting {\n      plans.extend(self.filter_outputs_due_to_closing(txn, &mut existing_outputs));\n    }\n\n    // Now that we've done all our filtering, schedule the existing multisig's outputs\n    plans.extend({\n      let existing = self.existing.as_mut().unwrap();\n      let existing_key = existing.key;\n      self.existing.as_mut().unwrap().scheduler.schedule::<D>(\n        txn,\n        existing_outputs,\n        existing_payments,\n        match *step {\n          RotationStep::UseExisting => existing_key,\n          RotationStep::NewAsChange |\n          RotationStep::ForwardFromExisting |\n          RotationStep::ClosingExisting => self.new.as_ref().unwrap().key,\n        },\n        match *step {\n          RotationStep::UseExisting | RotationStep::NewAsChange => false,\n          RotationStep::ForwardFromExisting | RotationStep::ClosingExisting => true,\n        },\n      )\n    });\n\n    for plan in &plans {\n      // This first equality should 'never meaningfully' be false\n      // All created plans so far are by the existing multisig EXCEPT:\n      // A) If we created a refund plan from the new multisig (yet that wouldn't have change)\n      // B) The existing Scheduler returned a Plan for the new key (yet that happens with the SC\n      //    scheduler, yet that doesn't have change)\n      // Despite being 'unnecessary' now, it's better to explicitly ensure and be robust\n      if plan.key == self.existing.as_ref().unwrap().key {\n        if let Some(change) = N::change_address(plan.key) {\n          if plan.change == Some(change) {\n            // Assert these (self-change) are only created during the expected step\n            match *step {\n              
RotationStep::UseExisting => {}\n              RotationStep::NewAsChange |\n              RotationStep::ForwardFromExisting |\n              RotationStep::ClosingExisting => panic!(\"change was set to self despite rotating\"),\n            }\n          }\n        }\n      }\n    }\n\n    // Schedule the new multisig's outputs too\n    if let Some(new) = self.new.as_mut() {\n      plans.extend(new.scheduler.schedule::<D>(txn, new_outputs, new_payments, new.key, false));\n    }\n\n    (acquired_lock, plans, plans_from_scanning)\n  }\n\n  /// Handle a SubstrateBlock event, building the relevant Plans.\n  pub async fn substrate_block(\n    &mut self,\n    txn: &mut D::Transaction<'_>,\n    network: &N,\n    context: SubstrateContext,\n    burns: Vec<OutInstructionWithBalance>,\n  ) -> (bool, Vec<(<N::Curve as Ciphersuite>::G, [u8; 32], N::SignableTransaction, N::Eventuality)>)\n  {\n    let mut block_id = <N::Block as Block<N>>::Id::default();\n    block_id.as_mut().copy_from_slice(context.network_latest_finalized_block.as_ref());\n    let block_number = ScannerHandle::<N, D>::block_number(txn, &block_id)\n      .expect(\"SubstrateBlock with context we haven't synced\");\n\n    // Determine what step of rotation we're currently in\n    let mut step = self.current_rotation_step(block_number);\n\n    // Get the Plans from this block\n    let (acquired_lock, plans, plans_from_scanning) =\n      self.plans_from_block(txn, block_number, block_id, &mut step, burns).await;\n\n    let res = {\n      let mut res = Vec::with_capacity(plans.len());\n\n      for plan in plans {\n        let id = plan.id();\n        info!(\"preparing plan {}: {:?}\", hex::encode(id), plan);\n\n        let key = plan.key;\n        let key_bytes = key.to_bytes();\n\n        let (tx, post_fee_branches) = {\n          let running_operating_costs = OperatingCostsDb::take_operating_costs(txn);\n\n          PlanDb::save_active_plan::<N>(\n            txn,\n            key_bytes.as_ref(),\n            
block_number,\n            &plan,\n            running_operating_costs,\n          );\n\n          // If this Plan is from the scanner handler below, don't take the opportunity to amortze\n          // operating costs\n          // It operates with limited context, and on a different clock, making it nable to react\n          // to operating costs\n          // Despite this, in order to properly save forwarded outputs' instructions, it needs to\n          // know the actual value forwarded outputs will be created with\n          // Including operating costs prevents that\n          let from_scanning = plans_from_scanning.contains(&plan.id());\n          let to_use_operating_costs = if from_scanning { 0 } else { running_operating_costs };\n\n          let PreparedSend { tx, post_fee_branches, mut operating_costs } =\n            prepare_send(network, block_number, plan, to_use_operating_costs).await;\n\n          // Restore running_operating_costs to operating_costs\n          if from_scanning {\n            // If we're forwarding (or refunding) this output, operating_costs should still be 0\n            // Either this TX wasn't created, causing no operating costs, or it was yet it'd be\n            // amortized\n            assert_eq!(operating_costs, 0);\n\n            operating_costs += running_operating_costs;\n          }\n\n          OperatingCostsDb::set_operating_costs(txn, operating_costs);\n\n          (tx, post_fee_branches)\n        };\n\n        for branch in post_fee_branches {\n          let existing = self.existing.as_mut().unwrap();\n          let to_use = if key == existing.key {\n            existing\n          } else {\n            let new = self\n              .new\n              .as_mut()\n              .expect(\"plan wasn't for existing multisig yet there wasn't a new multisig\");\n            assert_eq!(key, new.key);\n            new\n          };\n\n          to_use.scheduler.created_output::<D>(txn, branch.expected, branch.actual);\n       
 }\n\n        if let Some((tx, eventuality)) = tx {\n          // The main function we return to will send an event to the coordinator which must be\n          // fired before these registered Eventualities have their Completions fired\n          // Safety is derived from a mutable lock on the Scanner being preserved, preventing\n          // scanning (and detection of Eventuality resolutions) before it's released\n          // It's only released by the main function after it does what it will\n          self\n            .scanner\n            .register_eventuality(key_bytes.as_ref(), block_number, id, eventuality.clone())\n            .await;\n\n          res.push((key, id, tx, eventuality));\n        }\n\n        // TODO: If the TX is None, restore its inputs to the scheduler for efficiency's sake\n        // If this TODO is removed, also reduce the operating costs\n      }\n      res\n    };\n    (acquired_lock, res)\n  }\n\n  pub async fn release_scanner_lock(&mut self) {\n    self.scanner.release_lock().await;\n  }\n\n  pub async fn scanner_event_to_multisig_event(\n    &self,\n    txn: &mut D::Transaction<'_>,\n    network: &N,\n    msg: ScannerEvent<N>,\n  ) -> MultisigEvent<N> {\n    let (block_number, event) = match msg {\n      ScannerEvent::Block { is_retirement_block, block, mut outputs } => {\n        // Since the Scanner is asynchronous, the following is a concern for race conditions\n        // We safely know the step of a block since keys are declared, and the Scanner is safe\n        // with respect to the declaration of keys\n        // Accordingly, the following calls regarding new keys and step should be safe\n        let block_number = ScannerHandle::<N, D>::block_number(txn, &block)\n          .expect(\"didn't have the block number for a block we just scanned\");\n        let step = self.current_rotation_step(block_number);\n\n        // Instructions created from this block\n        let mut instructions = vec![];\n\n        // If any of these 
outputs were forwarded, create their instruction now\n        for output in &outputs {\n          if output.kind() != OutputType::Forwarded {\n            continue;\n          }\n\n          if let Some(instruction) = ForwardedOutputDb::take_forwarded_output(txn, output.balance())\n          {\n            instructions.push(instruction);\n          }\n        }\n\n        // If the remaining outputs aren't externally received funds, don't handle them as\n        // instructions\n        outputs.retain(|output| output.kind() == OutputType::External);\n\n        // These plans are of limited context. They're only allowed the outputs newly received\n        // within this block and are intended to handle forwarding transactions/refunds\n        let mut plans = vec![];\n\n        // If the old multisig is explicitly only supposed to forward, create all such plans now\n        if step == RotationStep::ForwardFromExisting {\n          let mut i = 0;\n          while i < outputs.len() {\n            let output = &outputs[i];\n            let plans = &mut plans;\n            let txn = &mut *txn;\n\n            #[allow(clippy::redundant_closure_call)]\n            let should_retain = (|| async move {\n              // If this output doesn't belong to the existing multisig, it shouldn't be forwarded\n              if output.key() != self.existing.as_ref().unwrap().key {\n                return true;\n              }\n\n              let plans_at_start = plans.len();\n              let (refund_to, instruction) = instruction_from_output::<N>(output);\n              if let Some(mut instruction) = instruction {\n                let Some(shimmed_plan) = N::Scheduler::shim_forward_plan(\n                  output.clone(),\n                  self.new.as_ref().expect(\"forwarding from existing yet no new multisig\").key,\n                ) else {\n                  // If this network doesn't need forwarding, report the output now\n                  return true;\n                };\n  
              plans.push(PlanFromScanning::<N>::Forward(output.clone()));\n\n                // Set the instruction for this output to be returned\n                // We need to set it under the amount it's forwarded with, so prepare its forwarding\n                // TX to determine the fees involved\n                let PreparedSend { tx, post_fee_branches: _, operating_costs } =\n                  prepare_send(network, block_number, shimmed_plan, 0).await;\n                // operating_costs should not increase in a forwarding TX\n                assert_eq!(operating_costs, 0);\n\n                // If this actually forwarded any coins, save the output as forwarded\n                // If this didn't create a TX, we don't bother saving the output as forwarded\n                // The fact we already created and pushed a plan still using this output will cause\n                // it to not be retained here, and later the plan will be dropped as this did here,\n                // letting it die out\n                if let Some(tx) = &tx {\n                  instruction.balance.amount.0 -= tx.0.fee();\n\n                  /*\n                    Sending a Plan, with arbitrary data proxying the InInstruction, would require\n                    adding a flow for networks which drop their data to still embed arbitrary data.\n                    It'd also have edge cases causing failures (we'd need to manually provide the\n                    origin if it was implied, which may exceed the encoding limit).\n\n                    Instead, we save the InInstruction as we scan this output. 
Then, when the\n                    output is successfully forwarded, we simply read it from the local database.\n                    This also saves the costs of embedding arbitrary data.\n\n                    Since we can't rely on the Eventuality system to detect if it's a forwarded\n                    transaction, due to the asynchonicity of the Eventuality system, we instead\n                    interpret an Forwarded output which has an amount associated with an\n                    InInstruction which was forwarded as having been forwarded.\n                  */\n                  ForwardedOutputDb::save_forwarded_output(txn, &instruction);\n                }\n              } else if let Some(refund_to) = refund_to {\n                if let Ok(refund_to) = refund_to.consume().try_into() {\n                  // Build a dedicated Plan refunding this\n                  plans.push(PlanFromScanning::Refund(output.clone(), refund_to));\n                }\n              }\n\n              // Only keep if we didn't make a Plan consuming it\n              plans_at_start == plans.len()\n            })()\n            .await;\n            if should_retain {\n              i += 1;\n              continue;\n            }\n            outputs.remove(i);\n          }\n        }\n\n        for output in outputs {\n          // If this is an External transaction to the existing multisig, and we're either solely\n          // forwarding or closing the existing multisig, drop it\n          // In the case of the forwarding case, we'll report it once it hits the new multisig\n          if (match step {\n            RotationStep::UseExisting | RotationStep::NewAsChange => false,\n            RotationStep::ForwardFromExisting | RotationStep::ClosingExisting => true,\n          }) && (output.key() == self.existing.as_ref().unwrap().key)\n          {\n            continue;\n          }\n\n          let (refund_to, instruction) = instruction_from_output::<N>(&output);\n          let 
Some(instruction) = instruction else {\n            if let Some(refund_to) = refund_to {\n              if let Ok(refund_to) = refund_to.consume().try_into() {\n                plans.push(PlanFromScanning::Refund(output.clone(), refund_to));\n              }\n            }\n            continue;\n          };\n\n          // Delay External outputs received to new multisig earlier than expected\n          if Some(output.key()) == self.new.as_ref().map(|new| new.key) {\n            match step {\n              RotationStep::UseExisting => {\n                DelayedOutputDb::save_delayed_output(txn, &instruction);\n                continue;\n              }\n              RotationStep::NewAsChange |\n              RotationStep::ForwardFromExisting |\n              RotationStep::ClosingExisting => {}\n            }\n          }\n\n          instructions.push(instruction);\n        }\n\n        // Save the plans created while scanning\n        // TODO: Should we combine all of these plans to reduce the fees incurred from their\n        // execution? They're refunds and forwards. 
Neither should need isolate Plan/Eventualities.\n        PlansFromScanningDb::set_plans_from_scanning(txn, block_number, plans);\n\n        // If any outputs were delayed, append them into this block\n        match step {\n          RotationStep::UseExisting => {}\n          RotationStep::NewAsChange |\n          RotationStep::ForwardFromExisting |\n          RotationStep::ClosingExisting => {\n            instructions.extend(DelayedOutputDb::take_delayed_outputs(txn));\n          }\n        }\n\n        let mut block_hash = [0; 32];\n        block_hash.copy_from_slice(block.as_ref());\n        let mut batch_id = NextBatchDb::get(txn).unwrap_or_default();\n\n        // start with empty batch\n        let mut batches = vec![Batch {\n          network: N::NETWORK,\n          id: batch_id,\n          block: BlockHash(block_hash),\n          instructions: vec![],\n        }];\n\n        for instruction in instructions {\n          let batch = batches.last_mut().unwrap();\n          batch.instructions.push(instruction);\n\n          // check if batch is over-size\n          if batch.encode().len() > MAX_BATCH_SIZE {\n            // pop the last instruction so it's back in size\n            let instruction = batch.instructions.pop().unwrap();\n\n            // bump the id for the new batch\n            batch_id += 1;\n\n            // make a new batch with this instruction included\n            batches.push(Batch {\n              network: N::NETWORK,\n              id: batch_id,\n              block: BlockHash(block_hash),\n              instructions: vec![instruction],\n            });\n          }\n        }\n\n        // Save the next batch ID\n        NextBatchDb::set(txn, &(batch_id + 1));\n\n        (\n          block_number,\n          MultisigEvent::Batches(\n            if is_retirement_block {\n              Some((self.existing.as_ref().unwrap().key, self.new.as_ref().unwrap().key))\n            } else {\n              None\n            },\n            
batches,\n          ),\n        )\n      }\n\n      // This must be emitted before ScannerEvent::Block for all completions of known Eventualities\n      // within the block. Unknown Eventualities may have their Completed events emitted after\n      // ScannerEvent::Block however.\n      ScannerEvent::Completed(key, block_number, id, tx_id, completion) => {\n        ResolvedDb::resolve_plan::<N>(txn, &key, id, &tx_id);\n        (block_number, MultisigEvent::Completed(key, id, completion))\n      }\n    };\n\n    // If we either received a Block event (which will be the trigger when we have no\n    // Plans/Eventualities leading into ClosingExisting), or we received the last Completed for\n    // this multisig, set its retirement block\n    let existing = self.existing.as_ref().unwrap();\n\n    // This multisig is closing\n    let closing = self.current_rotation_step(block_number) == RotationStep::ClosingExisting;\n    // There's nothing left in its Scheduler. This call is safe as:\n    // 1) When ClosingExisting, all outputs should've been already forwarded, preventing\n    //    new UTXOs from accumulating.\n    // 2) No new payments should be issued.\n    // 3) While there may be plans, they'll be dropped to create Eventualities.\n    //    If this Eventuality is resolved, the Plan has already been dropped.\n    // 4) If this Eventuality will trigger a Plan, it'll still be in the plans HashMap.\n    let scheduler_is_empty = closing && existing.scheduler.empty();\n    // Nothing is still being signed\n    let no_active_plans = scheduler_is_empty &&\n      PlanDb::active_plans::<N>(txn, existing.key.to_bytes().as_ref()).is_empty();\n\n    self\n      .scanner\n      .multisig_completed\n      // The above explicitly included their predecessor to ensure short-circuiting, yet their\n      // names aren't defined as an aggregate check. 
Still including all three here ensures all are\n      // used in the final value\n      .send(closing && scheduler_is_empty && no_active_plans)\n      .unwrap();\n\n    event\n  }\n\n  pub async fn next_scanner_event(&mut self) -> ScannerEvent<N> {\n    self.scanner.events.recv().await.unwrap()\n  }\n}\n"
  },
  {
    "path": "processor/src/multisigs/scanner.rs",
    "content": "use core::marker::PhantomData;\nuse std::{\n  sync::Arc,\n  io::Read,\n  time::Duration,\n  collections::{VecDeque, HashSet, HashMap},\n};\n\nuse ciphersuite::group::GroupEncoding;\nuse frost::curve::Ciphersuite;\n\nuse log::{info, debug, warn};\nuse tokio::{\n  sync::{RwLockReadGuard, RwLockWriteGuard, RwLock, mpsc},\n  time::sleep,\n};\n\nuse crate::{\n  Get, DbTxn, Db,\n  networks::{Output, Transaction, Eventuality, EventualitiesTracker, Block, Network},\n};\n\n#[derive(Clone, Debug)]\npub enum ScannerEvent<N: Network> {\n  // Block scanned\n  Block {\n    is_retirement_block: bool,\n    block: <N::Block as Block<N>>::Id,\n    outputs: Vec<N::Output>,\n  },\n  // Eventuality completion found on-chain\n  // TODO: Move this from a tuple\n  Completed(\n    Vec<u8>,\n    usize,\n    [u8; 32],\n    <N::Transaction as Transaction<N>>::Id,\n    <N::Eventuality as Eventuality>::Completion,\n  ),\n}\n\npub type ScannerEventChannel<N> = mpsc::UnboundedReceiver<ScannerEvent<N>>;\n\n#[derive(Clone, Debug)]\nstruct ScannerDb<N: Network, D: Db>(PhantomData<N>, PhantomData<D>);\nimpl<N: Network, D: Db> ScannerDb<N, D> {\n  fn scanner_key(dst: &'static [u8], key: impl AsRef<[u8]>) -> Vec<u8> {\n    D::key(b\"SCANNER\", dst, key)\n  }\n\n  fn block_key(number: usize) -> Vec<u8> {\n    Self::scanner_key(b\"block_id\", u64::try_from(number).unwrap().to_le_bytes())\n  }\n  fn block_number_key(id: &<N::Block as Block<N>>::Id) -> Vec<u8> {\n    Self::scanner_key(b\"block_number\", id)\n  }\n  fn save_block(txn: &mut D::Transaction<'_>, number: usize, id: &<N::Block as Block<N>>::Id) {\n    txn.put(Self::block_number_key(id), u64::try_from(number).unwrap().to_le_bytes());\n    txn.put(Self::block_key(number), id);\n  }\n  fn block<G: Get>(getter: &G, number: usize) -> Option<<N::Block as Block<N>>::Id> {\n    getter.get(Self::block_key(number)).map(|id| {\n      let mut res = <N::Block as Block<N>>::Id::default();\n      res.as_mut().copy_from_slice(&id);\n      res\n  
  })\n  }\n  fn block_number<G: Get>(getter: &G, id: &<N::Block as Block<N>>::Id) -> Option<usize> {\n    getter\n      .get(Self::block_number_key(id))\n      .map(|number| u64::from_le_bytes(number.try_into().unwrap()).try_into().unwrap())\n  }\n\n  fn keys_key() -> Vec<u8> {\n    Self::scanner_key(b\"keys\", b\"\")\n  }\n  fn register_key(\n    txn: &mut D::Transaction<'_>,\n    activation_number: usize,\n    key: <N::Curve as Ciphersuite>::G,\n  ) {\n    let mut keys = txn.get(Self::keys_key()).unwrap_or(vec![]);\n\n    let key_bytes = key.to_bytes();\n\n    let key_len = key_bytes.as_ref().len();\n    assert_eq!(keys.len() % (8 + key_len), 0);\n\n    // Sanity check this key isn't already present\n    let mut i = 0;\n    while i < keys.len() {\n      if &keys[(i + 8) .. ((i + 8) + key_len)] == key_bytes.as_ref() {\n        panic!(\"adding {} as a key yet it was already present\", hex::encode(key_bytes));\n      }\n      i += 8 + key_len;\n    }\n\n    keys.extend(u64::try_from(activation_number).unwrap().to_le_bytes());\n    keys.extend(key_bytes.as_ref());\n    txn.put(Self::keys_key(), keys);\n  }\n  fn keys<G: Get>(getter: &G) -> Vec<(usize, <N::Curve as Ciphersuite>::G)> {\n    let bytes_vec = getter.get(Self::keys_key()).unwrap_or(vec![]);\n    let mut bytes: &[u8] = bytes_vec.as_ref();\n\n    // Assumes keys will be 32 bytes when calculating the capacity\n    // If keys are larger, this may allocate more memory than needed\n    // If keys are smaller, this may require additional allocations\n    // Either are fine\n    let mut res = Vec::with_capacity(bytes.len() / (8 + 32));\n    while !bytes.is_empty() {\n      let mut activation_number = [0; 8];\n      bytes.read_exact(&mut activation_number).unwrap();\n      let activation_number = u64::from_le_bytes(activation_number).try_into().unwrap();\n\n      res.push((activation_number, N::Curve::read_G(&mut bytes).unwrap()));\n    }\n    res\n  }\n  fn retire_key(txn: &mut D::Transaction<'_>) {\n    let keys 
= Self::keys(txn);\n    assert_eq!(keys.len(), 2);\n    txn.del(Self::keys_key());\n    Self::register_key(txn, keys[1].0, keys[1].1);\n  }\n\n  fn seen_key(id: &<N::Output as Output<N>>::Id) -> Vec<u8> {\n    Self::scanner_key(b\"seen\", id)\n  }\n  fn seen<G: Get>(getter: &G, id: &<N::Output as Output<N>>::Id) -> bool {\n    getter.get(Self::seen_key(id)).is_some()\n  }\n\n  fn outputs_key(block: &<N::Block as Block<N>>::Id) -> Vec<u8> {\n    Self::scanner_key(b\"outputs\", block.as_ref())\n  }\n  fn save_outputs(\n    txn: &mut D::Transaction<'_>,\n    block: &<N::Block as Block<N>>::Id,\n    outputs: &[N::Output],\n  ) {\n    let mut bytes = Vec::with_capacity(outputs.len() * 64);\n    for output in outputs {\n      output.write(&mut bytes).unwrap();\n    }\n    txn.put(Self::outputs_key(block), bytes);\n  }\n  fn outputs(\n    txn: &D::Transaction<'_>,\n    block: &<N::Block as Block<N>>::Id,\n  ) -> Option<Vec<N::Output>> {\n    let bytes_vec = txn.get(Self::outputs_key(block))?;\n    let mut bytes: &[u8] = bytes_vec.as_ref();\n\n    let mut res = vec![];\n    while !bytes.is_empty() {\n      res.push(N::Output::read(&mut bytes).unwrap());\n    }\n    Some(res)\n  }\n\n  fn scanned_block_key() -> Vec<u8> {\n    Self::scanner_key(b\"scanned_block\", [])\n  }\n\n  fn save_scanned_block(txn: &mut D::Transaction<'_>, block: usize) -> Vec<N::Output> {\n    let id = Self::block(txn, block); // It may be None for the first key rotated to\n    let outputs =\n      if let Some(id) = id.as_ref() { Self::outputs(txn, id).unwrap_or(vec![]) } else { vec![] };\n\n    // Mark all the outputs from this block as seen\n    for output in &outputs {\n      txn.put(Self::seen_key(&output.id()), b\"\");\n    }\n\n    txn.put(Self::scanned_block_key(), u64::try_from(block).unwrap().to_le_bytes());\n\n    // Return this block's outputs so they can be pruned from the RAM cache\n    outputs\n  }\n  fn latest_scanned_block<G: Get>(getter: &G) -> Option<usize> {\n    getter\n      
.get(Self::scanned_block_key())\n      .map(|bytes| u64::from_le_bytes(bytes.try_into().unwrap()).try_into().unwrap())\n  }\n\n  fn retirement_block_key(key: &<N::Curve as Ciphersuite>::G) -> Vec<u8> {\n    Self::scanner_key(b\"retirement_block\", key.to_bytes())\n  }\n  fn save_retirement_block(\n    txn: &mut D::Transaction<'_>,\n    key: &<N::Curve as Ciphersuite>::G,\n    block: usize,\n  ) {\n    txn.put(Self::retirement_block_key(key), u64::try_from(block).unwrap().to_le_bytes());\n  }\n  fn retirement_block<G: Get>(getter: &G, key: &<N::Curve as Ciphersuite>::G) -> Option<usize> {\n    getter\n      .get(Self::retirement_block_key(key))\n      .map(|bytes| usize::try_from(u64::from_le_bytes(bytes.try_into().unwrap())).unwrap())\n  }\n}\n\n/// The Scanner emits events relating to the blockchain, notably received outputs.\n///\n/// It WILL NOT fail to emit an event, even if it reboots at selected moments.\n///\n/// It MAY fire the same event multiple times.\n#[derive(Debug)]\npub struct Scanner<N: Network, D: Db> {\n  _db: PhantomData<D>,\n\n  keys: Vec<(usize, <N::Curve as Ciphersuite>::G)>,\n\n  eventualities: HashMap<Vec<u8>, EventualitiesTracker<N::Eventuality>>,\n\n  ram_scanned: Option<usize>,\n  ram_outputs: HashSet<Vec<u8>>,\n\n  need_ack: VecDeque<usize>,\n\n  events: mpsc::UnboundedSender<ScannerEvent<N>>,\n}\n\n#[derive(Clone, Debug)]\nstruct ScannerHold<N: Network, D: Db> {\n  scanner: Arc<RwLock<Option<Scanner<N, D>>>>,\n}\nimpl<N: Network, D: Db> ScannerHold<N, D> {\n  async fn read(&self) -> RwLockReadGuard<'_, Option<Scanner<N, D>>> {\n    loop {\n      let lock = self.scanner.read().await;\n      if lock.is_none() {\n        drop(lock);\n        tokio::task::yield_now().await;\n        continue;\n      }\n      return lock;\n    }\n  }\n  async fn write(&self) -> RwLockWriteGuard<'_, Option<Scanner<N, D>>> {\n    loop {\n      let lock = self.scanner.write().await;\n      if lock.is_none() {\n        drop(lock);\n        
tokio::task::yield_now().await;\n        continue;\n      }\n      return lock;\n    }\n  }\n  // This is safe to not check if something else already acquired the Scanner as the only caller is\n  // sequential.\n  async fn long_term_acquire(&self) -> Scanner<N, D> {\n    self.scanner.write().await.take().unwrap()\n  }\n  async fn restore(&self, scanner: Scanner<N, D>) {\n    let _ = self.scanner.write().await.insert(scanner);\n  }\n}\n\n#[derive(Debug)]\npub struct ScannerHandle<N: Network, D: Db> {\n  scanner: ScannerHold<N, D>,\n  held_scanner: Option<Scanner<N, D>>,\n  pub events: ScannerEventChannel<N>,\n  pub multisig_completed: mpsc::UnboundedSender<bool>,\n}\n\nimpl<N: Network, D: Db> ScannerHandle<N, D> {\n  pub async fn ram_scanned(&self) -> usize {\n    self.scanner.read().await.as_ref().unwrap().ram_scanned.unwrap_or(0)\n  }\n\n  /// Register a key to scan for.\n  pub async fn register_key(\n    &mut self,\n    txn: &mut D::Transaction<'_>,\n    activation_number: usize,\n    key: <N::Curve as Ciphersuite>::G,\n  ) {\n    info!(\"Registering key {} in scanner at {activation_number}\", hex::encode(key.to_bytes()));\n\n    let mut scanner_lock = self.scanner.write().await;\n    let scanner = scanner_lock.as_mut().unwrap();\n    assert!(\n      activation_number > scanner.ram_scanned.unwrap_or(0),\n      \"activation block of new keys was already scanned\",\n    );\n\n    if scanner.keys.is_empty() {\n      assert!(scanner.ram_scanned.is_none());\n      scanner.ram_scanned = Some(activation_number);\n      assert!(ScannerDb::<N, D>::save_scanned_block(txn, activation_number).is_empty());\n    }\n\n    ScannerDb::<N, D>::register_key(txn, activation_number, key);\n    scanner.keys.push((activation_number, key));\n    #[cfg(not(test))] // TODO: A test violates this. 
Improve the test with a better flow\n    assert!(scanner.keys.len() <= 2);\n\n    scanner.eventualities.insert(key.to_bytes().as_ref().to_vec(), EventualitiesTracker::new());\n  }\n\n  pub fn db_scanned<G: Get>(getter: &G) -> Option<usize> {\n    ScannerDb::<N, D>::latest_scanned_block(getter)\n  }\n\n  // This perform a database read which isn't safe with regards to if the value is set or not\n  // It may be set, when it isn't expected to be set, or not set, when it is expected to be set\n  // Since the value is static, if it's set, it's correctly set\n  pub fn block_number<G: Get>(getter: &G, id: &<N::Block as Block<N>>::Id) -> Option<usize> {\n    ScannerDb::<N, D>::block_number(getter, id)\n  }\n\n  /// Acknowledge having handled a block.\n  ///\n  /// Creates a lock over the Scanner, preventing its independent scanning operations until\n  /// released.\n  ///\n  /// This must only be called on blocks which have been scanned in-memory.\n  pub async fn ack_block(\n    &mut self,\n    txn: &mut D::Transaction<'_>,\n    id: <N::Block as Block<N>>::Id,\n  ) -> (bool, Vec<N::Output>) {\n    debug!(\"block {} acknowledged\", hex::encode(&id));\n\n    let mut scanner = self.scanner.long_term_acquire().await;\n\n    // Get the number for this block\n    let number = ScannerDb::<N, D>::block_number(txn, &id)\n      .expect(\"main loop trying to operate on data we haven't scanned\");\n    log::trace!(\"block {} was {number}\", hex::encode(&id));\n\n    let outputs = ScannerDb::<N, D>::save_scanned_block(txn, number);\n    // This has a race condition if we try to ack a block we scanned on a prior boot, and we have\n    // yet to scan it on this boot\n    assert!(number <= scanner.ram_scanned.unwrap());\n    for output in &outputs {\n      assert!(scanner.ram_outputs.remove(output.id().as_ref()));\n    }\n\n    assert_eq!(scanner.need_ack.pop_front().unwrap(), number);\n\n    self.held_scanner = Some(scanner);\n\n    // Load the key from the DB, as it will have already 
been removed from RAM if retired\n    let key = ScannerDb::<N, D>::keys(txn)[0].1;\n    let is_retirement_block = ScannerDb::<N, D>::retirement_block(txn, &key) == Some(number);\n    if is_retirement_block {\n      ScannerDb::<N, D>::retire_key(txn);\n    }\n    (is_retirement_block, outputs)\n  }\n\n  pub async fn register_eventuality(\n    &mut self,\n    key: &[u8],\n    block_number: usize,\n    id: [u8; 32],\n    eventuality: N::Eventuality,\n  ) {\n    let mut lock;\n    // We won't use held_scanner if we're re-registering on boot\n    (if let Some(scanner) = self.held_scanner.as_mut() {\n      scanner\n    } else {\n      lock = Some(self.scanner.write().await);\n      lock.as_mut().unwrap().as_mut().unwrap()\n    })\n    .eventualities\n    .get_mut(key)\n    .unwrap()\n    .register(block_number, id, eventuality)\n  }\n\n  pub async fn release_lock(&mut self) {\n    self.scanner.restore(self.held_scanner.take().unwrap()).await\n  }\n}\n\nimpl<N: Network, D: Db> Scanner<N, D> {\n  #[allow(clippy::type_complexity, clippy::new_ret_no_self)]\n  pub fn new(\n    network: N,\n    db: D,\n  ) -> (ScannerHandle<N, D>, Vec<(usize, <N::Curve as Ciphersuite>::G)>) {\n    let (events_send, events_recv) = mpsc::unbounded_channel();\n    let (multisig_completed_send, multisig_completed_recv) = mpsc::unbounded_channel();\n\n    let keys = ScannerDb::<N, D>::keys(&db);\n    let mut eventualities = HashMap::new();\n    for key in &keys {\n      eventualities.insert(key.1.to_bytes().as_ref().to_vec(), EventualitiesTracker::new());\n    }\n\n    let ram_scanned = ScannerDb::<N, D>::latest_scanned_block(&db);\n\n    let scanner = ScannerHold {\n      scanner: Arc::new(RwLock::new(Some(Scanner {\n        _db: PhantomData,\n\n        keys: keys.clone(),\n\n        eventualities,\n\n        ram_scanned,\n        ram_outputs: HashSet::new(),\n\n        need_ack: VecDeque::new(),\n\n        events: events_send,\n      }))),\n    };\n    tokio::spawn(Scanner::run(db, network, 
scanner.clone(), multisig_completed_recv));\n\n    (\n      ScannerHandle {\n        scanner,\n        held_scanner: None,\n        events: events_recv,\n        multisig_completed: multisig_completed_send,\n      },\n      keys,\n    )\n  }\n\n  fn emit(&mut self, event: ScannerEvent<N>) -> bool {\n    if self.events.send(event).is_err() {\n      info!(\"Scanner handler was dropped. Shutting down?\");\n      return false;\n    }\n    true\n  }\n\n  // An async function, to be spawned on a task, to discover and report outputs\n  async fn run(\n    mut db: D,\n    network: N,\n    scanner_hold: ScannerHold<N, D>,\n    mut multisig_completed: mpsc::UnboundedReceiver<bool>,\n  ) {\n    loop {\n      let (ram_scanned, latest_block_to_scan) = {\n        // Sleep 5 seconds to prevent hammering the node/scanner lock\n        sleep(Duration::from_secs(5)).await;\n\n        let ram_scanned = {\n          let scanner_lock = scanner_hold.read().await;\n          let scanner = scanner_lock.as_ref().unwrap();\n\n          // If we're not scanning for keys yet, wait until we are\n          if scanner.keys.is_empty() {\n            continue;\n          }\n\n          let ram_scanned = scanner.ram_scanned.unwrap();\n          // If a Batch has taken too long to be published, start waiting until it is before\n          // continuing scanning\n          // Solves a race condition around multisig rotation, documented in the relevant doc\n          // and demonstrated with mini\n          if let Some(needing_ack) = scanner.need_ack.front() {\n            let next = ram_scanned + 1;\n            let limit = needing_ack + N::CONFIRMATIONS;\n            assert!(next <= limit);\n            if next == limit {\n              continue;\n            }\n          };\n\n          ram_scanned\n        };\n\n        (\n          ram_scanned,\n          loop {\n            break match network.get_latest_block_number().await {\n              // Only scan confirmed blocks, which we consider 
effectively finalized\n              // CONFIRMATIONS - 1 as whatever's in the latest block already has 1 confirm\n              Ok(latest) => latest.saturating_sub(N::CONFIRMATIONS.saturating_sub(1)),\n              Err(_) => {\n                warn!(\"couldn't get latest block number\");\n                sleep(Duration::from_secs(60)).await;\n                continue;\n              }\n            };\n          },\n        )\n      };\n\n      for block_being_scanned in (ram_scanned + 1) ..= latest_block_to_scan {\n        // Redo the checks for if we're too far ahead\n        {\n          let needing_ack = {\n            let scanner_lock = scanner_hold.read().await;\n            let scanner = scanner_lock.as_ref().unwrap();\n            scanner.need_ack.front().copied()\n          };\n\n          if let Some(needing_ack) = needing_ack {\n            let limit = needing_ack + N::CONFIRMATIONS;\n            assert!(block_being_scanned <= limit);\n            if block_being_scanned == limit {\n              break;\n            }\n          }\n        }\n\n        let Ok(block) = network.get_block(block_being_scanned).await else {\n          warn!(\"couldn't get block {block_being_scanned}\");\n          break;\n        };\n        let block_id = block.id();\n\n        info!(\"scanning block: {} ({block_being_scanned})\", hex::encode(&block_id));\n\n        // These DB calls are safe, despite not having a txn, since they're static values\n        // There's no issue if they're written in advance of expected (such as on reboot)\n        // They're also only expected here\n        if let Some(id) = ScannerDb::<N, D>::block(&db, block_being_scanned) {\n          if id != block_id {\n            panic!(\"reorg'd from finalized {} to {}\", hex::encode(id), hex::encode(block_id));\n          }\n        } else {\n          // TODO: Move this to an unwrap\n          if let Some(id) = ScannerDb::<N, D>::block(&db, block_being_scanned.saturating_sub(1)) {\n            if id 
!= block.parent() {\n              panic!(\n                \"block {} doesn't build off expected parent {}\",\n                hex::encode(block_id),\n                hex::encode(id),\n              );\n            }\n          }\n\n          let mut txn = db.txn();\n          ScannerDb::<N, D>::save_block(&mut txn, block_being_scanned, &block_id);\n          txn.commit();\n        }\n\n        // Scan new blocks\n        // TODO: This lock acquisition may be long-lived...\n        let mut scanner_lock = scanner_hold.write().await;\n        let scanner = scanner_lock.as_mut().unwrap();\n\n        let mut has_activation = false;\n        let mut outputs = vec![];\n        let mut completion_block_numbers = vec![];\n        for (activation_number, key) in scanner.keys.clone() {\n          if activation_number > block_being_scanned {\n            continue;\n          }\n\n          if activation_number == block_being_scanned {\n            has_activation = true;\n          }\n\n          let key_vec = key.to_bytes().as_ref().to_vec();\n\n          // TODO: These lines are the ones which will cause a really long-lived lock acquisition\n          for output in network.get_outputs(&block, key).await {\n            assert_eq!(output.key(), key);\n            if output.balance().amount.0 >= N::DUST {\n              outputs.push(output);\n            }\n          }\n\n          for (id, (block_number, tx, completion)) in network\n            .get_eventuality_completions(scanner.eventualities.get_mut(&key_vec).unwrap(), &block)\n            .await\n          {\n            info!(\n              \"eventuality {} resolved by {}, as found on chain\",\n              hex::encode(id),\n              hex::encode(tx.as_ref())\n            );\n\n            completion_block_numbers.push(block_number);\n            // This must be before the mission of ScannerEvent::Block, per commentary in mod.rs\n            if !scanner.emit(ScannerEvent::Completed(\n              
key_vec.clone(),\n              block_number,\n              id,\n              tx,\n              completion,\n            )) {\n              return;\n            }\n          }\n        }\n\n        // Panic if we've already seen these outputs\n        for output in &outputs {\n          let id = output.id();\n          info!(\n            \"block {} had output {} worth {:?}\",\n            hex::encode(&block_id),\n            hex::encode(&id),\n            output.balance(),\n          );\n\n          // On Bitcoin, the output ID should be unique for a given chain\n          // On Monero, it's trivial to make an output sharing an ID with another\n          // We should only scan outputs with valid IDs however, which will be unique\n\n          /*\n            The safety of this code must satisfy the following conditions:\n            1) seen is not set for the first occurrence\n            2) seen is set for any future occurrence\n\n            seen is only written to after this code completes. Accordingly, it cannot be set\n            before the first occurrence UNLESSS it's set, yet the last scanned block isn't.\n            They are both written in the same database transaction, preventing this.\n\n            As for future occurrences, the RAM entry ensures they're handled properly even if\n            the database has yet to be set.\n\n            On reboot, which will clear the RAM, if seen wasn't set, neither was latest scanned\n            block. 
Accordingly, this will scan from some prior block, re-populating the RAM.\n\n            If seen was set, then this will be successfully read.\n\n            There's also no concern ram_outputs was pruned, yet seen wasn't set, as pruning\n            from ram_outputs will acquire a write lock (preventing this code from acquiring\n            its own write lock and running), and during its holding of the write lock, it\n            commits the transaction setting seen and the latest scanned block.\n\n            This last case isn't true. Committing seen/latest_scanned_block happens after\n            relinquishing the write lock.\n\n            TODO2: Only update ram_outputs after committing the TXN in question.\n          */\n          let seen = ScannerDb::<N, D>::seen(&db, &id);\n          let id = id.as_ref().to_vec();\n          if seen || scanner.ram_outputs.contains(&id) {\n            panic!(\"scanned an output multiple times\");\n          }\n          scanner.ram_outputs.insert(id);\n        }\n\n        // We could remove this, if instead of doing the first block which passed\n        // requirements + CONFIRMATIONS, we simply emitted an event for every block where\n        // `number % CONFIRMATIONS == 0` (once at the final stage for the existing multisig)\n        // There's no need at this point, yet the latter may be more suitable for modeling...\n        async fn check_multisig_completed<N: Network, D: Db>(\n          db: &mut D,\n          multisig_completed: &mut mpsc::UnboundedReceiver<bool>,\n          block_number: usize,\n        ) -> bool {\n          match multisig_completed.recv().await {\n            None => {\n              info!(\"Scanner handler was dropped. 
Shutting down?\");\n              false\n            }\n            Some(completed) => {\n              // Set the retirement block as block_number + CONFIRMATIONS\n              if completed {\n                let mut txn = db.txn();\n                // The retiring key is the earliest one still around\n                let retiring_key = ScannerDb::<N, D>::keys(&txn)[0].1;\n                // This value is static w.r.t. the key\n                ScannerDb::<N, D>::save_retirement_block(\n                  &mut txn,\n                  &retiring_key,\n                  block_number + N::CONFIRMATIONS,\n                );\n                txn.commit();\n              }\n              true\n            }\n          }\n        }\n\n        drop(scanner_lock);\n        // Now that we've dropped the Scanner lock, we need to handle the multisig_completed\n        // channel before we decide if this block should be fired or not\n        // (holding the Scanner risks a deadlock)\n        for block_number in completion_block_numbers {\n          if !check_multisig_completed::<N, _>(&mut db, &mut multisig_completed, block_number).await\n          {\n            return;\n          };\n        }\n\n        // Reacquire the scanner\n        let mut scanner_lock = scanner_hold.write().await;\n        let scanner = scanner_lock.as_mut().unwrap();\n\n        // Only emit an event if any of the following is true:\n        // - This is an activation block\n        // - This is a retirement block\n        // - There's outputs\n        // as only those blocks are meaningful and warrant obtaining synchrony over\n        let is_retirement_block =\n          ScannerDb::<N, D>::retirement_block(&db, &scanner.keys[0].1) == Some(block_being_scanned);\n        let sent_block = if has_activation || is_retirement_block || (!outputs.is_empty()) {\n          // Save the outputs to disk\n          let mut txn = db.txn();\n          ScannerDb::<N, D>::save_outputs(&mut txn, &block_id, &outputs);\n   
       txn.commit();\n\n          // Send all outputs\n          if !scanner.emit(ScannerEvent::Block { is_retirement_block, block: block_id, outputs }) {\n            return;\n          }\n\n          // Since we're creating a Batch, mark it as needing ack\n          scanner.need_ack.push_back(block_being_scanned);\n          true\n        } else {\n          false\n        };\n\n        // Remove it from memory\n        if is_retirement_block {\n          let retired = scanner.keys.remove(0).1;\n          scanner.eventualities.remove(retired.to_bytes().as_ref());\n        }\n\n        // Update ram_scanned\n        scanner.ram_scanned = Some(block_being_scanned);\n\n        drop(scanner_lock);\n        // If we sent a Block event, once again check multisig_completed\n        if sent_block &&\n          (!check_multisig_completed::<N, _>(\n            &mut db,\n            &mut multisig_completed,\n            block_being_scanned,\n          )\n          .await)\n        {\n          return;\n        }\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "processor/src/multisigs/scheduler/mod.rs",
    "content": "use core::fmt::Debug;\nuse std::io;\n\nuse ciphersuite::Ciphersuite;\n\nuse serai_client::primitives::{ExternalBalance, ExternalNetworkId};\n\nuse crate::{networks::Network, Db, Payment, Plan};\n\npub(crate) mod utxo;\npub(crate) mod smart_contract;\n\npub trait SchedulerAddendum: Send + Clone + PartialEq + Debug {\n  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self>;\n  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()>;\n}\n\nimpl SchedulerAddendum for () {\n  fn read<R: io::Read>(_: &mut R) -> io::Result<Self> {\n    Ok(())\n  }\n  fn write<W: io::Write>(&self, _: &mut W) -> io::Result<()> {\n    Ok(())\n  }\n}\n\npub trait Scheduler<N: Network>: Sized + Clone + PartialEq + Debug {\n  type Addendum: SchedulerAddendum;\n\n  /// Check if this Scheduler is empty.\n  fn empty(&self) -> bool;\n\n  /// Create a new Scheduler.\n  fn new<D: Db>(\n    txn: &mut D::Transaction<'_>,\n    key: <N::Curve as Ciphersuite>::G,\n    network: ExternalNetworkId,\n  ) -> Self;\n\n  /// Load a Scheduler from the DB.\n  fn from_db<D: Db>(\n    db: &D,\n    key: <N::Curve as Ciphersuite>::G,\n    network: ExternalNetworkId,\n  ) -> io::Result<Self>;\n\n  /// Check if a branch is usable.\n  fn can_use_branch(&self, balance: ExternalBalance) -> bool;\n\n  /// Schedule a series of outputs/payments.\n  fn schedule<D: Db>(\n    &mut self,\n    txn: &mut D::Transaction<'_>,\n    utxos: Vec<N::Output>,\n    payments: Vec<Payment<N>>,\n    // TODO: Tighten this to multisig_for_any_change\n    key_for_any_change: <N::Curve as Ciphersuite>::G,\n    force_spend: bool,\n  ) -> Vec<Plan<N>>;\n\n  /// Consume all payments still pending within this Scheduler, without scheduling them.\n  fn consume_payments<D: Db>(&mut self, txn: &mut D::Transaction<'_>) -> Vec<Payment<N>>;\n\n  /// Note a branch output as having been created, with the amount it was actually created with,\n  /// or not having been created due to being too small.\n  fn created_output<D: Db>(\n    
&mut self,\n    txn: &mut D::Transaction<'_>,\n    expected: u64,\n    actual: Option<u64>,\n  );\n\n  /// Refund a specific output.\n  fn refund_plan<D: Db>(\n    &mut self,\n    txn: &mut D::Transaction<'_>,\n    output: N::Output,\n    refund_to: N::Address,\n  ) -> Plan<N>;\n\n  /// Shim the forwarding Plan as necessary to obtain a fee estimate.\n  ///\n  /// If this Scheduler is for a Network which requires forwarding, this must return Some with a\n  /// plan with identical fee behavior. If forwarding isn't necessary, returns None.\n  fn shim_forward_plan(output: N::Output, to: <N::Curve as Ciphersuite>::G) -> Option<Plan<N>>;\n\n  /// Forward a specific output to the new multisig.\n  ///\n  /// Returns None if no forwarding is necessary. Must return Some if forwarding is necessary.\n  fn forward_plan<D: Db>(\n    &mut self,\n    txn: &mut D::Transaction<'_>,\n    output: N::Output,\n    to: <N::Curve as Ciphersuite>::G,\n  ) -> Option<Plan<N>>;\n}\n"
  },
  {
    "path": "processor/src/multisigs/scheduler/smart_contract.rs",
    "content": "use std::{io, collections::HashSet};\n\nuse ciphersuite::{group::GroupEncoding, Ciphersuite};\n\nuse serai_client::primitives::{ExternalBalance, ExternalCoin, ExternalNetworkId};\n\nuse crate::{\n  Get, DbTxn, Db, Payment, Plan, create_db,\n  networks::{Output, Network},\n  multisigs::scheduler::{SchedulerAddendum, Scheduler as SchedulerTrait},\n};\n\n#[derive(Clone, PartialEq, Eq, Debug)]\npub struct Scheduler<N: Network> {\n  key: <N::Curve as Ciphersuite>::G,\n  coins: HashSet<ExternalCoin>,\n  rotated: bool,\n}\n\n#[derive(Clone, Copy, PartialEq, Eq, Debug)]\npub enum Addendum<N: Network> {\n  Nonce(u64),\n  RotateTo { nonce: u64, new_key: <N::Curve as Ciphersuite>::G },\n}\n\nimpl<N: Network> SchedulerAddendum for Addendum<N> {\n  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {\n    let mut kind = [0xff];\n    reader.read_exact(&mut kind)?;\n    match kind[0] {\n      0 => {\n        let mut nonce = [0; 8];\n        reader.read_exact(&mut nonce)?;\n        Ok(Addendum::Nonce(u64::from_le_bytes(nonce)))\n      }\n      1 => {\n        let mut nonce = [0; 8];\n        reader.read_exact(&mut nonce)?;\n        let nonce = u64::from_le_bytes(nonce);\n\n        let new_key = N::Curve::read_G(reader)?;\n        Ok(Addendum::RotateTo { nonce, new_key })\n      }\n      _ => Err(io::Error::other(\"reading unknown Addendum type\"))?,\n    }\n  }\n  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {\n    match self {\n      Addendum::Nonce(nonce) => {\n        writer.write_all(&[0])?;\n        writer.write_all(&nonce.to_le_bytes())\n      }\n      Addendum::RotateTo { nonce, new_key } => {\n        writer.write_all(&[1])?;\n        writer.write_all(&nonce.to_le_bytes())?;\n        writer.write_all(new_key.to_bytes().as_ref())\n      }\n    }\n  }\n}\n\ncreate_db! 
{\n  SchedulerDb {\n    LastNonce: () -> u64,\n    RotatedTo: (key: &[u8]) -> Vec<u8>,\n  }\n}\n\nimpl<N: Network<Scheduler = Self>> SchedulerTrait<N> for Scheduler<N> {\n  type Addendum = Addendum<N>;\n\n  /// Check if this Scheduler is empty.\n  fn empty(&self) -> bool {\n    self.rotated\n  }\n\n  /// Create a new Scheduler.\n  fn new<D: Db>(\n    _txn: &mut D::Transaction<'_>,\n    key: <N::Curve as Ciphersuite>::G,\n    network: ExternalNetworkId,\n  ) -> Self {\n    assert!(N::branch_address(key).is_none());\n    assert!(N::change_address(key).is_none());\n    assert!(N::forward_address(key).is_none());\n\n    Scheduler { key, coins: network.coins().iter().copied().collect(), rotated: false }\n  }\n\n  /// Load a Scheduler from the DB.\n  fn from_db<D: Db>(\n    db: &D,\n    key: <N::Curve as Ciphersuite>::G,\n    network: ExternalNetworkId,\n  ) -> io::Result<Self> {\n    Ok(Scheduler {\n      key,\n      coins: network.coins().iter().copied().collect(),\n      rotated: RotatedTo::get(db, key.to_bytes().as_ref()).is_some(),\n    })\n  }\n\n  fn can_use_branch(&self, _balance: ExternalBalance) -> bool {\n    false\n  }\n\n  fn schedule<D: Db>(\n    &mut self,\n    txn: &mut D::Transaction<'_>,\n    utxos: Vec<N::Output>,\n    payments: Vec<Payment<N>>,\n    key_for_any_change: <N::Curve as Ciphersuite>::G,\n    force_spend: bool,\n  ) -> Vec<Plan<N>> {\n    for utxo in utxos {\n      assert!(self.coins.contains(&utxo.balance().coin));\n    }\n\n    let mut nonce = LastNonce::get(txn).unwrap_or(1);\n    let mut plans = vec![];\n    for chunk in payments.as_slice().chunks(N::MAX_OUTPUTS) {\n      // Once we rotate, all further payments should be scheduled via the new multisig\n      assert!(!self.rotated);\n      plans.push(Plan {\n        key: self.key,\n        inputs: vec![],\n        payments: chunk.to_vec(),\n        change: None,\n        scheduler_addendum: Addendum::Nonce(nonce),\n      });\n      nonce += 1;\n    }\n\n    // If we're supposed to rotate 
to the new key, create an empty Plan which will signify the key\n    // update\n    if force_spend && (!self.rotated) {\n      plans.push(Plan {\n        key: self.key,\n        inputs: vec![],\n        payments: vec![],\n        change: None,\n        scheduler_addendum: Addendum::RotateTo { nonce, new_key: key_for_any_change },\n      });\n      nonce += 1;\n      self.rotated = true;\n      RotatedTo::set(\n        txn,\n        self.key.to_bytes().as_ref(),\n        &key_for_any_change.to_bytes().as_ref().to_vec(),\n      );\n    }\n\n    LastNonce::set(txn, &nonce);\n\n    plans\n  }\n\n  fn consume_payments<D: Db>(&mut self, _txn: &mut D::Transaction<'_>) -> Vec<Payment<N>> {\n    vec![]\n  }\n\n  fn created_output<D: Db>(\n    &mut self,\n    _txn: &mut D::Transaction<'_>,\n    _expected: u64,\n    _actual: Option<u64>,\n  ) {\n    panic!(\"Smart Contract Scheduler created a Branch output\")\n  }\n\n  /// Refund a specific output.\n  fn refund_plan<D: Db>(\n    &mut self,\n    txn: &mut D::Transaction<'_>,\n    output: N::Output,\n    refund_to: N::Address,\n  ) -> Plan<N> {\n    let current_key = RotatedTo::get(txn, self.key.to_bytes().as_ref())\n      .and_then(|key_bytes| <N::Curve as Ciphersuite>::read_G(&mut key_bytes.as_slice()).ok())\n      .unwrap_or(self.key);\n\n    let nonce = LastNonce::get(txn).map_or(1, |nonce| nonce + 1);\n    LastNonce::set(txn, &(nonce + 1));\n    Plan {\n      key: current_key,\n      inputs: vec![],\n      payments: vec![Payment { address: refund_to, data: None, balance: output.balance() }],\n      change: None,\n      scheduler_addendum: Addendum::Nonce(nonce),\n    }\n  }\n\n  fn shim_forward_plan(_output: N::Output, _to: <N::Curve as Ciphersuite>::G) -> Option<Plan<N>> {\n    None\n  }\n\n  /// Forward a specific output to the new multisig.\n  ///\n  /// Returns None if no forwarding is necessary.\n  fn forward_plan<D: Db>(\n    &mut self,\n    _txn: &mut D::Transaction<'_>,\n    _output: N::Output,\n    _to: <N::Curve 
as Ciphersuite>::G,\n  ) -> Option<Plan<N>> {\n    None\n  }\n}\n"
  },
  {
    "path": "processor/src/multisigs/scheduler/utxo.rs",
    "content": "use std::{\n  io::{self, Read},\n  collections::{VecDeque, HashMap},\n};\n\nuse ciphersuite::{group::GroupEncoding, Ciphersuite};\n\nuse serai_client::primitives::{ExternalNetworkId, ExternalCoin, Amount, ExternalBalance};\n\nuse crate::{\n  DbTxn, Db, Payment, Plan,\n  networks::{OutputType, Output, Network, UtxoNetwork},\n  multisigs::scheduler::Scheduler as SchedulerTrait,\n};\n\n/// Deterministic output/payment manager.\n#[derive(Clone, PartialEq, Eq, Debug)]\npub struct Scheduler<N: UtxoNetwork> {\n  key: <N::Curve as Ciphersuite>::G,\n  coin: ExternalCoin,\n\n  // Serai, when it has more outputs expected than it can handle in a single transaction, will\n  // schedule the outputs to be handled later. Immediately, it just creates additional outputs\n  // which will eventually handle those outputs\n  //\n  // These maps map output amounts, which we'll receive in the future, to the payments they should\n  // be used on\n  //\n  // When those output amounts appear, their payments should be scheduled\n  // The Vec<Payment> is for all payments that should be done per output instance\n  // The VecDeque allows multiple sets of payments with the same sum amount to properly co-exist\n  //\n  // queued_plans are for outputs which we will create, yet when created, will have their amount\n  // reduced by the fee it cost to be created. 
The Scheduler will then be told how what amount the\n  // output actually has, and it'll be moved into plans\n  queued_plans: HashMap<u64, VecDeque<Vec<Payment<N>>>>,\n  plans: HashMap<u64, VecDeque<Vec<Payment<N>>>>,\n\n  // UTXOs available\n  utxos: Vec<N::Output>,\n\n  // Payments awaiting scheduling due to the output availability problem\n  payments: VecDeque<Payment<N>>,\n}\n\nfn scheduler_key<D: Db, G: GroupEncoding>(key: &G) -> Vec<u8> {\n  D::key(b\"SCHEDULER\", b\"scheduler\", key.to_bytes())\n}\n\nimpl<N: UtxoNetwork<Scheduler = Self>> Scheduler<N> {\n  pub fn empty(&self) -> bool {\n    self.queued_plans.is_empty() &&\n      self.plans.is_empty() &&\n      self.utxos.is_empty() &&\n      self.payments.is_empty()\n  }\n\n  fn read<R: Read>(\n    key: <N::Curve as Ciphersuite>::G,\n    coin: ExternalCoin,\n    reader: &mut R,\n  ) -> io::Result<Self> {\n    let mut read_plans = || -> io::Result<_> {\n      let mut all_plans = HashMap::new();\n      let mut all_plans_len = [0; 4];\n      reader.read_exact(&mut all_plans_len)?;\n      for _ in 0 .. u32::from_le_bytes(all_plans_len) {\n        let mut amount = [0; 8];\n        reader.read_exact(&mut amount)?;\n        let amount = u64::from_le_bytes(amount);\n\n        let mut plans = VecDeque::new();\n        let mut plans_len = [0; 4];\n        reader.read_exact(&mut plans_len)?;\n        for _ in 0 .. u32::from_le_bytes(plans_len) {\n          let mut payments = vec![];\n          let mut payments_len = [0; 4];\n          reader.read_exact(&mut payments_len)?;\n\n          for _ in 0 .. u32::from_le_bytes(payments_len) {\n            payments.push(Payment::read(reader)?);\n          }\n          plans.push_back(payments);\n        }\n        all_plans.insert(amount, plans);\n      }\n      Ok(all_plans)\n    };\n    let queued_plans = read_plans()?;\n    let plans = read_plans()?;\n\n    let mut utxos = vec![];\n    let mut utxos_len = [0; 4];\n    reader.read_exact(&mut utxos_len)?;\n    for _ in 0 .. 
u32::from_le_bytes(utxos_len) {\n      utxos.push(N::Output::read(reader)?);\n    }\n\n    let mut payments = VecDeque::new();\n    let mut payments_len = [0; 4];\n    reader.read_exact(&mut payments_len)?;\n    for _ in 0 .. u32::from_le_bytes(payments_len) {\n      payments.push_back(Payment::read(reader)?);\n    }\n\n    Ok(Scheduler { key, coin, queued_plans, plans, utxos, payments })\n  }\n\n  // TODO2: Get rid of this\n  // We reserialize the entire scheduler on any mutation to save it to the DB which is horrible\n  // We should have an incremental solution\n  fn serialize(&self) -> Vec<u8> {\n    let mut res = Vec::with_capacity(4096);\n\n    let mut write_plans = |plans: &HashMap<u64, VecDeque<Vec<Payment<N>>>>| {\n      res.extend(u32::try_from(plans.len()).unwrap().to_le_bytes());\n      for (amount, list_of_plans) in plans {\n        res.extend(amount.to_le_bytes());\n        res.extend(u32::try_from(list_of_plans.len()).unwrap().to_le_bytes());\n        for plan in list_of_plans {\n          res.extend(u32::try_from(plan.len()).unwrap().to_le_bytes());\n          for payment in plan {\n            payment.write(&mut res).unwrap();\n          }\n        }\n      }\n    };\n    write_plans(&self.queued_plans);\n    write_plans(&self.plans);\n\n    res.extend(u32::try_from(self.utxos.len()).unwrap().to_le_bytes());\n    for utxo in &self.utxos {\n      utxo.write(&mut res).unwrap();\n    }\n\n    res.extend(u32::try_from(self.payments.len()).unwrap().to_le_bytes());\n    for payment in &self.payments {\n      payment.write(&mut res).unwrap();\n    }\n\n    debug_assert_eq!(&Self::read(self.key, self.coin, &mut res.as_slice()).unwrap(), self);\n    res\n  }\n\n  pub fn new<D: Db>(\n    txn: &mut D::Transaction<'_>,\n    key: <N::Curve as Ciphersuite>::G,\n    network: ExternalNetworkId,\n  ) -> Self {\n    assert!(N::branch_address(key).is_some());\n    assert!(N::change_address(key).is_some());\n    assert!(N::forward_address(key).is_some());\n\n    let 
coin = {\n      let coins = network.coins();\n      assert_eq!(coins.len(), 1);\n      coins[0]\n    };\n\n    let res = Scheduler {\n      key,\n      coin,\n      queued_plans: HashMap::new(),\n      plans: HashMap::new(),\n      utxos: vec![],\n      payments: VecDeque::new(),\n    };\n    // Save it to disk so from_db won't panic if we don't mutate it before rebooting\n    txn.put(scheduler_key::<D, _>(&res.key), res.serialize());\n    res\n  }\n\n  pub fn from_db<D: Db>(\n    db: &D,\n    key: <N::Curve as Ciphersuite>::G,\n    network: ExternalNetworkId,\n  ) -> io::Result<Self> {\n    let coin = {\n      let coins = network.coins();\n      assert_eq!(coins.len(), 1);\n      coins[0]\n    };\n\n    let scheduler = db.get(scheduler_key::<D, _>(&key)).unwrap_or_else(|| {\n      panic!(\"loading scheduler from DB without scheduler for {}\", hex::encode(key.to_bytes()))\n    });\n    let mut reader_slice = scheduler.as_slice();\n    let reader = &mut reader_slice;\n\n    Self::read(key, coin, reader)\n  }\n\n  pub fn can_use_branch(&self, balance: ExternalBalance) -> bool {\n    assert_eq!(balance.coin, self.coin);\n    self.plans.contains_key(&balance.amount.0)\n  }\n\n  fn execute(\n    &mut self,\n    inputs: Vec<N::Output>,\n    mut payments: Vec<Payment<N>>,\n    key_for_any_change: <N::Curve as Ciphersuite>::G,\n  ) -> Plan<N> {\n    let mut change = false;\n    let mut max = N::MAX_OUTPUTS;\n\n    let payment_amounts = |payments: &Vec<Payment<N>>| {\n      payments.iter().map(|payment| payment.balance.amount.0).sum::<u64>()\n    };\n\n    // Requires a change output\n    if inputs.iter().map(|output| output.balance().amount.0).sum::<u64>() !=\n      payment_amounts(&payments)\n    {\n      change = true;\n      max -= 1;\n    }\n\n    let mut add_plan = |payments| {\n      let amount = payment_amounts(&payments);\n      self.queued_plans.entry(amount).or_insert(VecDeque::new()).push_back(payments);\n      amount\n    };\n\n    let branch_address = 
N::branch_address(self.key).unwrap();\n\n    // If we have more payments than we can handle in a single TX, create plans for them\n    // TODO2: This isn't perfect. For 258 outputs, and a MAX_OUTPUTS of 16, this will create:\n    // 15 branches of 16 leaves\n    // 1 branch of:\n    // - 1 branch of 16 leaves\n    // - 2 leaves\n    // If this was perfect, the heaviest branch would have 1 branch of 3 leaves and 15 leaves\n    while payments.len() > max {\n      // The resulting TX will have the remaining payments and a new branch payment\n      let to_remove = (payments.len() + 1) - N::MAX_OUTPUTS;\n      // Don't remove more than possible\n      let to_remove = to_remove.min(N::MAX_OUTPUTS);\n\n      // Create the plan\n      let removed = payments.drain((payments.len() - to_remove) ..).collect::<Vec<_>>();\n      assert_eq!(removed.len(), to_remove);\n      let amount = add_plan(removed);\n\n      // Create the payment for the plan\n      // Push it to the front so it's not moved into a branch until all lower-depth items are\n      payments.insert(\n        0,\n        Payment {\n          address: branch_address.clone(),\n          data: None,\n          balance: ExternalBalance { coin: self.coin, amount: Amount(amount) },\n        },\n      );\n    }\n\n    Plan {\n      key: self.key,\n      inputs,\n      payments,\n      change: Some(N::change_address(key_for_any_change).unwrap()).filter(|_| change),\n      scheduler_addendum: (),\n    }\n  }\n\n  fn add_outputs(\n    &mut self,\n    mut utxos: Vec<N::Output>,\n    key_for_any_change: <N::Curve as Ciphersuite>::G,\n  ) -> Vec<Plan<N>> {\n    log::info!(\"adding {} outputs\", utxos.len());\n\n    let mut txs = vec![];\n\n    for utxo in utxos.drain(..) 
{\n      if utxo.kind() == OutputType::Branch {\n        let amount = utxo.balance().amount.0;\n        if let Some(plans) = self.plans.get_mut(&amount) {\n          // Execute the first set of payments possible with an output of this amount\n          let payments = plans.pop_front().unwrap();\n          // They won't be equal if we dropped payments due to being dust\n          assert!(amount >= payments.iter().map(|payment| payment.balance.amount.0).sum::<u64>());\n\n          // If we've grabbed the last plan for this output amount, remove it from the map\n          if plans.is_empty() {\n            self.plans.remove(&amount);\n          }\n\n          // Create a TX for these payments\n          txs.push(self.execute(vec![utxo], payments, key_for_any_change));\n          continue;\n        }\n      }\n\n      self.utxos.push(utxo);\n    }\n\n    log::info!(\"{} planned TXs have had their required inputs confirmed\", txs.len());\n    txs\n  }\n\n  // Schedule a series of outputs/payments.\n  pub fn schedule<D: Db>(\n    &mut self,\n    txn: &mut D::Transaction<'_>,\n    utxos: Vec<N::Output>,\n    mut payments: Vec<Payment<N>>,\n    key_for_any_change: <N::Curve as Ciphersuite>::G,\n    force_spend: bool,\n  ) -> Vec<Plan<N>> {\n    for utxo in &utxos {\n      assert_eq!(utxo.balance().coin, self.coin);\n    }\n    for payment in &payments {\n      assert_eq!(payment.balance.coin, self.coin);\n    }\n\n    // Drop payments to our own branch address\n    /*\n      created_output will be called any time we send to a branch address. If it's called, and it\n      wasn't expecting to be called, that's almost certainly an error. The only way to guarantee\n      this however is to only have us send to a branch address when creating a branch, hence the\n      dropping of pointless payments.\n\n      This is not comprehensive as a payment may still be made to another active multisig's branch\n      address, depending on timing. 
This is safe as the issue only occurs when a multisig sends to\n      its *own* branch address, since created_output is called on the signer's Scheduler.\n    */\n    {\n      let branch_address = N::branch_address(self.key).unwrap();\n      payments =\n        payments.drain(..).filter(|payment| payment.address != branch_address).collect::<Vec<_>>();\n    }\n\n    let mut plans = self.add_outputs(utxos, key_for_any_change);\n\n    log::info!(\"scheduling {} new payments\", payments.len());\n\n    // Add all new payments to the list of pending payments\n    self.payments.extend(payments);\n    let payments_at_start = self.payments.len();\n    log::info!(\"{} payments are now scheduled\", payments_at_start);\n\n    // If we don't have UTXOs available, don't try to continue\n    if self.utxos.is_empty() {\n      log::info!(\"no utxos currently available\");\n      return plans;\n    }\n\n    // Sort UTXOs so the highest valued ones are first\n    self.utxos.sort_by(|a, b| a.balance().amount.0.cmp(&b.balance().amount.0).reverse());\n\n    // We always want to aggregate our UTXOs into a single UTXO in the name of simplicity\n    // We may have more UTXOs than will fit into a TX though\n    // We use the most valuable UTXOs to handle our current payments, and we return aggregation TXs\n    // for the rest of the inputs\n    // Since we do multiple aggregation TXs at once, this will execute in logarithmic time\n    let utxos = self.utxos.drain(..).collect::<Vec<_>>();\n    let mut utxo_chunks =\n      utxos.chunks(N::MAX_INPUTS).map(<[<N as Network>::Output]>::to_vec).collect::<Vec<_>>();\n\n    // Use the first chunk for any scheduled payments, since it has the most value\n    let utxos = utxo_chunks.remove(0);\n\n    // If the last chunk exists and only has one output, don't try aggregating it\n    // Set it to be restored to UTXO set\n    let mut to_restore = None;\n    if let Some(mut chunk) = utxo_chunks.pop() {\n      if chunk.len() == 1 {\n        to_restore = 
Some(chunk.pop().unwrap());\n      } else {\n        utxo_chunks.push(chunk);\n      }\n    }\n\n    for chunk in utxo_chunks.drain(..) {\n      log::debug!(\"aggregating a chunk of {} inputs\", chunk.len());\n      plans.push(Plan {\n        key: self.key,\n        inputs: chunk,\n        payments: vec![],\n        change: Some(N::change_address(key_for_any_change).unwrap()),\n        scheduler_addendum: (),\n      })\n    }\n\n    // We want to use all possible UTXOs for all possible payments\n    let mut balance = utxos.iter().map(|output| output.balance().amount.0).sum::<u64>();\n\n    // If we can't fulfill the next payment, we have encountered an instance of the UTXO\n    // availability problem\n    // This shows up in networks like Monero, where because we spent outputs, our change has yet to\n    // re-appear. Since it has yet to re-appear, we only operate with a balance which is a subset\n    // of our total balance\n    // Despite this, we may be ordered to fulfill a payment which is our total balance\n    // The solution is to wait for the temporarily unavailable change outputs to re-appear,\n    // granting us access to our full balance\n    let mut executing = vec![];\n    while !self.payments.is_empty() {\n      let amount = self.payments[0].balance.amount.0;\n      if balance.checked_sub(amount).is_some() {\n        balance -= amount;\n        executing.push(self.payments.pop_front().unwrap());\n      } else {\n        // Doesn't check if other payments would fit into the current batch as doing so may never\n        // let enough inputs become simultaneously availabile to enable handling of payments[0]\n        break;\n      }\n    }\n\n    // Now that we have the list of payments we can successfully handle right now, create the TX\n    // for them\n    if !executing.is_empty() {\n      plans.push(self.execute(utxos, executing, key_for_any_change));\n    } else {\n      // If we don't have any payments to execute, save these UTXOs for later\n      
self.utxos.extend(utxos);\n    }\n\n    // If we're instructed to force a spend, do so\n    // This is used when an old multisig is retiring and we want to always transfer outputs to the\n    // new one, regardless if we currently have payments\n    if force_spend && (!self.utxos.is_empty()) {\n      assert!(self.utxos.len() <= N::MAX_INPUTS);\n      plans.push(Plan {\n        key: self.key,\n        inputs: self.utxos.drain(..).collect::<Vec<_>>(),\n        payments: vec![],\n        change: Some(N::change_address(key_for_any_change).unwrap()),\n        scheduler_addendum: (),\n      });\n    }\n\n    // If there's a UTXO to restore, restore it\n    // This is done now as if there is a to_restore output, and it was inserted into self.utxos\n    // earlier, self.utxos.len() may become `N::MAX_INPUTS + 1`\n    // The prior block requires the len to be `<= N::MAX_INPUTS`\n    if let Some(to_restore) = to_restore {\n      self.utxos.push(to_restore);\n    }\n\n    txn.put(scheduler_key::<D, _>(&self.key), self.serialize());\n\n    log::info!(\n      \"created {} plans containing {} payments to sign, with {} payments pending scheduling\",\n      plans.len(),\n      payments_at_start - self.payments.len(),\n      self.payments.len(),\n    );\n    plans\n  }\n\n  pub fn consume_payments<D: Db>(&mut self, txn: &mut D::Transaction<'_>) -> Vec<Payment<N>> {\n    let res: Vec<_> = self.payments.drain(..).collect();\n    if !res.is_empty() {\n      txn.put(scheduler_key::<D, _>(&self.key), self.serialize());\n    }\n    res\n  }\n\n  // Note a branch output as having been created, with the amount it was actually created with,\n  // or not having been created due to being too small\n  pub fn created_output<D: Db>(\n    &mut self,\n    txn: &mut D::Transaction<'_>,\n    expected: u64,\n    actual: Option<u64>,\n  ) {\n    log::debug!(\"output expected to have {} had {:?} after fees\", expected, actual);\n\n    // Get the payments this output is expected to handle\n    let 
queued = self.queued_plans.get_mut(&expected).unwrap();\n    let mut payments = queued.pop_front().unwrap();\n    assert_eq!(expected, payments.iter().map(|payment| payment.balance.amount.0).sum::<u64>());\n    // If this was the last set of payments at this amount, remove it\n    if queued.is_empty() {\n      self.queued_plans.remove(&expected);\n    }\n\n    // If we didn't actually create this output, return, dropping the child payments\n    let Some(actual) = actual else { return };\n\n    // Amortize the fee amongst all payments underneath this branch\n    {\n      let mut to_amortize = actual - expected;\n      // If the payments are worth less than this fee we need to amortize, return, dropping them\n      if payments.iter().map(|payment| payment.balance.amount.0).sum::<u64>() < to_amortize {\n        return;\n      }\n      while to_amortize != 0 {\n        let payments_len = u64::try_from(payments.len()).unwrap();\n        let per_payment = to_amortize / payments_len;\n        let mut overage = to_amortize % payments_len;\n\n        for payment in &mut payments {\n          let to_subtract = per_payment + overage;\n          // Only subtract the overage once\n          overage = 0;\n\n          let subtractable = payment.balance.amount.0.min(to_subtract);\n          to_amortize -= subtractable;\n          payment.balance.amount.0 -= subtractable;\n        }\n      }\n    }\n\n    // Drop payments now below the dust threshold\n    let payments = payments\n      .into_iter()\n      .filter(|payment| payment.balance.amount.0 >= N::DUST)\n      .collect::<Vec<_>>();\n    // Sanity check this was done properly\n    assert!(actual >= payments.iter().map(|payment| payment.balance.amount.0).sum::<u64>());\n\n    // If there's no payments left, return\n    if payments.is_empty() {\n      return;\n    }\n\n    self.plans.entry(actual).or_insert(VecDeque::new()).push_back(payments);\n\n    // TODO2: This shows how ridiculous the serialize function is\n    
txn.put(scheduler_key::<D, _>(&self.key), self.serialize());\n  }\n}\n\nimpl<N: UtxoNetwork<Scheduler = Self>> SchedulerTrait<N> for Scheduler<N> {\n  type Addendum = ();\n\n  /// Check if this Scheduler is empty.\n  fn empty(&self) -> bool {\n    Scheduler::empty(self)\n  }\n\n  /// Create a new Scheduler.\n  fn new<D: Db>(\n    txn: &mut D::Transaction<'_>,\n    key: <N::Curve as Ciphersuite>::G,\n    network: ExternalNetworkId,\n  ) -> Self {\n    Scheduler::new::<D>(txn, key, network)\n  }\n\n  /// Load a Scheduler from the DB.\n  fn from_db<D: Db>(\n    db: &D,\n    key: <N::Curve as Ciphersuite>::G,\n    network: ExternalNetworkId,\n  ) -> io::Result<Self> {\n    Scheduler::from_db::<D>(db, key, network)\n  }\n\n  /// Check if a branch is usable.\n  fn can_use_branch(&self, balance: ExternalBalance) -> bool {\n    Scheduler::can_use_branch(self, balance)\n  }\n\n  /// Schedule a series of outputs/payments.\n  fn schedule<D: Db>(\n    &mut self,\n    txn: &mut D::Transaction<'_>,\n    utxos: Vec<N::Output>,\n    payments: Vec<Payment<N>>,\n    key_for_any_change: <N::Curve as Ciphersuite>::G,\n    force_spend: bool,\n  ) -> Vec<Plan<N>> {\n    Scheduler::schedule::<D>(self, txn, utxos, payments, key_for_any_change, force_spend)\n  }\n\n  /// Consume all payments still pending within this Scheduler, without scheduling them.\n  fn consume_payments<D: Db>(&mut self, txn: &mut D::Transaction<'_>) -> Vec<Payment<N>> {\n    Scheduler::consume_payments::<D>(self, txn)\n  }\n\n  /// Note a branch output as having been created, with the amount it was actually created with,\n  /// or not having been created due to being too small.\n  // TODO: Move this to ExternalBalance.\n  fn created_output<D: Db>(\n    &mut self,\n    txn: &mut D::Transaction<'_>,\n    expected: u64,\n    actual: Option<u64>,\n  ) {\n    Scheduler::created_output::<D>(self, txn, expected, actual)\n  }\n\n  fn refund_plan<D: Db>(\n    &mut self,\n    _: &mut D::Transaction<'_>,\n    output: 
N::Output,\n    refund_to: N::Address,\n  ) -> Plan<N> {\n    let output_id = output.id().as_ref().to_vec();\n    let res = Plan {\n      key: output.key(),\n      // Uses a payment as this will still be successfully sent due to fee amortization,\n      // and because change is currently always a Serai key\n      payments: vec![Payment { address: refund_to, data: None, balance: output.balance() }],\n      inputs: vec![output],\n      change: None,\n      scheduler_addendum: (),\n    };\n    log::info!(\"refund plan for {} has ID {}\", hex::encode(output_id), hex::encode(res.id()));\n    res\n  }\n\n  fn shim_forward_plan(output: N::Output, to: <N::Curve as Ciphersuite>::G) -> Option<Plan<N>> {\n    Some(Plan {\n      key: output.key(),\n      payments: vec![Payment {\n        address: N::forward_address(to).unwrap(),\n        data: None,\n        balance: output.balance(),\n      }],\n      inputs: vec![output],\n      change: None,\n      scheduler_addendum: (),\n    })\n  }\n\n  fn forward_plan<D: Db>(\n    &mut self,\n    _: &mut D::Transaction<'_>,\n    output: N::Output,\n    to: <N::Curve as Ciphersuite>::G,\n  ) -> Option<Plan<N>> {\n    assert_eq!(self.key, output.key());\n    // Call shim as shim returns the actual\n    Self::shim_forward_plan(output, to)\n  }\n}\n"
  },
  {
    "path": "processor/src/networks/bitcoin.rs",
    "content": "use std::{sync::OnceLock, time::Duration, io, collections::HashMap};\n\nuse async_trait::async_trait;\n\nuse scale::{Encode, Decode};\n\nuse ciphersuite::group::ff::PrimeField;\nuse k256::{ProjectivePoint, Scalar};\nuse frost::{\n  curve::{Curve, Secp256k1},\n  ThresholdKeys,\n};\n\nuse tokio::time::sleep;\n\nuse bitcoin_serai::{\n  bitcoin::{\n    hashes::Hash as HashTrait,\n    key::{Parity, XOnlyPublicKey},\n    consensus::{Encodable, Decodable},\n    script::Instruction,\n    Transaction, Block, ScriptBuf,\n    opcodes::all::{OP_SHA256, OP_EQUALVERIFY},\n  },\n  wallet::{\n    tweak_keys, p2tr_script_buf, ReceivedOutput, Scanner, TransactionError,\n    SignableTransaction as BSignableTransaction, TransactionMachine,\n  },\n  rpc::{RpcError, Rpc},\n};\n\n#[cfg(test)]\nuse bitcoin_serai::bitcoin::{\n  secp256k1::{SECP256K1, SecretKey, Message},\n  PrivateKey, PublicKey,\n  sighash::{EcdsaSighashType, SighashCache},\n  script::PushBytesBuf,\n  absolute::LockTime,\n  Amount as BAmount, Sequence, Script, Witness, OutPoint,\n  transaction::Version,\n  blockdata::transaction::{TxIn, TxOut},\n};\n\nuse serai_client::{\n  primitives::{MAX_DATA_LEN, ExternalCoin, ExternalNetworkId, Amount, ExternalBalance},\n  networks::bitcoin::Address,\n};\n\nuse crate::{\n  networks::{\n    NetworkError, Block as BlockTrait, OutputType, Output as OutputTrait,\n    Transaction as TransactionTrait, SignableTransaction as SignableTransactionTrait,\n    Eventuality as EventualityTrait, EventualitiesTracker, Network, UtxoNetwork,\n  },\n  Payment,\n  multisigs::scheduler::utxo::Scheduler,\n};\n\n#[derive(Clone, PartialEq, Eq, Debug)]\npub struct OutputId(pub [u8; 36]);\nimpl Default for OutputId {\n  fn default() -> Self {\n    Self([0; 36])\n  }\n}\nimpl AsRef<[u8]> for OutputId {\n  fn as_ref(&self) -> &[u8] {\n    self.0.as_ref()\n  }\n}\nimpl AsMut<[u8]> for OutputId {\n  fn as_mut(&mut self) -> &mut [u8] {\n    self.0.as_mut()\n  }\n}\n\n#[derive(Clone, PartialEq, Eq, 
Debug)]\npub struct Output {\n  kind: OutputType,\n  presumed_origin: Option<Address>,\n  output: ReceivedOutput,\n  data: Vec<u8>,\n}\n\nimpl OutputTrait<Bitcoin> for Output {\n  type Id = OutputId;\n\n  fn kind(&self) -> OutputType {\n    self.kind\n  }\n\n  fn id(&self) -> Self::Id {\n    let mut res = OutputId::default();\n    self.output.outpoint().consensus_encode(&mut res.as_mut()).unwrap();\n    debug_assert_eq!(\n      {\n        let mut outpoint = vec![];\n        self.output.outpoint().consensus_encode(&mut outpoint).unwrap();\n        outpoint\n      },\n      res.as_ref().to_vec()\n    );\n    res\n  }\n\n  fn tx_id(&self) -> [u8; 32] {\n    let mut hash = *self.output.outpoint().txid.as_raw_hash().as_byte_array();\n    hash.reverse();\n    hash\n  }\n\n  fn key(&self) -> ProjectivePoint {\n    let script = &self.output.output().script_pubkey;\n    assert!(script.is_p2tr());\n    let Instruction::PushBytes(key) = script.instructions_minimal().last().unwrap().unwrap() else {\n      panic!(\"last item in v1 Taproot script wasn't bytes\")\n    };\n    let key = XOnlyPublicKey::from_slice(key.as_ref())\n      .expect(\"last item in v1 Taproot script wasn't x-only public key\");\n    Secp256k1::read_G(&mut key.public_key(Parity::Even).serialize().as_slice()).unwrap() -\n      (ProjectivePoint::GENERATOR * self.output.offset())\n  }\n\n  fn presumed_origin(&self) -> Option<Address> {\n    self.presumed_origin.clone()\n  }\n\n  fn balance(&self) -> ExternalBalance {\n    ExternalBalance { coin: ExternalCoin::Bitcoin, amount: Amount(self.output.value()) }\n  }\n\n  fn data(&self) -> &[u8] {\n    &self.data\n  }\n\n  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {\n    self.kind.write(writer)?;\n    let presumed_origin: Option<Vec<u8>> = self.presumed_origin.clone().map(Into::into);\n    writer.write_all(&presumed_origin.encode())?;\n    self.output.write(writer)?;\n    
writer.write_all(&u16::try_from(self.data.len()).unwrap().to_le_bytes())?;\n    writer.write_all(&self.data)\n  }\n\n  fn read<R: io::Read>(mut reader: &mut R) -> io::Result<Self> {\n    Ok(Output {\n      kind: OutputType::read(reader)?,\n      presumed_origin: {\n        let mut io_reader = scale::IoReader(reader);\n        let res = Option::<Vec<u8>>::decode(&mut io_reader)\n          .unwrap()\n          .map(|address| Address::try_from(address).unwrap());\n        reader = io_reader.0;\n        res\n      },\n      output: ReceivedOutput::read(reader)?,\n      data: {\n        let mut data_len = [0; 2];\n        reader.read_exact(&mut data_len)?;\n\n        let mut data = vec![0; usize::from(u16::from_le_bytes(data_len))];\n        reader.read_exact(&mut data)?;\n        data\n      },\n    })\n  }\n}\n\n#[derive(Clone, Copy, PartialEq, Eq, Debug)]\npub struct Fee(u64);\n\n#[async_trait]\nimpl TransactionTrait<Bitcoin> for Transaction {\n  type Id = [u8; 32];\n  fn id(&self) -> Self::Id {\n    let mut hash = *self.compute_txid().as_raw_hash().as_byte_array();\n    hash.reverse();\n    hash\n  }\n\n  #[cfg(test)]\n  async fn fee(&self, network: &Bitcoin) -> u64 {\n    let mut value = 0;\n    for input in &self.input {\n      let output = input.previous_output;\n      let mut hash = *output.txid.as_raw_hash().as_byte_array();\n      hash.reverse();\n      value += network.rpc.get_transaction(&hash).await.unwrap().output\n        [usize::try_from(output.vout).unwrap()]\n      .value\n      .to_sat();\n    }\n    for output in &self.output {\n      value -= output.value.to_sat();\n    }\n    value\n  }\n}\n\n#[derive(Clone, PartialEq, Eq, Debug)]\npub struct Eventuality([u8; 32]);\n\n#[derive(Clone, PartialEq, Eq, Default, Debug)]\npub struct EmptyClaim;\nimpl AsRef<[u8]> for EmptyClaim {\n  fn as_ref(&self) -> &[u8] {\n    &[]\n  }\n}\nimpl AsMut<[u8]> for EmptyClaim {\n  fn as_mut(&mut self) -> &mut [u8] {\n    &mut []\n  }\n}\n\nimpl EventualityTrait for 
Eventuality {\n  type Claim = EmptyClaim;\n  type Completion = Transaction;\n\n  fn lookup(&self) -> Vec<u8> {\n    self.0.to_vec()\n  }\n\n  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {\n    let mut id = [0; 32];\n    reader\n      .read_exact(&mut id)\n      .map_err(|_| io::Error::other(\"couldn't decode ID in eventuality\"))?;\n    Ok(Eventuality(id))\n  }\n  fn serialize(&self) -> Vec<u8> {\n    self.0.to_vec()\n  }\n\n  fn claim(_: &Transaction) -> EmptyClaim {\n    EmptyClaim\n  }\n  fn serialize_completion(completion: &Transaction) -> Vec<u8> {\n    let mut buf = vec![];\n    completion.consensus_encode(&mut buf).unwrap();\n    buf\n  }\n  fn read_completion<R: io::Read>(reader: &mut R) -> io::Result<Transaction> {\n    Transaction::consensus_decode(&mut io::BufReader::with_capacity(0, reader))\n      .map_err(|e| io::Error::other(format!(\"{e}\")))\n  }\n}\n\n#[derive(Clone, Debug)]\npub struct SignableTransaction {\n  actual: BSignableTransaction,\n}\nimpl PartialEq for SignableTransaction {\n  fn eq(&self, other: &SignableTransaction) -> bool {\n    self.actual == other.actual\n  }\n}\nimpl Eq for SignableTransaction {}\nimpl SignableTransactionTrait for SignableTransaction {\n  fn fee(&self) -> u64 {\n    self.actual.fee()\n  }\n}\n\n#[async_trait]\nimpl BlockTrait<Bitcoin> for Block {\n  type Id = [u8; 32];\n  fn id(&self) -> Self::Id {\n    let mut hash = *self.block_hash().as_raw_hash().as_byte_array();\n    hash.reverse();\n    hash\n  }\n\n  fn parent(&self) -> Self::Id {\n    let mut hash = *self.header.prev_blockhash.as_raw_hash().as_byte_array();\n    hash.reverse();\n    hash\n  }\n\n  async fn time(&self, rpc: &Bitcoin) -> u64 {\n    // Use the network median time defined in BIP-0113 since the in-block time isn't guaranteed to\n    // be monotonic\n    let mut timestamps = vec![u64::from(self.header.time)];\n    let mut parent = self.parent();\n    // BIP-0113 uses a median of the prior 11 blocks\n    while timestamps.len() < 11 
{\n      let mut parent_block;\n      while {\n        parent_block = rpc.rpc.get_block(&parent).await;\n        parent_block.is_err()\n      } {\n        log::error!(\"couldn't get parent block when trying to get block time: {parent_block:?}\");\n        sleep(Duration::from_secs(5)).await;\n      }\n      let parent_block = parent_block.unwrap();\n      timestamps.push(u64::from(parent_block.header.time));\n      parent = parent_block.parent();\n\n      if parent == [0; 32] {\n        break;\n      }\n    }\n    timestamps.sort();\n    timestamps[timestamps.len() / 2]\n  }\n}\n\nconst KEY_DST: &[u8] = b\"Serai Bitcoin Output Offset\";\nstatic BRANCH_OFFSET: OnceLock<Scalar> = OnceLock::new();\nstatic CHANGE_OFFSET: OnceLock<Scalar> = OnceLock::new();\nstatic FORWARD_OFFSET: OnceLock<Scalar> = OnceLock::new();\n\n// Always construct the full scanner in order to ensure there's no collisions\nfn scanner(\n  key: ProjectivePoint,\n) -> (Scanner, HashMap<OutputType, Scalar>, HashMap<Vec<u8>, OutputType>) {\n  let mut scanner = Scanner::new(key).unwrap();\n  let mut offsets = HashMap::from([(OutputType::External, Scalar::ZERO)]);\n\n  let zero = Scalar::ZERO.to_repr();\n  let zero_ref: &[u8] = zero.as_ref();\n  let mut kinds = HashMap::from([(zero_ref.to_vec(), OutputType::External)]);\n\n  let mut register = |kind, offset| {\n    let offset = scanner.register_offset(offset).expect(\"offset collision\");\n    offsets.insert(kind, offset);\n\n    let offset = offset.to_repr();\n    let offset_ref: &[u8] = offset.as_ref();\n    kinds.insert(offset_ref.to_vec(), kind);\n  };\n\n  register(\n    OutputType::Branch,\n    *BRANCH_OFFSET.get_or_init(|| Secp256k1::hash_to_F(KEY_DST, b\"branch\")),\n  );\n  register(\n    OutputType::Change,\n    *CHANGE_OFFSET.get_or_init(|| Secp256k1::hash_to_F(KEY_DST, b\"change\")),\n  );\n  register(\n    OutputType::Forwarded,\n    *FORWARD_OFFSET.get_or_init(|| Secp256k1::hash_to_F(KEY_DST, b\"forward\")),\n  );\n\n  (scanner, offsets, 
kinds)\n}\n\n#[derive(Clone, Debug)]\npub struct Bitcoin {\n  pub(crate) rpc: Rpc,\n}\n// Shim required for testing/debugging purposes due to generic arguments also necessitating trait\n// bounds\nimpl PartialEq for Bitcoin {\n  fn eq(&self, _: &Self) -> bool {\n    true\n  }\n}\nimpl Eq for Bitcoin {}\n\nimpl Bitcoin {\n  pub async fn new(url: String) -> Bitcoin {\n    let mut res = Rpc::new(url.clone()).await;\n    while let Err(e) = res {\n      log::error!(\"couldn't connect to Bitcoin node: {e:?}\");\n      sleep(Duration::from_secs(5)).await;\n      res = Rpc::new(url.clone()).await;\n    }\n    Bitcoin { rpc: res.unwrap() }\n  }\n\n  #[cfg(test)]\n  pub async fn fresh_chain(&self) {\n    if self.rpc.get_latest_block_number().await.unwrap() > 0 {\n      self\n        .rpc\n        .rpc_call(\n          \"invalidateblock\",\n          serde_json::json!([hex::encode(self.rpc.get_block_hash(1).await.unwrap())]),\n        )\n        .await\n        .unwrap()\n    }\n  }\n\n  // This function panics on a node which doesn't follow the Bitcoin protocol, which is deemed fine\n  async fn median_fee(&self, block: &Block) -> Result<Fee, NetworkError> {\n    let mut fees = vec![];\n    if block.txdata.len() > 1 {\n      for tx in &block.txdata[1 ..] 
{\n        let mut in_value = 0;\n        for input in &tx.input {\n          let mut input_tx = input.previous_output.txid.to_raw_hash().to_byte_array();\n          input_tx.reverse();\n          in_value += self\n            .rpc\n            .get_transaction(&input_tx)\n            .await\n            .map_err(|_| NetworkError::ConnectionError)?\n            .output[usize::try_from(input.previous_output.vout).unwrap()]\n          .value\n          .to_sat();\n        }\n        let out = tx.output.iter().map(|output| output.value.to_sat()).sum::<u64>();\n        fees.push((in_value - out) / u64::try_from(tx.vsize()).unwrap());\n      }\n    }\n    fees.sort();\n    let fee = fees.get(fees.len() / 2).copied().unwrap_or(0);\n\n    // The DUST constant documentation notes a relay rule practically enforcing a\n    // 1000 sat/kilo-vbyte minimum fee.\n    Ok(Fee(fee.max(1)))\n  }\n\n  async fn make_signable_transaction(\n    &self,\n    block_number: usize,\n    inputs: &[Output],\n    payments: &[Payment<Self>],\n    change: &Option<Address>,\n    calculating_fee: bool,\n  ) -> Result<Option<BSignableTransaction>, NetworkError> {\n    for payment in payments {\n      assert_eq!(payment.balance.coin, ExternalCoin::Bitcoin);\n    }\n\n    // TODO2: Use an fee representative of several blocks, cached inside Self\n    let block_for_fee = self.get_block(block_number).await?;\n    let fee = self.median_fee(&block_for_fee).await?;\n\n    let payments = payments\n      .iter()\n      .map(|payment| {\n        (\n          payment.address.clone().into(),\n          // If we're solely estimating the fee, don't specify the actual amount\n          // This won't affect the fee calculation yet will ensure we don't hit a not enough funds\n          // error\n          if calculating_fee { Self::DUST } else { payment.balance.amount.0 },\n        )\n      })\n      .collect::<Vec<_>>();\n\n    match BSignableTransaction::new(\n      inputs.iter().map(|input| 
input.output.clone()).collect(),\n      &payments,\n      change.clone().map(Into::into),\n      None,\n      fee.0,\n    ) {\n      Ok(signable) => Ok(Some(signable)),\n      Err(TransactionError::NoInputs) => {\n        panic!(\"trying to create a bitcoin transaction without inputs\")\n      }\n      // No outputs left and the change isn't worth enough/not even enough funds to pay the fee\n      Err(TransactionError::NoOutputs | TransactionError::NotEnoughFunds { .. }) => Ok(None),\n      // amortize_fee removes payments which fall below the dust threshold\n      Err(TransactionError::DustPayment) => panic!(\"dust payment despite removing dust\"),\n      Err(TransactionError::TooMuchData) => {\n        panic!(\"too much data despite not specifying data\")\n      }\n      Err(TransactionError::TooLowFee) => {\n        panic!(\"created a transaction whose fee is below the minimum\")\n      }\n      Err(TransactionError::TooLargeTransaction) => {\n        panic!(\"created a too large transaction despite limiting inputs/outputs\")\n      }\n    }\n  }\n\n  // Expected script has to start with SHA256 PUSH MSG_HASH OP_EQUALVERIFY ..\n  fn segwit_data_pattern(script: &ScriptBuf) -> Option<bool> {\n    let mut ins = script.instructions();\n\n    // first item should be SHA256 code\n    if ins.next()?.ok()?.opcode()? != OP_SHA256 {\n      return Some(false);\n    }\n\n    // next should be a data push\n    ins.next()?.ok()?.push_bytes()?;\n\n    // next should be a equality check\n    if ins.next()?.ok()?.opcode()? 
!= OP_EQUALVERIFY {\n      return Some(false);\n    }\n\n    Some(true)\n  }\n\n  fn extract_serai_data(tx: &Transaction) -> Vec<u8> {\n    // check outputs\n    let mut data = (|| {\n      for output in &tx.output {\n        if output.script_pubkey.is_op_return() {\n          match output.script_pubkey.instructions_minimal().last() {\n            Some(Ok(Instruction::PushBytes(data))) => return data.as_bytes().to_vec(),\n            _ => continue,\n          }\n        }\n      }\n      vec![]\n    })();\n\n    // check inputs\n    if data.is_empty() {\n      for input in &tx.input {\n        let witness = input.witness.to_vec();\n        // expected witness at least has to have 2 items, msg and the redeem script.\n        if witness.len() >= 2 {\n          let redeem_script = ScriptBuf::from_bytes(witness.last().unwrap().clone());\n          if Self::segwit_data_pattern(&redeem_script) == Some(true) {\n            data.clone_from(&witness[witness.len() - 2]); // len() - 1 is the redeem_script\n            break;\n          }\n        }\n      }\n    }\n\n    data.truncate(MAX_DATA_LEN.try_into().unwrap());\n    data\n  }\n\n  #[cfg(test)]\n  pub fn sign_btc_input_for_p2pkh(\n    tx: &Transaction,\n    input_index: usize,\n    private_key: &PrivateKey,\n  ) -> ScriptBuf {\n    use bitcoin_serai::bitcoin::{Network as BNetwork, Address as BAddress};\n\n    let public_key = PublicKey::from_private_key(SECP256K1, private_key);\n    let main_addr = BAddress::p2pkh(public_key, BNetwork::Regtest);\n\n    let mut der = SECP256K1\n      .sign_ecdsa_low_r(\n        &Message::from_digest_slice(\n          SighashCache::new(tx)\n            .legacy_signature_hash(\n              input_index,\n              &main_addr.script_pubkey(),\n              EcdsaSighashType::All.to_u32(),\n            )\n            .unwrap()\n            .to_raw_hash()\n            .as_ref(),\n        )\n        .unwrap(),\n        &private_key.inner,\n      )\n      .serialize_der()\n      
.to_vec();\n    der.push(1);\n\n    ScriptBuf::builder()\n      .push_slice(PushBytesBuf::try_from(der).unwrap())\n      .push_key(&public_key)\n      .into_script()\n  }\n}\n\n// Bitcoin has a max weight of 400,000 (MAX_STANDARD_TX_WEIGHT)\n// A non-SegWit TX will have 4 weight units per byte, leaving a max size of 100,000 bytes\n// While our inputs are entirely SegWit, such fine tuning is not necessary and could create\n// issues in the future (if the size decreases or we misevaluate it)\n// It also offers a minimal amount of benefit when we are able to logarithmically accumulate\n// inputs\n// For 128-byte inputs (36-byte output specification, 64-byte signature, whatever overhead) and\n// 64-byte outputs (40-byte script, 8-byte amount, whatever overhead), they together take up 192\n// bytes\n// 100,000 / 192 = 520\n// 520 * 192 leaves 160 bytes of overhead for the transaction structure itself\nconst MAX_INPUTS: usize = 520;\nconst MAX_OUTPUTS: usize = 520;\n\nfn address_from_key(key: ProjectivePoint) -> Address {\n  Address::new(\n    p2tr_script_buf(key).expect(\"creating address from key which isn't properly tweaked\"),\n  )\n  .expect(\"couldn't create Serai-representable address for P2TR script\")\n}\n\n#[async_trait]\nimpl Network for Bitcoin {\n  type Curve = Secp256k1;\n\n  type Transaction = Transaction;\n  type Block = Block;\n\n  type Output = Output;\n  type SignableTransaction = SignableTransaction;\n  type Eventuality = Eventuality;\n  type TransactionMachine = TransactionMachine;\n\n  type Scheduler = Scheduler<Bitcoin>;\n\n  type Address = Address;\n\n  const NETWORK: ExternalNetworkId = ExternalNetworkId::Bitcoin;\n  const ID: &'static str = \"Bitcoin\";\n  const ESTIMATED_BLOCK_TIME_IN_SECONDS: usize = 600;\n  const CONFIRMATIONS: usize = 6;\n\n  /*\n    A Taproot input is:\n    - 36 bytes for the OutPoint\n    - 0 bytes for the script (+1 byte for the length)\n    - 4 bytes for the sequence\n    Per 
https://developer.bitcoin.org/reference/transactions.html#raw-transaction-format\n\n    There's also:\n    - 1 byte for the witness length\n    - 1 byte for the signature length\n    - 64 bytes for the signature\n    which have the SegWit discount.\n\n    (4 * (36 + 1 + 4)) + (1 + 1 + 64) = 164 + 66 = 230 weight units\n    230 ceil div 4 = 57 vbytes\n\n    Bitcoin defines multiple minimum feerate constants *per kilo-vbyte*. Currently, these are:\n    - 1000 sat/kilo-vbyte for a transaction to be relayed\n    - Each output's value must exceed the fee of the TX spending it at 3000 sat/kilo-vbyte\n    The DUST constant needs to be determined by the latter.\n    Since these are solely relay rules, and may be raised, we require all outputs be spendable\n    under a 5000 sat/kilo-vbyte fee rate.\n\n    5000 sat/kilo-vbyte = 5 sat/vbyte\n    5 * 57 = 285 sats/spent-output\n\n    Even if an output took 100 bytes (it should be just ~29-43), taking 400 weight units, adding\n    100 vbytes, tripling the transaction size, then the sats/tx would be < 1000.\n\n    Increase by an order of magnitude, in order to ensure this is actually worth our time, and we\n    get 10,000 satoshis.\n  */\n  const DUST: u64 = 10_000;\n\n  // 2 inputs should be 2 * 230 = 460 weight units\n  // The output should be ~36 bytes, or 144 weight units\n  // The overhead should be ~20 bytes at most, or 80 weight units\n  // 684 weight units, 171 vbytes, round up to 200\n  // 200 vbytes at 1 sat/weight (our current minimum fee, 4 sat/vbyte) = 800 sat fee for the\n  // aggregation TX\n  const COST_TO_AGGREGATE: u64 = 800;\n\n  const MAX_OUTPUTS: usize = MAX_OUTPUTS;\n\n  fn tweak_keys(keys: &mut ThresholdKeys<Self::Curve>) {\n    *keys = tweak_keys(keys.clone());\n    // Also create a scanner to assert these keys, and all expected paths, are usable\n    scanner(keys.group_key());\n  }\n\n  #[cfg(test)]\n  async fn external_address(&self, key: ProjectivePoint) -> Address {\n    address_from_key(key)\n  }\n\n 
 fn branch_address(key: ProjectivePoint) -> Option<Address> {\n    let (_, offsets, _) = scanner(key);\n    Some(address_from_key(key + (ProjectivePoint::GENERATOR * offsets[&OutputType::Branch])))\n  }\n\n  fn change_address(key: ProjectivePoint) -> Option<Address> {\n    let (_, offsets, _) = scanner(key);\n    Some(address_from_key(key + (ProjectivePoint::GENERATOR * offsets[&OutputType::Change])))\n  }\n\n  fn forward_address(key: ProjectivePoint) -> Option<Address> {\n    let (_, offsets, _) = scanner(key);\n    Some(address_from_key(key + (ProjectivePoint::GENERATOR * offsets[&OutputType::Forwarded])))\n  }\n\n  async fn get_latest_block_number(&self) -> Result<usize, NetworkError> {\n    self.rpc.get_latest_block_number().await.map_err(|_| NetworkError::ConnectionError)\n  }\n\n  async fn get_block(&self, number: usize) -> Result<Self::Block, NetworkError> {\n    let block_hash =\n      self.rpc.get_block_hash(number).await.map_err(|_| NetworkError::ConnectionError)?;\n    self.rpc.get_block(&block_hash).await.map_err(|_| NetworkError::ConnectionError)\n  }\n\n  async fn get_outputs(&self, block: &Self::Block, key: ProjectivePoint) -> Vec<Output> {\n    let (scanner, _, kinds) = scanner(key);\n\n    let mut outputs = vec![];\n    // Skip the coinbase transaction which is burdened by maturity\n    for tx in &block.txdata[1 ..] 
{\n      for output in scanner.scan_transaction(tx) {\n        let offset_repr = output.offset().to_repr();\n        let offset_repr_ref: &[u8] = offset_repr.as_ref();\n        let kind = kinds[offset_repr_ref];\n\n        let output = Output { kind, presumed_origin: None, output, data: vec![] };\n        assert_eq!(output.tx_id(), tx.id());\n        outputs.push(output);\n      }\n\n      if outputs.is_empty() {\n        continue;\n      }\n\n      // populate the outputs with the origin and data\n      let presumed_origin = {\n        // This may identify the P2WSH output *embedding the InInstruction* as the origin, which\n        // would be a bit trickier to spend that a traditional output...\n        // There's no risk of the InInstruction going missing as it'd already be on-chain though\n        // We *could* parse out the script *without the InInstruction prefix* and declare that the\n        // origin\n        // TODO\n        let spent_output = {\n          let input = &tx.input[0];\n          let mut spent_tx = input.previous_output.txid.as_raw_hash().to_byte_array();\n          spent_tx.reverse();\n          let mut tx;\n          while {\n            tx = self.rpc.get_transaction(&spent_tx).await;\n            tx.is_err()\n          } {\n            log::error!(\"couldn't get transaction from bitcoin node: {tx:?}\");\n            sleep(Duration::from_secs(5)).await;\n          }\n          tx.unwrap().output.swap_remove(usize::try_from(input.previous_output.vout).unwrap())\n        };\n        Address::new(spent_output.script_pubkey)\n      };\n      let data = Self::extract_serai_data(tx);\n      for output in &mut outputs {\n        if output.kind == OutputType::External {\n          output.data.clone_from(&data);\n        }\n        output.presumed_origin.clone_from(&presumed_origin);\n      }\n    }\n\n    outputs\n  }\n\n  async fn get_eventuality_completions(\n    &self,\n    eventualities: &mut EventualitiesTracker<Eventuality>,\n    block: 
&Self::Block,\n  ) -> HashMap<[u8; 32], (usize, [u8; 32], Transaction)> {\n    let mut res = HashMap::new();\n    if eventualities.map.is_empty() {\n      return res;\n    }\n\n    fn check_block(\n      eventualities: &mut EventualitiesTracker<Eventuality>,\n      block: &Block,\n      res: &mut HashMap<[u8; 32], (usize, [u8; 32], Transaction)>,\n    ) {\n      for tx in &block.txdata[1 ..] {\n        if let Some((plan, _)) = eventualities.map.remove(tx.id().as_slice()) {\n          res.insert(plan, (eventualities.block_number, tx.id(), tx.clone()));\n        }\n      }\n\n      eventualities.block_number += 1;\n    }\n\n    let this_block_hash = block.id();\n    let this_block_num = (async {\n      loop {\n        match self.rpc.get_block_number(&this_block_hash).await {\n          Ok(number) => return number,\n          Err(e) => {\n            log::error!(\"couldn't get the block number for {}: {}\", hex::encode(this_block_hash), e)\n          }\n        }\n        sleep(Duration::from_secs(60)).await;\n      }\n    })\n    .await;\n\n    for block_num in (eventualities.block_number + 1) .. 
this_block_num {\n      let block = {\n        let mut block;\n        while {\n          block = self.get_block(block_num).await;\n          block.is_err()\n        } {\n          log::error!(\"couldn't get block {}: {}\", block_num, block.err().unwrap());\n          sleep(Duration::from_secs(60)).await;\n        }\n        block.unwrap()\n      };\n\n      check_block(eventualities, &block, &mut res);\n    }\n\n    // Also check the current block\n    check_block(eventualities, block, &mut res);\n    assert_eq!(eventualities.block_number, this_block_num);\n\n    res\n  }\n\n  async fn needed_fee(\n    &self,\n    block_number: usize,\n    inputs: &[Output],\n    payments: &[Payment<Self>],\n    change: &Option<Address>,\n  ) -> Result<Option<u64>, NetworkError> {\n    Ok(\n      self\n        .make_signable_transaction(block_number, inputs, payments, change, true)\n        .await?\n        .map(|signable| signable.needed_fee()),\n    )\n  }\n\n  async fn signable_transaction(\n    &self,\n    block_number: usize,\n    _plan_id: &[u8; 32],\n    _key: ProjectivePoint,\n    inputs: &[Output],\n    payments: &[Payment<Self>],\n    change: &Option<Address>,\n    (): &(),\n  ) -> Result<Option<(Self::SignableTransaction, Self::Eventuality)>, NetworkError> {\n    Ok(self.make_signable_transaction(block_number, inputs, payments, change, false).await?.map(\n      |signable| {\n        let eventuality = Eventuality(signable.txid());\n        (SignableTransaction { actual: signable }, eventuality)\n      },\n    ))\n  }\n\n  async fn attempt_sign(\n    &self,\n    keys: ThresholdKeys<Self::Curve>,\n    transaction: Self::SignableTransaction,\n  ) -> Result<Self::TransactionMachine, NetworkError> {\n    Ok(transaction.actual.clone().multisig(&keys).expect(\"used the wrong keys\"))\n  }\n\n  async fn publish_completion(&self, tx: &Transaction) -> Result<(), NetworkError> {\n    match self.rpc.send_raw_transaction(tx).await {\n      Ok(_) => (),\n      
Err(RpcError::ConnectionError) => Err(NetworkError::ConnectionError)?,\n      // TODO: Distinguish already in pool vs double spend (other signing attempt succeeded) vs\n      // invalid transaction\n      Err(e) => panic!(\"failed to publish TX {}: {e}\", tx.compute_txid()),\n    }\n    Ok(())\n  }\n\n  async fn confirm_completion(\n    &self,\n    eventuality: &Self::Eventuality,\n    _: &EmptyClaim,\n  ) -> Result<Option<Transaction>, NetworkError> {\n    Ok(Some(\n      self.rpc.get_transaction(&eventuality.0).await.map_err(|_| NetworkError::ConnectionError)?,\n    ))\n  }\n\n  #[cfg(test)]\n  async fn get_block_number(&self, id: &[u8; 32]) -> usize {\n    self.rpc.get_block_number(id).await.unwrap()\n  }\n\n  #[cfg(test)]\n  async fn check_eventuality_by_claim(\n    &self,\n    eventuality: &Self::Eventuality,\n    _: &EmptyClaim,\n  ) -> bool {\n    self.rpc.get_transaction(&eventuality.0).await.is_ok()\n  }\n\n  #[cfg(test)]\n  async fn get_transaction_by_eventuality(&self, _: usize, id: &Eventuality) -> Transaction {\n    self.rpc.get_transaction(&id.0).await.unwrap()\n  }\n\n  #[cfg(test)]\n  async fn mine_block(&self) {\n    use bitcoin_serai::bitcoin::{Network as BNetwork, Address as BAddress};\n\n    self\n      .rpc\n      .rpc_call::<Vec<String>>(\n        \"generatetoaddress\",\n        serde_json::json!([1, BAddress::p2sh(Script::new(), BNetwork::Regtest).unwrap()]),\n      )\n      .await\n      .unwrap();\n  }\n\n  #[cfg(test)]\n  async fn test_send(&self, address: Address) -> Block {\n    use bitcoin_serai::bitcoin::{Network as BNetwork, Address as BAddress};\n\n    let secret_key = SecretKey::new(&mut rand_core::OsRng);\n    let private_key = PrivateKey::new(secret_key, BNetwork::Regtest);\n    let public_key = PublicKey::from_private_key(SECP256K1, &private_key);\n    let main_addr = BAddress::p2pkh(public_key, BNetwork::Regtest);\n\n    let new_block = self.get_latest_block_number().await.unwrap() + 1;\n    self\n      .rpc\n      
.rpc_call::<Vec<String>>(\"generatetoaddress\", serde_json::json!([100, main_addr]))\n      .await\n      .unwrap();\n\n    let tx = self.get_block(new_block).await.unwrap().txdata.swap_remove(0);\n    let mut tx = Transaction {\n      version: Version(2),\n      lock_time: LockTime::ZERO,\n      input: vec![TxIn {\n        previous_output: OutPoint { txid: tx.compute_txid(), vout: 0 },\n        script_sig: Script::new().into(),\n        sequence: Sequence(u32::MAX),\n        witness: Witness::default(),\n      }],\n      output: vec![TxOut {\n        value: tx.output[0].value - BAmount::from_sat(10000),\n        script_pubkey: address.clone().into(),\n      }],\n    };\n    tx.input[0].script_sig = Self::sign_btc_input_for_p2pkh(&tx, 0, &private_key);\n\n    let block = self.get_latest_block_number().await.unwrap() + 1;\n    self.rpc.send_raw_transaction(&tx).await.unwrap();\n    for _ in 0 .. Self::CONFIRMATIONS {\n      self.mine_block().await;\n    }\n    self.get_block(block).await.unwrap()\n  }\n}\n\nimpl UtxoNetwork for Bitcoin {\n  const MAX_INPUTS: usize = MAX_INPUTS;\n}\n"
  },
  {
    "path": "processor/src/networks/ethereum.rs",
    "content": "#![allow(deprecated)]\n\nuse core::{fmt, time::Duration};\nuse std::{\n  sync::Arc,\n  collections::{HashSet, HashMap},\n  io,\n};\n\nuse async_trait::async_trait;\n\nuse ciphersuite::{group::GroupEncoding, Ciphersuite};\nuse ciphersuite_kp256::Secp256k1;\nuse frost::ThresholdKeys;\n\nuse ethereum_serai::{\n  alloy::{\n    primitives::U256,\n    rpc_types::{BlockTransactionsKind, BlockNumberOrTag, Transaction},\n    simple_request_transport::SimpleRequest,\n    rpc_client::ClientBuilder,\n    provider::{Provider, RootProvider},\n  },\n  crypto::{PublicKey, Signature},\n  erc20::Erc20,\n  deployer::Deployer,\n  router::{Router, Coin as EthereumCoin, InInstruction as EthereumInInstruction},\n  machine::*,\n};\n#[cfg(test)]\nuse ethereum_serai::alloy::primitives::B256;\n\nuse tokio::{\n  time::sleep,\n  sync::{RwLock, RwLockReadGuard},\n};\n#[cfg(not(test))]\nuse tokio::{\n  io::{AsyncReadExt, AsyncWriteExt},\n  net::TcpStream,\n};\n\nuse serai_client::{\n  primitives::{ExternalCoin, Amount, ExternalBalance, ExternalNetworkId},\n  validator_sets::primitives::Session,\n};\n\nuse crate::{\n  Db, Payment,\n  networks::{\n    OutputType, Output, Transaction as TransactionTrait, SignableTransaction, Block,\n    Eventuality as EventualityTrait, EventualitiesTracker, NetworkError, Network,\n  },\n  key_gen::NetworkKeyDb,\n  multisigs::scheduler::{\n    Scheduler as SchedulerTrait,\n    smart_contract::{Addendum, Scheduler},\n  },\n};\n\n#[cfg(not(test))]\nconst DAI: [u8; 20] =\n  match const_hex::const_decode_to_array(b\"0x6B175474E89094C44Da98b954EedeAC495271d0F\") {\n    Ok(res) => res,\n    Err(_) => panic!(\"invalid non-test DAI hex address\"),\n  };\n#[cfg(test)] // TODO\nconst DAI: [u8; 20] =\n  match const_hex::const_decode_to_array(b\"0000000000000000000000000000000000000000\") {\n    Ok(res) => res,\n    Err(_) => panic!(\"invalid test DAI hex address\"),\n  };\n\nfn coin_to_serai_coin(coin: &EthereumCoin) -> Option<ExternalCoin> {\n  match coin {\n  
  EthereumCoin::Ether => Some(ExternalCoin::Ether),\n    EthereumCoin::Erc20(token) => {\n      if *token == DAI {\n        return Some(ExternalCoin::Dai);\n      }\n      None\n    }\n  }\n}\n\nfn amount_to_serai_amount(coin: ExternalCoin, amount: U256) -> Amount {\n  assert_eq!(coin.network(), ExternalNetworkId::Ethereum);\n  assert_eq!(coin.decimals(), 8);\n  // Remove 10 decimals so we go from 18 decimals to 8 decimals\n  let divisor = U256::from(10_000_000_000u64);\n  // This is valid up to 184b, which is assumed for the coins allowed\n  Amount(u64::try_from(amount / divisor).unwrap())\n}\n\nfn balance_to_ethereum_amount(balance: ExternalBalance) -> U256 {\n  assert_eq!(balance.coin.network(), ExternalNetworkId::Ethereum);\n  assert_eq!(balance.coin.decimals(), 8);\n  // Restore 10 decimals so we go from 8 decimals to 18 decimals\n  let factor = U256::from(10_000_000_000u64);\n  U256::from(balance.amount.0) * factor\n}\n\n#[derive(Clone, Copy, PartialEq, Eq, Debug)]\npub struct Address(pub [u8; 20]);\nimpl TryFrom<Vec<u8>> for Address {\n  type Error = ();\n  fn try_from(bytes: Vec<u8>) -> Result<Address, ()> {\n    if bytes.len() != 20 {\n      Err(())?;\n    }\n    let mut res = [0; 20];\n    res.copy_from_slice(&bytes);\n    Ok(Address(res))\n  }\n}\nimpl TryInto<Vec<u8>> for Address {\n  type Error = ();\n  fn try_into(self) -> Result<Vec<u8>, ()> {\n    Ok(self.0.to_vec())\n  }\n}\n\nimpl fmt::Display for Address {\n  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n    ethereum_serai::alloy::primitives::Address::from(self.0).fmt(f)\n  }\n}\n\nimpl SignableTransaction for RouterCommand {\n  fn fee(&self) -> u64 {\n    // Return a fee of 0 as we'll handle amortization on our end\n    0\n  }\n}\n\n#[async_trait]\nimpl<D: Db> TransactionTrait<Ethereum<D>> for Transaction {\n  type Id = [u8; 32];\n  fn id(&self) -> Self::Id {\n    self.hash.0\n  }\n\n  #[cfg(test)]\n  async fn fee(&self, _network: &Ethereum<D>) -> u64 {\n    // Return a fee of 0 as 
we'll handle amortization on our end\n    0\n  }\n}\n\n// We use 32-block Epochs to represent blocks.\n#[derive(Clone, Copy, PartialEq, Eq, Debug)]\npub struct Epoch {\n  // The hash of the block which ended the prior Epoch.\n  prior_end_hash: [u8; 32],\n  // The first block number within this Epoch.\n  start: u64,\n  // The hash of the last block within this Epoch.\n  end_hash: [u8; 32],\n  // The monotonic time for this Epoch.\n  time: u64,\n}\n\nimpl Epoch {\n  fn end(&self) -> u64 {\n    self.start + 31\n  }\n}\n\n#[async_trait]\nimpl<D: Db> Block<Ethereum<D>> for Epoch {\n  type Id = [u8; 32];\n  fn id(&self) -> [u8; 32] {\n    self.end_hash\n  }\n  fn parent(&self) -> [u8; 32] {\n    self.prior_end_hash\n  }\n  async fn time(&self, _: &Ethereum<D>) -> u64 {\n    self.time\n  }\n}\n\nimpl<D: Db> Output<Ethereum<D>> for EthereumInInstruction {\n  type Id = [u8; 32];\n\n  fn kind(&self) -> OutputType {\n    OutputType::External\n  }\n\n  fn id(&self) -> Self::Id {\n    let mut id = [0; 40];\n    id[.. 32].copy_from_slice(&self.id.0);\n    id[32 ..].copy_from_slice(&self.id.1.to_le_bytes());\n    *ethereum_serai::alloy::primitives::keccak256(id)\n  }\n  fn tx_id(&self) -> [u8; 32] {\n    self.id.0\n  }\n  fn key(&self) -> <Secp256k1 as Ciphersuite>::G {\n    self.key_at_end_of_block\n  }\n\n  fn presumed_origin(&self) -> Option<Address> {\n    Some(Address(self.from))\n  }\n\n  fn balance(&self) -> ExternalBalance {\n    let coin = coin_to_serai_coin(&self.coin).unwrap_or_else(|| {\n      panic!(\n        \"requesting coin for an EthereumInInstruction with a coin {}\",\n        \"we don't handle. 
this never should have been yielded\"\n      )\n    });\n    ExternalBalance { coin, amount: amount_to_serai_amount(coin, self.amount) }\n  }\n  fn data(&self) -> &[u8] {\n    &self.data\n  }\n\n  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {\n    EthereumInInstruction::write(self, writer)\n  }\n  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {\n    EthereumInInstruction::read(reader)\n  }\n}\n\n#[derive(Clone, PartialEq, Eq, Debug)]\npub struct Claim {\n  signature: [u8; 64],\n}\nimpl AsRef<[u8]> for Claim {\n  fn as_ref(&self) -> &[u8] {\n    &self.signature\n  }\n}\nimpl AsMut<[u8]> for Claim {\n  fn as_mut(&mut self) -> &mut [u8] {\n    &mut self.signature\n  }\n}\nimpl Default for Claim {\n  fn default() -> Self {\n    Self { signature: [0; 64] }\n  }\n}\nimpl From<&Signature> for Claim {\n  fn from(sig: &Signature) -> Self {\n    Self { signature: sig.to_bytes() }\n  }\n}\n\n#[derive(Clone, PartialEq, Eq, Debug)]\npub struct Eventuality(PublicKey, RouterCommand);\nimpl EventualityTrait for Eventuality {\n  type Claim = Claim;\n  type Completion = SignedRouterCommand;\n\n  fn lookup(&self) -> Vec<u8> {\n    match self.1 {\n      RouterCommand::UpdateSeraiKey { nonce, .. } | RouterCommand::Execute { nonce, .. 
} => {\n        nonce.as_le_bytes().to_vec()\n      }\n    }\n  }\n\n  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {\n    let point = Secp256k1::read_G(reader)?;\n    let command = RouterCommand::read(reader)?;\n    Ok(Eventuality(\n      PublicKey::new(point).ok_or(io::Error::other(\"unusable key within Eventuality\"))?,\n      command,\n    ))\n  }\n  fn serialize(&self) -> Vec<u8> {\n    let mut res = vec![];\n    res.extend(self.0.point().to_bytes().as_slice());\n    self.1.write(&mut res).unwrap();\n    res\n  }\n\n  fn claim(completion: &Self::Completion) -> Self::Claim {\n    Claim::from(completion.signature())\n  }\n  fn serialize_completion(completion: &Self::Completion) -> Vec<u8> {\n    let mut res = vec![];\n    completion.write(&mut res).unwrap();\n    res\n  }\n  fn read_completion<R: io::Read>(reader: &mut R) -> io::Result<Self::Completion> {\n    SignedRouterCommand::read(reader)\n  }\n}\n\n#[derive(Clone)]\npub struct Ethereum<D: Db> {\n  // This DB is solely used to access the first key generated, as needed to determine the Router's\n  // address. 
Accordingly, all methods present are consistent to a Serai chain with a finalized\n  // first key (regardless of local state), and this is safe.\n  db: D,\n  #[cfg_attr(test, allow(unused))]\n  relayer_url: String,\n  provider: Arc<RootProvider<SimpleRequest>>,\n  deployer: Deployer,\n  router: Arc<RwLock<Option<Router>>>,\n}\nimpl<D: Db> PartialEq for Ethereum<D> {\n  fn eq(&self, _other: &Ethereum<D>) -> bool {\n    true\n  }\n}\nimpl<D: Db> fmt::Debug for Ethereum<D> {\n  fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {\n    fmt\n      .debug_struct(\"Ethereum\")\n      .field(\"deployer\", &self.deployer)\n      .field(\"router\", &self.router)\n      .finish_non_exhaustive()\n  }\n}\nimpl<D: Db> Ethereum<D> {\n  pub async fn new(db: D, daemon_url: String, relayer_url: String) -> Self {\n    let provider = Arc::new(RootProvider::new(\n      ClientBuilder::default().transport(SimpleRequest::new(daemon_url), true),\n    ));\n\n    let mut deployer = Deployer::new(provider.clone()).await;\n    while !matches!(deployer, Ok(Some(_))) {\n      log::error!(\"Deployer wasn't deployed yet or networking error\");\n      sleep(Duration::from_secs(5)).await;\n      deployer = Deployer::new(provider.clone()).await;\n    }\n    let deployer = deployer.unwrap().unwrap();\n\n    dbg!(&relayer_url);\n    dbg!(relayer_url.len());\n    Ethereum { db, relayer_url, provider, deployer, router: Arc::new(RwLock::new(None)) }\n  }\n\n  // Obtain a reference to the Router, sleeping until it's deployed if it hasn't already been.\n  // This is guaranteed to return Some.\n  pub async fn router(&self) -> RwLockReadGuard<'_, Option<Router>> {\n    // If we've already instantiated the Router, return a read reference\n    {\n      let router = self.router.read().await;\n      if router.is_some() {\n        return router;\n      }\n    }\n\n    // Instantiate it\n    let mut router = self.router.write().await;\n    // If another attempt beat us to it, return\n    if router.is_some() 
{\n      drop(router);\n      return self.router.read().await;\n    }\n\n    // Get the first key from the DB\n    let first_key =\n      NetworkKeyDb::get(&self.db, Session(0)).expect(\"getting outputs before confirming a key\");\n    let key = Secp256k1::read_G(&mut first_key.as_slice()).unwrap();\n    let public_key = PublicKey::new(key).unwrap();\n\n    // Find the router\n    let mut found = self.deployer.find_router(self.provider.clone(), &public_key).await;\n    while !matches!(found, Ok(Some(_))) {\n      log::error!(\"Router wasn't deployed yet or networking error\");\n      sleep(Duration::from_secs(5)).await;\n      found = self.deployer.find_router(self.provider.clone(), &public_key).await;\n    }\n\n    // Set it\n    *router = Some(found.unwrap().unwrap());\n\n    // Downgrade to a read lock\n    // Explicitly doesn't use `downgrade` so that another pending write txn can realize it's no\n    // longer necessary\n    drop(router);\n    self.router.read().await\n  }\n}\n\n#[async_trait]\nimpl<D: Db> Network for Ethereum<D> {\n  type Curve = Secp256k1;\n\n  type Transaction = Transaction;\n  type Block = Epoch;\n\n  type Output = EthereumInInstruction;\n  type SignableTransaction = RouterCommand;\n  type Eventuality = Eventuality;\n  type TransactionMachine = RouterCommandMachine;\n\n  type Scheduler = Scheduler<Self>;\n\n  type Address = Address;\n\n  const NETWORK: ExternalNetworkId = ExternalNetworkId::Ethereum;\n  const ID: &'static str = \"Ethereum\";\n  const ESTIMATED_BLOCK_TIME_IN_SECONDS: usize = 32 * 12;\n  const CONFIRMATIONS: usize = 1;\n\n  const DUST: u64 = 0; // TODO\n\n  const COST_TO_AGGREGATE: u64 = 0;\n\n  // TODO: usize::max, with a merkle tree in the router\n  const MAX_OUTPUTS: usize = 256;\n\n  fn tweak_keys(keys: &mut ThresholdKeys<Self::Curve>) {\n    while PublicKey::new(keys.group_key()).is_none() {\n      *keys = keys.clone().offset(<Secp256k1 as Ciphersuite>::F::ONE);\n    }\n  }\n\n  #[cfg(test)]\n  async fn 
external_address(&self, _key: <Secp256k1 as Ciphersuite>::G) -> Address {\n    Address(self.router().await.as_ref().unwrap().address())\n  }\n\n  fn branch_address(_key: <Secp256k1 as Ciphersuite>::G) -> Option<Address> {\n    None\n  }\n\n  fn change_address(_key: <Secp256k1 as Ciphersuite>::G) -> Option<Address> {\n    None\n  }\n\n  fn forward_address(_key: <Secp256k1 as Ciphersuite>::G) -> Option<Address> {\n    None\n  }\n\n  async fn get_latest_block_number(&self) -> Result<usize, NetworkError> {\n    let actual_number = self\n      .provider\n      .get_block(BlockNumberOrTag::Finalized.into(), BlockTransactionsKind::Hashes)\n      .await\n      .map_err(|_| NetworkError::ConnectionError)?\n      .ok_or(NetworkError::ConnectionError)?\n      .header\n      .number;\n    // Error if there hasn't been a full epoch yet\n    if actual_number < 32 {\n      Err(NetworkError::ConnectionError)?\n    }\n    // If this is 33, the division will return 1, yet 1 is the epoch in progress\n    let latest_full_epoch = (actual_number / 32).saturating_sub(1);\n    Ok(latest_full_epoch.try_into().unwrap())\n  }\n\n  async fn get_block(&self, number: usize) -> Result<Self::Block, NetworkError> {\n    let latest_finalized = self.get_latest_block_number().await?;\n    if number > latest_finalized {\n      Err(NetworkError::ConnectionError)?\n    }\n\n    let start = number * 32;\n    let prior_end_hash = if start == 0 {\n      [0; 32]\n    } else {\n      self\n        .provider\n        .get_block(u64::try_from(start - 1).unwrap().into(), BlockTransactionsKind::Hashes)\n        .await\n        .ok()\n        .flatten()\n        .ok_or(NetworkError::ConnectionError)?\n        .header\n        .hash\n        .into()\n    };\n\n    let end_header = self\n      .provider\n      .get_block(u64::try_from(start + 31).unwrap().into(), BlockTransactionsKind::Hashes)\n      .await\n      .ok()\n      .flatten()\n      .ok_or(NetworkError::ConnectionError)?\n      .header;\n\n    let 
end_hash = end_header.hash.into();\n    let time = end_header.timestamp;\n\n    Ok(Epoch { prior_end_hash, start: start.try_into().unwrap(), end_hash, time })\n  }\n\n  async fn get_outputs(\n    &self,\n    block: &Self::Block,\n    _: <Secp256k1 as Ciphersuite>::G,\n  ) -> Vec<Self::Output> {\n    let router = self.router().await;\n    let router = router.as_ref().unwrap();\n    // Grab the key at the end of the epoch\n    let key_at_end_of_block = loop {\n      match router.key_at_end_of_block(block.start + 31).await {\n        Ok(Some(key)) => break key,\n        Ok(None) => return vec![],\n        Err(e) => {\n          log::error!(\"couldn't connect to router for the key at the end of the block: {e:?}\");\n          sleep(Duration::from_secs(5)).await;\n          continue;\n        }\n      }\n    };\n\n    let mut all_events = vec![];\n    let mut top_level_txids = HashSet::new();\n    for erc20_addr in [DAI] {\n      let erc20 = Erc20::new(self.provider.clone(), erc20_addr);\n\n      for block in block.start .. (block.start + 32) {\n        let transfers = loop {\n          match erc20.top_level_transfers(block, router.address()).await {\n            Ok(transfers) => break transfers,\n            Err(e) => {\n              log::error!(\"couldn't connect to Ethereum node for the top-level transfers: {e:?}\");\n              sleep(Duration::from_secs(5)).await;\n              continue;\n            }\n          }\n        };\n\n        for transfer in transfers {\n          top_level_txids.insert(transfer.id);\n          all_events.push(EthereumInInstruction {\n            id: (transfer.id, 0),\n            from: transfer.from,\n            coin: EthereumCoin::Erc20(erc20_addr),\n            amount: transfer.amount,\n            data: transfer.data,\n            key_at_end_of_block,\n          });\n        }\n      }\n    }\n\n    for block in block.start .. 
(block.start + 32) {\n      let mut events = router.in_instructions(block, &HashSet::from([DAI])).await;\n      while let Err(e) = events {\n        log::error!(\"couldn't connect to Ethereum node for the Router's events: {e:?}\");\n        sleep(Duration::from_secs(5)).await;\n        events = router.in_instructions(block, &HashSet::from([DAI])).await;\n      }\n      let mut events = events.unwrap();\n      for event in &mut events {\n        // A transaction should either be a top-level transfer or a Router InInstruction\n        if top_level_txids.contains(&event.id.0) {\n          panic!(\"top-level transfer had {} and router had {:?}\", hex::encode(event.id.0), event);\n        }\n        // Overwrite the key at end of block to key at end of epoch\n        event.key_at_end_of_block = key_at_end_of_block;\n      }\n      all_events.extend(events);\n    }\n\n    for event in &all_events {\n      assert!(\n        coin_to_serai_coin(&event.coin).is_some(),\n        \"router yielded events for unrecognized coins\"\n      );\n    }\n    all_events\n  }\n\n  async fn get_eventuality_completions(\n    &self,\n    eventualities: &mut EventualitiesTracker<Self::Eventuality>,\n    block: &Self::Block,\n  ) -> HashMap<\n    [u8; 32],\n    (\n      usize,\n      <Self::Transaction as TransactionTrait<Self>>::Id,\n      <Self::Eventuality as EventualityTrait>::Completion,\n    ),\n  > {\n    let mut res = HashMap::new();\n    if eventualities.map.is_empty() {\n      return res;\n    }\n\n    let router = self.router().await;\n    let router = router.as_ref().unwrap();\n\n    let past_scanned_epoch = loop {\n      match self.get_block(eventualities.block_number).await {\n        Ok(block) => break block,\n        Err(e) => log::error!(\"couldn't get the last scanned block in the tracker: {}\", e),\n      }\n      sleep(Duration::from_secs(10)).await;\n    };\n    assert_eq!(\n      past_scanned_epoch.start / 32,\n      u64::try_from(eventualities.block_number).unwrap(),\n  
    \"assumption of tracker block number's relation to epoch start is incorrect\"\n    );\n\n    // Iterate from after the epoch number in the tracker to the end of this epoch\n    for block_num in (past_scanned_epoch.end() + 1) ..= block.end() {\n      let executed = loop {\n        match router.executed_commands(block_num).await {\n          Ok(executed) => break executed,\n          Err(e) => log::error!(\"couldn't get the executed commands in block {block_num}: {e}\"),\n        }\n        sleep(Duration::from_secs(10)).await;\n      };\n\n      for executed in executed {\n        let lookup = executed.nonce.to_le_bytes().to_vec();\n        if let Some((plan_id, eventuality)) = eventualities.map.get(&lookup) {\n          if let Some(command) =\n            SignedRouterCommand::new(&eventuality.0, eventuality.1.clone(), &executed.signature)\n          {\n            res.insert(*plan_id, (block_num.try_into().unwrap(), executed.tx_id, command));\n            eventualities.map.remove(&lookup);\n          }\n        }\n      }\n    }\n    eventualities.block_number = (block.start / 32).try_into().unwrap();\n\n    res\n  }\n\n  async fn needed_fee(\n    &self,\n    _block_number: usize,\n    inputs: &[Self::Output],\n    _payments: &[Payment<Self>],\n    _change: &Option<Self::Address>,\n  ) -> Result<Option<u64>, NetworkError> {\n    assert_eq!(inputs.len(), 0);\n    // Claim no fee is needed so we can perform amortization ourselves\n    Ok(Some(0))\n  }\n\n  async fn signable_transaction(\n    &self,\n    _block_number: usize,\n    _plan_id: &[u8; 32],\n    key: <Self::Curve as Ciphersuite>::G,\n    inputs: &[Self::Output],\n    payments: &[Payment<Self>],\n    change: &Option<Self::Address>,\n    scheduler_addendum: &<Self::Scheduler as SchedulerTrait<Self>>::Addendum,\n  ) -> Result<Option<(Self::SignableTransaction, Self::Eventuality)>, NetworkError> {\n    assert_eq!(inputs.len(), 0);\n    assert!(change.is_none());\n    let chain_id = 
self.provider.get_chain_id().await.map_err(|_| NetworkError::ConnectionError)?;\n\n    // TODO: Perform fee amortization (in scheduler?\n    // TODO: Make this function internal and have needed_fee properly return None as expected?\n    // TODO: signable_transaction is written as cannot return None if needed_fee returns Some\n    // TODO: Why can this return None at all if it isn't allowed to return None?\n\n    let command = match scheduler_addendum {\n      Addendum::Nonce(nonce) => RouterCommand::Execute {\n        chain_id: U256::try_from(chain_id).unwrap(),\n        nonce: U256::try_from(*nonce).unwrap(),\n        outs: payments\n          .iter()\n          .filter_map(|payment| {\n            Some(OutInstruction {\n              target: if let Some(data) = payment.data.as_ref() {\n                // This introspects the Call serialization format, expecting the first 20 bytes to\n                // be the address\n                // This avoids wasting the 20-bytes allocated within address\n                let full_data = [payment.address.0.as_slice(), data].concat();\n                let mut reader = full_data.as_slice();\n\n                let mut calls = vec![];\n                while !reader.is_empty() {\n                  calls.push(Call::read(&mut reader).ok()?)\n                }\n                // The above must have executed at least once since reader contains the address\n                assert_eq!(calls[0].to, payment.address.0);\n\n                OutInstructionTarget::Calls(calls)\n              } else {\n                OutInstructionTarget::Direct(payment.address.0)\n              },\n              value: {\n                assert_eq!(payment.balance.coin, ExternalCoin::Ether); // TODO\n                balance_to_ethereum_amount(payment.balance)\n              },\n            })\n          })\n          .collect(),\n      },\n      Addendum::RotateTo { nonce, new_key } => {\n        assert!(payments.is_empty());\n        
RouterCommand::UpdateSeraiKey {\n          chain_id: U256::try_from(chain_id).unwrap(),\n          nonce: U256::try_from(*nonce).unwrap(),\n          key: PublicKey::new(*new_key).expect(\"new key wasn't a valid ETH public key\"),\n        }\n      }\n    };\n    Ok(Some((\n      command.clone(),\n      Eventuality(PublicKey::new(key).expect(\"key wasn't a valid ETH public key\"), command),\n    )))\n  }\n\n  async fn attempt_sign(\n    &self,\n    keys: ThresholdKeys<Self::Curve>,\n    transaction: Self::SignableTransaction,\n  ) -> Result<Self::TransactionMachine, NetworkError> {\n    Ok(\n      RouterCommandMachine::new(keys, transaction)\n        .expect(\"keys weren't usable to sign router commands\"),\n    )\n  }\n\n  async fn publish_completion(\n    &self,\n    completion: &<Self::Eventuality as EventualityTrait>::Completion,\n  ) -> Result<(), NetworkError> {\n    // Publish this to the dedicated TX server for a solver to actually publish\n    #[cfg(not(test))]\n    {\n      let mut msg = vec![];\n      match completion.command() {\n        RouterCommand::UpdateSeraiKey { nonce, .. } | RouterCommand::Execute { nonce, .. 
} => {\n          msg.extend(&u32::try_from(nonce).unwrap().to_le_bytes());\n        }\n      }\n      completion.write(&mut msg).unwrap();\n\n      let Ok(mut socket) = TcpStream::connect(&self.relayer_url).await else {\n        log::warn!(\"couldn't connect to the relayer server\");\n        Err(NetworkError::ConnectionError)?\n      };\n      let Ok(()) = socket.write_all(&u32::try_from(msg.len()).unwrap().to_le_bytes()).await else {\n        log::warn!(\"couldn't send the message's len to the relayer server\");\n        Err(NetworkError::ConnectionError)?\n      };\n      let Ok(()) = socket.write_all(&msg).await else {\n        log::warn!(\"couldn't write the message to the relayer server\");\n        Err(NetworkError::ConnectionError)?\n      };\n      if socket.read_u8().await.ok() != Some(1) {\n        log::warn!(\"didn't get the ack from the relayer server\");\n        Err(NetworkError::ConnectionError)?;\n      }\n\n      Ok(())\n    }\n\n    // Publish this using a dummy account we fund with magic RPC commands\n    #[cfg(test)]\n    {\n      let router = self.router().await;\n      let router = router.as_ref().unwrap();\n\n      let mut tx = match completion.command() {\n        RouterCommand::UpdateSeraiKey { key, .. } => {\n          router.update_serai_key(key, completion.signature())\n        }\n        RouterCommand::Execute { outs, .. 
} => router.execute(\n          &outs.iter().cloned().map(Into::into).collect::<Vec<_>>(),\n          completion.signature(),\n        ),\n      };\n      tx.gas_limit = 1_000_000u64;\n      tx.gas_price = 1_000_000_000u64.into();\n      let tx = ethereum_serai::crypto::deterministically_sign(&tx);\n\n      if self.provider.get_transaction_by_hash(*tx.hash()).await.unwrap().is_none() {\n        self\n          .provider\n          .raw_request::<_, ()>(\n            \"anvil_setBalance\".into(),\n            [\n              tx.recover_signer().unwrap().to_string(),\n              (U256::from(tx.tx().gas_limit) * U256::from(tx.tx().gas_price)).to_string(),\n            ],\n          )\n          .await\n          .unwrap();\n\n        let (tx, sig, _) = tx.into_parts();\n        let mut bytes = vec![];\n        tx.encode_with_signature_fields(&sig, &mut bytes);\n        let pending_tx = self.provider.send_raw_transaction(&bytes).await.unwrap();\n        self.mine_block().await;\n        assert!(pending_tx.get_receipt().await.unwrap().status());\n      }\n\n      Ok(())\n    }\n  }\n\n  async fn confirm_completion(\n    &self,\n    eventuality: &Self::Eventuality,\n    claim: &<Self::Eventuality as EventualityTrait>::Claim,\n  ) -> Result<Option<<Self::Eventuality as EventualityTrait>::Completion>, NetworkError> {\n    Ok(SignedRouterCommand::new(&eventuality.0, eventuality.1.clone(), &claim.signature))\n  }\n\n  #[cfg(test)]\n  async fn get_block_number(&self, id: &<Self::Block as Block<Self>>::Id) -> usize {\n    self\n      .provider\n      .get_block(B256::from(*id).into(), BlockTransactionsKind::Hashes)\n      .await\n      .unwrap()\n      .unwrap()\n      .header\n      .number\n      .try_into()\n      .unwrap()\n  }\n\n  #[cfg(test)]\n  async fn check_eventuality_by_claim(\n    &self,\n    eventuality: &Self::Eventuality,\n    claim: &<Self::Eventuality as EventualityTrait>::Claim,\n  ) -> bool {\n    SignedRouterCommand::new(&eventuality.0, 
eventuality.1.clone(), &claim.signature).is_some()\n  }\n\n  #[cfg(test)]\n  async fn get_transaction_by_eventuality(\n    &self,\n    block: usize,\n    eventuality: &Self::Eventuality,\n  ) -> Self::Transaction {\n    // We mine 96 blocks to ensure the 32 blocks relevant are finalized\n    // Back-check the prior two epochs in response to this\n    // TODO: Review why this is sub(3) and not sub(2)\n    for block in block.saturating_sub(3) ..= block {\n      match eventuality.1 {\n        RouterCommand::UpdateSeraiKey { nonce, .. } | RouterCommand::Execute { nonce, .. } => {\n          let router = self.router().await;\n          let router = router.as_ref().unwrap();\n\n          let block = u64::try_from(block).unwrap();\n          let filter = router\n            .key_updated_filter()\n            .from_block(block * 32)\n            .to_block(((block + 1) * 32) - 1)\n            .topic1(nonce);\n          let logs = self.provider.get_logs(&filter).await.unwrap();\n          if let Some(log) = logs.first() {\n            return self\n              .provider\n              .get_transaction_by_hash(log.clone().transaction_hash.unwrap())\n              .await\n              .unwrap()\n              .unwrap();\n          };\n\n          let filter = router\n            .executed_filter()\n            .from_block(block * 32)\n            .to_block(((block + 1) * 32) - 1)\n            .topic1(nonce);\n          let logs = self.provider.get_logs(&filter).await.unwrap();\n          if logs.is_empty() {\n            continue;\n          }\n          return self\n            .provider\n            .get_transaction_by_hash(logs[0].transaction_hash.unwrap())\n            .await\n            .unwrap()\n            .unwrap();\n        }\n      }\n    }\n    panic!(\"couldn't find completion in any three of checked blocks\");\n  }\n\n  #[cfg(test)]\n  async fn mine_block(&self) {\n    self.provider.raw_request::<_, ()>(\"anvil_mine\".into(), [96]).await.unwrap();\n  }\n\n  
#[cfg(test)]\n  async fn test_send(&self, send_to: Self::Address) -> Self::Block {\n    use rand_core::OsRng;\n    use ciphersuite::group::ff::Field;\n    use ethereum_serai::alloy::sol_types::SolCall;\n\n    let key = <Secp256k1 as Ciphersuite>::F::random(&mut OsRng);\n    let address = ethereum_serai::crypto::address(&(Secp256k1::generator() * key));\n\n    // Set a 1.1 ETH balance\n    self\n      .provider\n      .raw_request::<_, ()>(\n        \"anvil_setBalance\".into(),\n        [Address(address).to_string(), \"1100000000000000000\".into()],\n      )\n      .await\n      .unwrap();\n\n    let value = U256::from_str_radix(\"1000000000000000000\", 10).unwrap();\n    let tx = ethereum_serai::alloy::consensus::TxLegacy {\n      chain_id: None,\n      nonce: 0,\n      gas_price: 1_000_000_000u128,\n      gas_limit: 200_000,\n      to: ethereum_serai::alloy::primitives::TxKind::Call(send_to.0.into()),\n      // 1 ETH\n      value,\n      input: ethereum_serai::router::abi::inInstructionCall::new((\n        [0; 20].into(),\n        value,\n        vec![].into(),\n      ))\n      .abi_encode()\n      .into(),\n    };\n\n    use ethereum_serai::alloy::{\n      primitives::{Parity, Signature},\n      consensus::SignableTransaction,\n    };\n    let sig = k256::ecdsa::SigningKey::from(k256::elliptic_curve::NonZeroScalar::new(key).unwrap())\n      .sign_prehash_recoverable(tx.signature_hash().as_ref())\n      .unwrap();\n\n    let mut bytes = vec![];\n    let parity = Parity::NonEip155(Parity::from(sig.1).y_parity());\n    tx.encode_with_signature_fields(&Signature::from(sig).with_parity(parity), &mut bytes);\n    let pending_tx = self.provider.send_raw_transaction(&bytes).await.ok().unwrap();\n\n    // Mine an epoch containing this TX\n    self.mine_block().await;\n    assert!(pending_tx.get_receipt().await.unwrap().status());\n    // Yield the freshly mined block\n    self.get_block(self.get_latest_block_number().await.unwrap()).await.unwrap()\n  }\n}\n"
  },
  {
    "path": "processor/src/networks/mod.rs",
    "content": "use core::{fmt::Debug, time::Duration};\nuse std::{io, collections::HashMap};\n\nuse async_trait::async_trait;\nuse thiserror::Error;\n\nuse frost::{\n  curve::{Ciphersuite, Curve},\n  ThresholdKeys,\n  sign::PreprocessMachine,\n};\n\nuse serai_client::primitives::{ExternalBalance, ExternalNetworkId};\n\nuse log::error;\n\nuse tokio::time::sleep;\n\n#[cfg(feature = \"bitcoin\")]\npub mod bitcoin;\n#[cfg(feature = \"bitcoin\")]\npub use self::bitcoin::Bitcoin;\n\n#[cfg(feature = \"ethereum\")]\npub mod ethereum;\n#[cfg(feature = \"ethereum\")]\npub use ethereum::Ethereum;\n\n#[cfg(feature = \"monero\")]\npub mod monero;\n#[cfg(feature = \"monero\")]\npub use monero::Monero;\n\nuse crate::{Payment, Plan, multisigs::scheduler::Scheduler};\n\n#[derive(Clone, Copy, Error, Debug)]\npub enum NetworkError {\n  #[error(\"failed to connect to network daemon\")]\n  ConnectionError,\n}\n\npub trait Id:\n  Send + Sync + Clone + Default + PartialEq + AsRef<[u8]> + AsMut<[u8]> + Debug\n{\n}\nimpl<I: Send + Sync + Clone + Default + PartialEq + AsRef<[u8]> + AsMut<[u8]> + Debug> Id for I {}\n\n#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]\npub enum OutputType {\n  // Needs to be processed/sent up to Substrate\n  External,\n\n  // Given a known output set, and a known series of outbound transactions, we should be able to\n  // form a completely deterministic schedule S. The issue is when S has TXs which spend prior TXs\n  // in S (which is needed for our logarithmic scheduling). In order to have the descendant TX, say\n  // S[1], build off S[0], we need to observe when S[0] is included on-chain.\n  //\n  // We cannot.\n  //\n  // Monero (and other privacy coins) do not expose their UTXO graphs. Even if we know how to\n  // create S[0], and the actual payment info behind it, we cannot observe it on the blockchain\n  // unless we participated in creating it. 
Locking the entire schedule, when we cannot sign for\n  // the entire schedule at once, to a single signing set isn't feasible.\n  //\n  // While any member of the active signing set can provide data enabling other signers to\n  // participate, it's several KB of data which we then have to code communication for.\n  // The other option is to simply not observe S[0]. Instead, observe a TX with an identical output\n  // to the one in S[0] we intended to use for S[1]. It's either from S[0], or Eve, a malicious\n  // actor, has sent us a forged TX which is... equally as usable? so who cares?\n  //\n  // The only issue is if we have multiple outputs on-chain with identical amounts and purposes.\n  // Accordingly, when the scheduler makes a plan for when a specific output is available, it\n  // shouldn't write that plan. It should *push* that plan to a queue of plans to perform when\n  // instances of that output occur.\n  Branch,\n\n  // Should be added to the available UTXO pool with no further action\n  Change,\n\n  // Forwarded output from the prior multisig\n  Forwarded,\n}\n\nimpl OutputType {\n  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {\n    writer.write_all(&[match self {\n      OutputType::External => 0,\n      OutputType::Branch => 1,\n      OutputType::Change => 2,\n      OutputType::Forwarded => 3,\n    }])\n  }\n\n  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {\n    let mut byte = [0; 1];\n    reader.read_exact(&mut byte)?;\n    Ok(match byte[0] {\n      0 => OutputType::External,\n      1 => OutputType::Branch,\n      2 => OutputType::Change,\n      3 => OutputType::Forwarded,\n      _ => Err(io::Error::other(\"invalid OutputType\"))?,\n    })\n  }\n}\n\npub trait Output<N: Network>: Send + Sync + Sized + Clone + PartialEq + Eq + Debug {\n  type Id: 'static + Id;\n\n  fn kind(&self) -> OutputType;\n\n  fn id(&self) -> Self::Id;\n  fn tx_id(&self) -> <N::Transaction as Transaction<N>>::Id; // TODO: Review use of\n  fn 
key(&self) -> <N::Curve as Ciphersuite>::G;\n\n  fn presumed_origin(&self) -> Option<N::Address>;\n\n  fn balance(&self) -> ExternalBalance;\n  fn data(&self) -> &[u8];\n\n  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()>;\n  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self>;\n}\n\n#[async_trait]\npub trait Transaction<N: Network>: Send + Sync + Sized + Clone + PartialEq + Debug {\n  type Id: 'static + Id;\n  fn id(&self) -> Self::Id;\n  // TODO: Move to ExternalBalance\n  #[cfg(test)]\n  async fn fee(&self, network: &N) -> u64;\n}\n\npub trait SignableTransaction: Send + Sync + Clone + Debug {\n  // TODO: Move to ExternalBalance\n  fn fee(&self) -> u64;\n}\n\npub trait Eventuality: Send + Sync + Clone + PartialEq + Debug {\n  type Claim: Send + Sync + Clone + PartialEq + Default + AsRef<[u8]> + AsMut<[u8]> + Debug;\n  type Completion: Send + Sync + Clone + PartialEq + Debug;\n\n  fn lookup(&self) -> Vec<u8>;\n\n  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self>;\n  fn serialize(&self) -> Vec<u8>;\n\n  fn claim(completion: &Self::Completion) -> Self::Claim;\n\n  // TODO: Make a dedicated Completion trait\n  fn serialize_completion(completion: &Self::Completion) -> Vec<u8>;\n  fn read_completion<R: io::Read>(reader: &mut R) -> io::Result<Self::Completion>;\n}\n\n#[derive(Clone, PartialEq, Eq, Debug)]\npub struct EventualitiesTracker<E: Eventuality> {\n  // Lookup property (input, nonce, TX extra...) 
-> (plan ID, eventuality)\n  map: HashMap<Vec<u8>, ([u8; 32], E)>,\n  // Block number we've scanned these eventualities too\n  block_number: usize,\n}\n\nimpl<E: Eventuality> EventualitiesTracker<E> {\n  pub fn new() -> Self {\n    EventualitiesTracker { map: HashMap::new(), block_number: usize::MAX }\n  }\n\n  pub fn register(&mut self, block_number: usize, id: [u8; 32], eventuality: E) {\n    log::info!(\"registering eventuality for {}\", hex::encode(id));\n\n    let lookup = eventuality.lookup();\n    if self.map.contains_key(&lookup) {\n      panic!(\"registering an eventuality multiple times or lookup collision\");\n    }\n    self.map.insert(lookup, (id, eventuality));\n    // If our self tracker already went past this block number, set it back\n    self.block_number = self.block_number.min(block_number);\n  }\n\n  pub fn drop(&mut self, id: [u8; 32]) {\n    // O(n) due to the lack of a reverse lookup\n    let mut found_key = None;\n    for (key, value) in &self.map {\n      if value.0 == id {\n        found_key = Some(key.clone());\n        break;\n      }\n    }\n\n    if let Some(key) = found_key {\n      self.map.remove(&key);\n    }\n  }\n}\n\nimpl<E: Eventuality> Default for EventualitiesTracker<E> {\n  fn default() -> Self {\n    Self::new()\n  }\n}\n\n#[async_trait]\npub trait Block<N: Network>: Send + Sync + Sized + Clone + Debug {\n  // This is currently bounded to being 32 bytes.\n  type Id: 'static + Id;\n  fn id(&self) -> Self::Id;\n  fn parent(&self) -> Self::Id;\n  /// The monotonic network time at this block.\n  ///\n  /// This call is presumed to be expensive and should only be called sparingly.\n  async fn time(&self, rpc: &N) -> u64;\n}\n\n// The post-fee value of an expected branch.\npub struct PostFeeBranch {\n  pub expected: u64,\n  pub actual: Option<u64>,\n}\n\n// Return the PostFeeBranches needed when dropping a transaction\nfn drop_branches<N: Network>(\n  key: <N::Curve as Ciphersuite>::G,\n  payments: &[Payment<N>],\n) -> 
Vec<PostFeeBranch> {\n  let mut branch_outputs = vec![];\n  for payment in payments {\n    if Some(&payment.address) == N::branch_address(key).as_ref() {\n      branch_outputs.push(PostFeeBranch { expected: payment.balance.amount.0, actual: None });\n    }\n  }\n  branch_outputs\n}\n\npub struct PreparedSend<N: Network> {\n  /// None for the transaction if the SignableTransaction was dropped due to lack of value.\n  pub tx: Option<(N::SignableTransaction, N::Eventuality)>,\n  pub post_fee_branches: Vec<PostFeeBranch>,\n  /// The updated operating costs after preparing this transaction.\n  pub operating_costs: u64,\n}\n\n#[async_trait]\npub trait Network: 'static + Send + Sync + Clone + PartialEq + Debug {\n  /// The elliptic curve used for this network.\n  type Curve: Curve;\n\n  /// The type representing the transaction for this network.\n  type Transaction: Transaction<Self>; // TODO: Review use of\n  /// The type representing the block for this network.\n  type Block: Block<Self>;\n\n  /// The type containing all information on a scanned output.\n  // This is almost certainly distinct from the network's native output type.\n  type Output: Output<Self>;\n  /// The type containing all information on a planned transaction, waiting to be signed.\n  type SignableTransaction: SignableTransaction;\n  /// The type containing all information to check if a plan was completed.\n  ///\n  /// This must be binding to both the outputs expected and the plan ID.\n  type Eventuality: Eventuality;\n  /// The FROST machine to sign a transaction.\n  type TransactionMachine: PreprocessMachine<\n    Signature = <Self::Eventuality as Eventuality>::Completion,\n  >;\n\n  /// The scheduler for this network.\n  type Scheduler: Scheduler<Self>;\n\n  /// The type representing an address.\n  // This should NOT be a String, yet a tailored type representing an efficient binary encoding,\n  // as detailed in the integration documentation.\n  type Address: Send\n    + Sync\n    + Clone\n    + 
PartialEq\n    + Eq\n    + Debug\n    + ToString\n    + TryInto<Vec<u8>>\n    + TryFrom<Vec<u8>>;\n\n  /// Network ID for this network.\n  const NETWORK: ExternalNetworkId;\n  /// String ID for this network.\n  const ID: &'static str;\n  /// The estimated amount of time a block will take.\n  const ESTIMATED_BLOCK_TIME_IN_SECONDS: usize;\n  /// The amount of confirmations required to consider a block 'final'.\n  const CONFIRMATIONS: usize;\n  /// The maximum amount of outputs which will fit in a TX.\n  /// This should be equal to MAX_INPUTS unless one is specifically limited.\n  /// A TX with MAX_INPUTS and MAX_OUTPUTS must not exceed the max size.\n  const MAX_OUTPUTS: usize;\n\n  /// Minimum output value which will be handled.\n  ///\n  /// For any received output, there's the cost to spend the output. This value MUST exceed the\n  /// cost to spend said output, and should by a notable margin (not just 2x, yet an order of\n  /// magnitude).\n  // TODO: Dust needs to be diversified per ExternalCoin\n  const DUST: u64;\n\n  /// The cost to perform input aggregation with a 2-input 1-output TX.\n  const COST_TO_AGGREGATE: u64;\n\n  /// Tweak keys for this network.\n  fn tweak_keys(key: &mut ThresholdKeys<Self::Curve>);\n\n  /// Address for the given group key to receive external coins to.\n  #[cfg(test)]\n  async fn external_address(&self, key: <Self::Curve as Ciphersuite>::G) -> Self::Address;\n  /// Address for the given group key to use for scheduled branches.\n  fn branch_address(key: <Self::Curve as Ciphersuite>::G) -> Option<Self::Address>;\n  /// Address for the given group key to use for change.\n  fn change_address(key: <Self::Curve as Ciphersuite>::G) -> Option<Self::Address>;\n  /// Address for forwarded outputs from prior multisigs.\n  ///\n  /// forward_address must only return None if explicit forwarding isn't necessary.\n  fn forward_address(key: <Self::Curve as Ciphersuite>::G) -> Option<Self::Address>;\n\n  /// Get the latest block's number.\n  async 
fn get_latest_block_number(&self) -> Result<usize, NetworkError>;\n  /// Get a block by its number.\n  async fn get_block(&self, number: usize) -> Result<Self::Block, NetworkError>;\n\n  /// Get the latest block's number, retrying until success.\n  async fn get_latest_block_number_with_retries(&self) -> usize {\n    loop {\n      match self.get_latest_block_number().await {\n        Ok(number) => {\n          return number;\n        }\n        Err(e) => {\n          error!(\n            \"couldn't get the latest block number in the with retry get_latest_block_number: {e:?}\",\n          );\n          sleep(Duration::from_secs(10)).await;\n        }\n      }\n    }\n  }\n\n  /// Get a block, retrying until success.\n  async fn get_block_with_retries(&self, block_number: usize) -> Self::Block {\n    loop {\n      match self.get_block(block_number).await {\n        Ok(block) => {\n          return block;\n        }\n        Err(e) => {\n          error!(\"couldn't get block {block_number} in the with retry get_block: {:?}\", e);\n          sleep(Duration::from_secs(10)).await;\n        }\n      }\n    }\n  }\n\n  /// Get the outputs within a block for a specific key.\n  async fn get_outputs(\n    &self,\n    block: &Self::Block,\n    key: <Self::Curve as Ciphersuite>::G,\n  ) -> Vec<Self::Output>;\n\n  /// Get the registered eventualities completed within this block, and any prior blocks which\n  /// registered eventualities may have been completed in.\n  ///\n  /// This may panic if not fed a block greater than the tracker's block number.\n  ///\n  /// Plan ID -> (block number, TX ID, completion)\n  // TODO: get_eventuality_completions_internal + provided get_eventuality_completions for common\n  // code\n  // TODO: Consider having this return the Transaction + the Completion?\n  // Or Transaction with extract_completion?\n  async fn get_eventuality_completions(\n    &self,\n    eventualities: &mut EventualitiesTracker<Self::Eventuality>,\n    block: &Self::Block,\n  
) -> HashMap<\n    [u8; 32],\n    (\n      usize,\n      <Self::Transaction as Transaction<Self>>::Id,\n      <Self::Eventuality as Eventuality>::Completion,\n    ),\n  >;\n\n  /// Returns the needed fee to fulfill this Plan at this fee rate.\n  ///\n  /// Returns None if this Plan isn't fulfillable (such as when the fee exceeds the input value).\n  async fn needed_fee(\n    &self,\n    block_number: usize,\n    inputs: &[Self::Output],\n    payments: &[Payment<Self>],\n    change: &Option<Self::Address>,\n  ) -> Result<Option<u64>, NetworkError>;\n\n  /// Create a SignableTransaction for the given Plan.\n  ///\n  /// The expected flow is:\n  /// 1) Call needed_fee\n  /// 2) If the Plan is fulfillable, amortize the fee\n  /// 3) Call signable_transaction *which MUST NOT return None if the above was done properly*\n  ///\n  /// This takes a destructured Plan as some of these arguments are malleated from the original\n  /// Plan.\n  // TODO: Explicit AmortizedPlan?\n  #[allow(clippy::too_many_arguments)]\n  async fn signable_transaction(\n    &self,\n    block_number: usize,\n    plan_id: &[u8; 32],\n    key: <Self::Curve as Ciphersuite>::G,\n    inputs: &[Self::Output],\n    payments: &[Payment<Self>],\n    change: &Option<Self::Address>,\n    scheduler_addendum: &<Self::Scheduler as Scheduler<Self>>::Addendum,\n  ) -> Result<Option<(Self::SignableTransaction, Self::Eventuality)>, NetworkError>;\n\n  /// Prepare a SignableTransaction for a transaction.\n  ///\n  /// This must not persist anything as we will prepare Plans we never intend to execute.\n  async fn prepare_send(\n    &self,\n    block_number: usize,\n    plan: Plan<Self>,\n    operating_costs: u64,\n  ) -> Result<PreparedSend<Self>, NetworkError> {\n    // Sanity check this has at least one output planned\n    assert!((!plan.payments.is_empty()) || plan.change.is_some());\n\n    let plan_id = plan.id();\n    let Plan { key, inputs, mut payments, change, scheduler_addendum } = plan;\n    let 
theoretical_change_amount = if change.is_some() {\n      inputs.iter().map(|input| input.balance().amount.0).sum::<u64>() -\n        payments.iter().map(|payment| payment.balance.amount.0).sum::<u64>()\n    } else {\n      0\n    };\n\n    let Some(tx_fee) = self.needed_fee(block_number, &inputs, &payments, &change).await? else {\n      // This Plan is not fulfillable\n      // TODO: Have Plan explicitly distinguish payments and branches in two separate Vecs?\n      return Ok(PreparedSend {\n        tx: None,\n        // Have all of its branches dropped\n        post_fee_branches: drop_branches(key, &payments),\n        // This plan expects a change output valued at sum(inputs) - sum(outputs)\n        // Since we can no longer create this change output, it becomes an operating cost\n        // TODO: Look at input restoration to reduce this operating cost\n        operating_costs: operating_costs +\n          if change.is_some() { theoretical_change_amount } else { 0 },\n      });\n    };\n\n    // Amortize the fee over the plan's payments\n    let (post_fee_branches, mut operating_costs) = (|| {\n      // If we're creating a change output, letting us recoup coins, amortize the operating costs\n      // as well\n      let total_fee = tx_fee + if change.is_some() { operating_costs } else { 0 };\n\n      let original_outputs = payments.iter().map(|payment| payment.balance.amount.0).sum::<u64>();\n      // If this isn't enough for the total fee, drop and move on\n      if original_outputs < total_fee {\n        let mut remaining_operating_costs = operating_costs;\n        if change.is_some() {\n          // Operating costs increase by the TX fee\n          remaining_operating_costs += tx_fee;\n          // Yet decrease by the payments we managed to drop\n          remaining_operating_costs = remaining_operating_costs.saturating_sub(original_outputs);\n        }\n        return (drop_branches(key, &payments), remaining_operating_costs);\n      }\n\n      let 
initial_payment_amounts =\n        payments.iter().map(|payment| payment.balance.amount.0).collect::<Vec<_>>();\n\n      // Amortize the transaction fee across outputs\n      let mut remaining_fee = total_fee;\n      // Run as many times as needed until we can successfully subtract this fee\n      while remaining_fee != 0 {\n        // This shouldn't be a / by 0 as these payments have enough value to cover the fee\n        let this_iter_fee = remaining_fee / u64::try_from(payments.len()).unwrap();\n        let mut overage = remaining_fee % u64::try_from(payments.len()).unwrap();\n        for payment in &mut payments {\n          let this_payment_fee = this_iter_fee + overage;\n          // Only subtract the overage once\n          overage = 0;\n\n          let subtractable = payment.balance.amount.0.min(this_payment_fee);\n          remaining_fee -= subtractable;\n          payment.balance.amount.0 -= subtractable;\n        }\n      }\n\n      // If any payment is now below the dust threshold, set its value to 0 so it'll be dropped\n      for payment in &mut payments {\n        if payment.balance.amount.0 < Self::DUST {\n          payment.balance.amount.0 = 0;\n        }\n      }\n\n      // Note the branch outputs' new values\n      let mut branch_outputs = vec![];\n      for (initial_amount, payment) in initial_payment_amounts.into_iter().zip(&payments) {\n        if Some(&payment.address) == Self::branch_address(key).as_ref() {\n          branch_outputs.push(PostFeeBranch {\n            expected: initial_amount,\n            actual: if payment.balance.amount.0 == 0 {\n              None\n            } else {\n              Some(payment.balance.amount.0)\n            },\n          });\n        }\n      }\n\n      // Drop payments now worth 0\n      payments = payments\n        .drain(..)\n        .filter(|payment| {\n          if payment.balance.amount.0 != 0 {\n            true\n          } else {\n            log::debug!(\"dropping dust payment from plan {}\", 
hex::encode(plan_id));\n            false\n          }\n        })\n        .collect();\n\n      // Sanity check the fee was successfully amortized\n      let new_outputs = payments.iter().map(|payment| payment.balance.amount.0).sum::<u64>();\n      assert!((new_outputs + total_fee) <= original_outputs);\n\n      (\n        branch_outputs,\n        if change.is_none() {\n          // If the change is None, this had no effect on the operating costs\n          operating_costs\n        } else {\n          // Since the change is some, and we successfully amortized, the operating costs were\n          // recouped\n          0\n        },\n      )\n    })();\n\n    let Some(tx) = self\n      .signable_transaction(\n        block_number,\n        &plan_id,\n        key,\n        &inputs,\n        &payments,\n        &change,\n        &scheduler_addendum,\n      )\n      .await?\n    else {\n      panic!(\n        \"{}. {}: {}, {}: {:?}, {}: {:?}, {}: {:?}, {}: {}, {}: {:?}\",\n        \"signable_transaction returned None for a TX we prior successfully calculated the fee for\",\n        \"id\",\n        hex::encode(plan_id),\n        \"inputs\",\n        inputs,\n        \"post-amortization payments\",\n        payments,\n        \"change\",\n        change,\n        \"successfully amoritized fee\",\n        tx_fee,\n        \"scheduler's addendum\",\n        scheduler_addendum,\n      )\n    };\n\n    if change.is_some() {\n      let on_chain_expected_change =\n        inputs.iter().map(|input| input.balance().amount.0).sum::<u64>() -\n          payments.iter().map(|payment| payment.balance.amount.0).sum::<u64>() -\n          tx_fee;\n      // If the change value is less than the dust threshold, it becomes an operating cost\n      // This may be slightly inaccurate as dropping payments may reduce the fee, raising the\n      // change above dust\n      // That's fine since it'd have to be in a very precarious state AND then it's over-eager in\n      // tabulating costs\n   
   if on_chain_expected_change < Self::DUST {\n        operating_costs += theoretical_change_amount;\n      }\n    }\n\n    Ok(PreparedSend { tx: Some(tx), post_fee_branches, operating_costs })\n  }\n\n  /// Attempt to sign a SignableTransaction.\n  async fn attempt_sign(\n    &self,\n    keys: ThresholdKeys<Self::Curve>,\n    transaction: Self::SignableTransaction,\n  ) -> Result<Self::TransactionMachine, NetworkError>;\n\n  /// Publish a completion.\n  async fn publish_completion(\n    &self,\n    completion: &<Self::Eventuality as Eventuality>::Completion,\n  ) -> Result<(), NetworkError>;\n\n  /// Confirm a plan was completed by the specified transaction, per our bounds.\n  ///\n  /// Returns Err if there was an error with the confirmation methodology.\n  /// Returns Ok(None) if this is not a valid completion.\n  /// Returns Ok(Some(_)) with the completion if it's valid.\n  async fn confirm_completion(\n    &self,\n    eventuality: &Self::Eventuality,\n    claim: &<Self::Eventuality as Eventuality>::Claim,\n  ) -> Result<Option<<Self::Eventuality as Eventuality>::Completion>, NetworkError>;\n\n  /// Get a block's number by its ID.\n  #[cfg(test)]\n  async fn get_block_number(&self, id: &<Self::Block as Block<Self>>::Id) -> usize;\n\n  /// Check an Eventuality is fulfilled by a claim.\n  #[cfg(test)]\n  async fn check_eventuality_by_claim(\n    &self,\n    eventuality: &Self::Eventuality,\n    claim: &<Self::Eventuality as Eventuality>::Claim,\n  ) -> bool;\n\n  /// Get a transaction by the Eventuality it completes.\n  #[cfg(test)]\n  async fn get_transaction_by_eventuality(\n    &self,\n    block: usize,\n    eventuality: &Self::Eventuality,\n  ) -> Self::Transaction;\n\n  #[cfg(test)]\n  async fn mine_block(&self);\n\n  /// Sends to the specified address.\n  /// Additionally mines enough blocks so that the TX is past the confirmation depth.\n  #[cfg(test)]\n  async fn test_send(&self, key: Self::Address) -> Self::Block;\n}\n\npub trait UtxoNetwork: Network {\n 
 /// The maximum amount of inputs which will fit in a TX.\n  /// This should be equal to MAX_OUTPUTS unless one is specifically limited.\n  /// A TX with MAX_INPUTS and MAX_OUTPUTS must not exceed the max size.\n  const MAX_INPUTS: usize;\n}\n"
  },
  {
    "path": "processor/src/networks/monero.rs",
    "content": "use std::{time::Duration, collections::HashMap, io};\n\nuse async_trait::async_trait;\n\nuse zeroize::Zeroizing;\n\nuse rand_core::SeedableRng;\nuse rand_chacha::ChaCha20Rng;\n\nuse transcript::{Transcript, RecommendedTranscript};\n\nuse ciphersuite::group::{ff::Field, Group};\nuse dalek_ff_group::{Scalar, EdwardsPoint};\nuse frost::{curve::Ed25519, ThresholdKeys};\n\nuse monero_simple_request_rpc::SimpleRequestRpc;\nuse monero_wallet::{\n  ringct::RctType,\n  transaction::Transaction,\n  block::Block,\n  rpc::{FeeRate, RpcError, Rpc},\n  address::{Network as MoneroNetwork, SubaddressIndex},\n  ViewPair, GuaranteedViewPair, WalletOutput, OutputWithDecoys, GuaranteedScanner,\n  send::{\n    SendError, Change, SignableTransaction as MSignableTransaction, Eventuality, TransactionMachine,\n  },\n};\n#[cfg(test)]\nuse monero_wallet::Scanner;\n\nuse tokio::time::sleep;\n\npub use serai_client::{\n  primitives::{MAX_DATA_LEN, ExternalCoin, ExternalNetworkId, Amount, ExternalBalance},\n  networks::monero::Address,\n};\n\nuse crate::{\n  Payment, additional_key,\n  networks::{\n    NetworkError, Block as BlockTrait, OutputType, Output as OutputTrait,\n    Transaction as TransactionTrait, SignableTransaction as SignableTransactionTrait,\n    Eventuality as EventualityTrait, EventualitiesTracker, Network, UtxoNetwork,\n  },\n  multisigs::scheduler::utxo::Scheduler,\n};\n\n#[derive(Clone, PartialEq, Eq, Debug)]\npub struct Output(WalletOutput);\n\nconst EXTERNAL_SUBADDRESS: Option<SubaddressIndex> = SubaddressIndex::new(0, 0);\nconst BRANCH_SUBADDRESS: Option<SubaddressIndex> = SubaddressIndex::new(1, 0);\nconst CHANGE_SUBADDRESS: Option<SubaddressIndex> = SubaddressIndex::new(2, 0);\nconst FORWARD_SUBADDRESS: Option<SubaddressIndex> = SubaddressIndex::new(3, 0);\n\nimpl OutputTrait<Monero> for Output {\n  // While we could use (tx, o), using the key ensures we won't be susceptible to the burning bug.\n  // While we already are immune, thanks to using featured 
address, this doesn't hurt and is\n  // technically more efficient.\n  type Id = [u8; 32];\n\n  fn kind(&self) -> OutputType {\n    match self.0.subaddress() {\n      EXTERNAL_SUBADDRESS => OutputType::External,\n      BRANCH_SUBADDRESS => OutputType::Branch,\n      CHANGE_SUBADDRESS => OutputType::Change,\n      FORWARD_SUBADDRESS => OutputType::Forwarded,\n      _ => panic!(\"unrecognized address was scanned for\"),\n    }\n  }\n\n  fn id(&self) -> Self::Id {\n    self.0.key().compress().to_bytes()\n  }\n\n  fn tx_id(&self) -> [u8; 32] {\n    self.0.transaction()\n  }\n\n  fn key(&self) -> EdwardsPoint {\n    EdwardsPoint(self.0.key() - (EdwardsPoint::generator().0 * self.0.key_offset()))\n  }\n\n  fn presumed_origin(&self) -> Option<Address> {\n    None\n  }\n\n  fn balance(&self) -> ExternalBalance {\n    ExternalBalance { coin: ExternalCoin::Monero, amount: Amount(self.0.commitment().amount) }\n  }\n\n  fn data(&self) -> &[u8] {\n    let Some(data) = self.0.arbitrary_data().first() else { return &[] };\n    // If the data is too large, prune it\n    // This should cause decoding the instruction to fail, and trigger a refund as appropriate\n    if data.len() > usize::try_from(MAX_DATA_LEN).unwrap() {\n      return &[];\n    }\n    data\n  }\n\n  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {\n    self.0.write(writer)?;\n    Ok(())\n  }\n\n  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {\n    Ok(Output(WalletOutput::read(reader)?))\n  }\n}\n\n// TODO: Consider ([u8; 32], TransactionPruned)\n#[async_trait]\nimpl TransactionTrait<Monero> for Transaction {\n  type Id = [u8; 32];\n  fn id(&self) -> Self::Id {\n    self.hash()\n  }\n\n  #[cfg(test)]\n  async fn fee(&self, _: &Monero) -> u64 {\n    match self {\n      Transaction::V1 { .. } => panic!(\"v1 TX in test-only function\"),\n      Transaction::V2 { ref proofs, .. 
} => proofs.as_ref().unwrap().base.fee,\n    }\n  }\n}\n\nimpl EventualityTrait for Eventuality {\n  type Claim = [u8; 32];\n  type Completion = Transaction;\n\n  // Use the TX extra to look up potential matches\n  // While anyone can forge this, a transaction with distinct outputs won't actually match\n  // Extra includess the one time keys which are derived from the plan ID, so a collision here is a\n  // hash collision\n  fn lookup(&self) -> Vec<u8> {\n    self.extra()\n  }\n\n  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {\n    Eventuality::read(reader)\n  }\n  fn serialize(&self) -> Vec<u8> {\n    self.serialize()\n  }\n\n  fn claim(tx: &Transaction) -> [u8; 32] {\n    tx.id()\n  }\n  fn serialize_completion(completion: &Transaction) -> Vec<u8> {\n    completion.serialize()\n  }\n  fn read_completion<R: io::Read>(reader: &mut R) -> io::Result<Transaction> {\n    Transaction::read(reader)\n  }\n}\n\n#[derive(Clone, Debug)]\npub struct SignableTransaction(MSignableTransaction);\nimpl SignableTransactionTrait for SignableTransaction {\n  fn fee(&self) -> u64 {\n    self.0.necessary_fee()\n  }\n}\n\n#[async_trait]\nimpl BlockTrait<Monero> for Block {\n  type Id = [u8; 32];\n  fn id(&self) -> Self::Id {\n    self.hash()\n  }\n\n  fn parent(&self) -> Self::Id {\n    self.header.previous\n  }\n\n  async fn time(&self, rpc: &Monero) -> u64 {\n    // Constant from Monero\n    const BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW: usize = 60;\n\n    // If Monero doesn't have enough blocks to build a window, it doesn't define a network time\n    if (self.number().unwrap() + 1) < BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW {\n      // Use the block number as the time\n      return u64::try_from(self.number().unwrap()).unwrap();\n    }\n\n    let mut timestamps = vec![self.header.timestamp];\n    let mut parent = self.parent();\n    while timestamps.len() < BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW {\n      let mut parent_block;\n      while {\n        parent_block = 
rpc.rpc.get_block(parent).await;\n        parent_block.is_err()\n      } {\n        log::error!(\"couldn't get parent block when trying to get block time: {parent_block:?}\");\n        sleep(Duration::from_secs(5)).await;\n      }\n      let parent_block = parent_block.unwrap();\n      timestamps.push(parent_block.header.timestamp);\n      parent = parent_block.parent();\n\n      if parent_block.number().unwrap() == 0 {\n        break;\n      }\n    }\n    timestamps.sort();\n\n    // Because 60 has two medians, Monero's epee picks the in-between value, calculated by the\n    // following formula (from the \"get_mid\" function)\n    let n = timestamps.len() / 2;\n    let a = timestamps[n - 1];\n    let b = timestamps[n];\n    #[rustfmt::skip] // Enables Ctrl+F'ing for everything after the `= `\n    let res = (a/2) + (b/2) + ((a - 2*(a/2)) + (b - 2*(b/2)))/2;\n    // Technically, res may be 1 if all prior blocks had a timestamp by 0, which would break\n    // monotonicity with our above definition of height as time\n    // Monero also solely requires the block's time not be less than the median, it doesn't ensure\n    // it advances the median forward\n    // Ensure monotonicity despite both these issues by adding the block number to the median time\n    res + u64::try_from(self.number().unwrap()).unwrap()\n  }\n}\n\n#[derive(Clone, Debug)]\npub struct Monero {\n  rpc: SimpleRequestRpc,\n}\n// Shim required for testing/debugging purposes due to generic arguments also necessitating trait\n// bounds\nimpl PartialEq for Monero {\n  fn eq(&self, _: &Self) -> bool {\n    true\n  }\n}\nimpl Eq for Monero {}\n\n#[allow(clippy::needless_pass_by_value)] // Needed to satisfy API expectations\nfn map_rpc_err(err: RpcError) -> NetworkError {\n  if let RpcError::InvalidNode(reason) = &err {\n    log::error!(\"Monero RpcError::InvalidNode({reason})\");\n  } else {\n    log::debug!(\"Monero RpcError {err:?}\");\n  }\n  NetworkError::ConnectionError\n}\n\nenum 
MakeSignableTransactionResult {\n  Fee(u64),\n  SignableTransaction(MSignableTransaction),\n}\n\nimpl Monero {\n  pub async fn new(url: String) -> Monero {\n    let mut res = SimpleRequestRpc::new(url.clone()).await;\n    while let Err(e) = res {\n      log::error!(\"couldn't connect to Monero node: {e:?}\");\n      tokio::time::sleep(Duration::from_secs(5)).await;\n      res = SimpleRequestRpc::new(url.clone()).await;\n    }\n    Monero { rpc: res.unwrap() }\n  }\n\n  fn view_pair(spend: EdwardsPoint) -> GuaranteedViewPair {\n    GuaranteedViewPair::new(spend.0, Zeroizing::new(additional_key::<Monero>(0).0)).unwrap()\n  }\n\n  fn address_internal(spend: EdwardsPoint, subaddress: Option<SubaddressIndex>) -> Address {\n    Address::new(Self::view_pair(spend).address(MoneroNetwork::Mainnet, subaddress, None)).unwrap()\n  }\n\n  fn scanner(spend: EdwardsPoint) -> GuaranteedScanner {\n    let mut scanner = GuaranteedScanner::new(Self::view_pair(spend));\n    debug_assert!(EXTERNAL_SUBADDRESS.is_none());\n    scanner.register_subaddress(BRANCH_SUBADDRESS.unwrap());\n    scanner.register_subaddress(CHANGE_SUBADDRESS.unwrap());\n    scanner.register_subaddress(FORWARD_SUBADDRESS.unwrap());\n    scanner\n  }\n\n  async fn median_fee(&self, block: &Block) -> Result<FeeRate, NetworkError> {\n    let mut fees = vec![];\n    for tx_hash in &block.transactions {\n      let tx =\n        self.rpc.get_transaction(*tx_hash).await.map_err(|_| NetworkError::ConnectionError)?;\n      // Only consider fees from RCT transactions, else the fee property read wouldn't be accurate\n      let fee = match &tx {\n        Transaction::V2 { proofs: Some(proofs), .. 
} => proofs.base.fee,\n        _ => continue,\n      };\n      fees.push(fee / u64::try_from(tx.weight()).unwrap());\n    }\n    fees.sort();\n    let fee = fees.get(fees.len() / 2).copied().unwrap_or(0);\n\n    // TODO: Set a sane minimum fee\n    const MINIMUM_FEE: u64 = 1_500_000;\n    Ok(FeeRate::new(fee.max(MINIMUM_FEE), 10000).unwrap())\n  }\n\n  async fn make_signable_transaction(\n    &self,\n    block_number: usize,\n    plan_id: &[u8; 32],\n    inputs: &[Output],\n    payments: &[Payment<Self>],\n    change: &Option<Address>,\n    calculating_fee: bool,\n  ) -> Result<Option<MakeSignableTransactionResult>, NetworkError> {\n    for payment in payments {\n      assert_eq!(payment.balance.coin, ExternalCoin::Monero);\n    }\n\n    // TODO2: Use an fee representative of several blocks, cached inside Self\n    let block_for_fee = self.get_block(block_number).await?;\n    let fee_rate = self.median_fee(&block_for_fee).await?;\n\n    // Determine the RCT proofs to make based off the hard fork\n    // TODO: Make a fn for this block which is duplicated with tests\n    let rct_type = match block_for_fee.header.hardfork_version {\n      14 => RctType::ClsagBulletproof,\n      15 | 16 => RctType::ClsagBulletproofPlus,\n      _ => panic!(\"Monero hard forked and the processor wasn't updated for it\"),\n    };\n\n    let mut transcript =\n      RecommendedTranscript::new(b\"Serai Processor Monero Transaction Transcript\");\n    transcript.append_message(b\"plan\", plan_id);\n\n    // All signers need to select the same decoys\n    // All signers use the same height and a seeded RNG to make sure they do so.\n    let mut inputs_actual = Vec::with_capacity(inputs.len());\n    for input in inputs {\n      inputs_actual.push(\n        OutputWithDecoys::fingerprintable_deterministic_new(\n          &mut ChaCha20Rng::from_seed(transcript.rng_seed(b\"decoys\")),\n          &self.rpc,\n          // TODO: Have Decoys take RctType\n          match rct_type {\n            
RctType::ClsagBulletproof => 11,\n            RctType::ClsagBulletproofPlus => 16,\n            _ => panic!(\"selecting decoys for an unsupported RctType\"),\n          },\n          block_number + 1,\n          input.0.clone(),\n        )\n        .await\n        .map_err(map_rpc_err)?,\n      );\n    }\n\n    // Monero requires at least two outputs\n    // If we only have one output planned, add a dummy payment\n    let mut payments = payments.to_vec();\n    let outputs = payments.len() + usize::from(u8::from(change.is_some()));\n    if outputs == 0 {\n      return Ok(None);\n    } else if outputs == 1 {\n      payments.push(Payment {\n        address: Address::new(\n          ViewPair::new(EdwardsPoint::generator().0, Zeroizing::new(Scalar::ONE.0))\n            .unwrap()\n            .legacy_address(MoneroNetwork::Mainnet),\n        )\n        .unwrap(),\n        balance: ExternalBalance { coin: ExternalCoin::Monero, amount: Amount(0) },\n        data: None,\n      });\n    }\n\n    let payments = payments\n      .into_iter()\n      .map(|payment| (payment.address.into(), payment.balance.amount.0))\n      .collect::<Vec<_>>();\n\n    match MSignableTransaction::new(\n      rct_type,\n      // Use the plan ID as the outgoing view key\n      Zeroizing::new(*plan_id),\n      inputs_actual,\n      payments,\n      Change::fingerprintable(change.as_ref().map(|change| change.clone().into())),\n      vec![],\n      fee_rate,\n    ) {\n      Ok(signable) => Ok(Some({\n        if calculating_fee {\n          MakeSignableTransactionResult::Fee(signable.necessary_fee())\n        } else {\n          MakeSignableTransactionResult::SignableTransaction(signable)\n        }\n      })),\n      // AmountsUnrepresentable is unreachable on Monero without 100% of the supply before tail\n      // emission or fundamental corruption\n      Err(e) => match e {\n        SendError::UnsupportedRctType => {\n          panic!(\"trying to use an RctType unsupported by monero-wallet\")\n       
 }\n        SendError::NoInputs |\n        SendError::InvalidDecoyQuantity |\n        SendError::NoOutputs |\n        SendError::TooManyOutputs |\n        SendError::AmountsUnrepresentable { .. } |\n        SendError::NoChange |\n        SendError::TooMuchArbitraryData |\n        SendError::TooLargeTransaction |\n        SendError::WrongPrivateKey => {\n          panic!(\"created an invalid Monero transaction: {e}\");\n        }\n        SendError::MultiplePaymentIds => {\n          panic!(\"multiple payment IDs despite not supporting integrated addresses\");\n        }\n        SendError::NotEnoughFunds { inputs, outputs, necessary_fee } => {\n          log::debug!(\n            \"Monero NotEnoughFunds. inputs: {:?}, outputs: {:?}, necessary_fee: {necessary_fee:?}\",\n            inputs,\n            outputs\n          );\n          match necessary_fee {\n            Some(necessary_fee) => {\n              // If we're solely calculating the fee, return the fee this TX will cost\n              if calculating_fee {\n                Ok(Some(MakeSignableTransactionResult::Fee(necessary_fee)))\n              } else {\n                // If we're actually trying to make the TX, return None\n                Ok(None)\n              }\n            }\n            // We didn't have enough funds to even cover the outputs\n            None => {\n              // Ensure we're not misinterpreting this\n              assert!(outputs > inputs);\n              Ok(None)\n            }\n          }\n        }\n        SendError::MaliciousSerialization | SendError::ClsagError(_) | SendError::FrostError(_) => {\n          panic!(\"supposedly unreachable (at this time) Monero error: {e}\");\n        }\n      },\n    }\n  }\n\n  #[cfg(test)]\n  fn test_view_pair() -> ViewPair {\n    ViewPair::new(*EdwardsPoint::generator(), Zeroizing::new(Scalar::ONE.0)).unwrap()\n  }\n\n  #[cfg(test)]\n  fn test_scanner() -> Scanner {\n    Scanner::new(Self::test_view_pair())\n  }\n\n  #[cfg(test)]\n  
fn test_address() -> Address {\n    Address::new(Self::test_view_pair().legacy_address(MoneroNetwork::Mainnet)).unwrap()\n  }\n}\n\n#[async_trait]\nimpl Network for Monero {\n  type Curve = Ed25519;\n\n  type Transaction = Transaction;\n  type Block = Block;\n\n  type Output = Output;\n  type SignableTransaction = SignableTransaction;\n  type Eventuality = Eventuality;\n  type TransactionMachine = TransactionMachine;\n\n  type Scheduler = Scheduler<Monero>;\n\n  type Address = Address;\n\n  const NETWORK: ExternalNetworkId = ExternalNetworkId::Monero;\n  const ID: &'static str = \"Monero\";\n  const ESTIMATED_BLOCK_TIME_IN_SECONDS: usize = 120;\n  const CONFIRMATIONS: usize = 10;\n\n  const MAX_OUTPUTS: usize = 16;\n\n  // 0.01 XMR\n  const DUST: u64 = 10000000000;\n\n  // TODO\n  const COST_TO_AGGREGATE: u64 = 0;\n\n  // Monero doesn't require/benefit from tweaking\n  fn tweak_keys(_: &mut ThresholdKeys<Self::Curve>) {}\n\n  #[cfg(test)]\n  async fn external_address(&self, key: EdwardsPoint) -> Address {\n    Self::address_internal(key, EXTERNAL_SUBADDRESS)\n  }\n\n  fn branch_address(key: EdwardsPoint) -> Option<Address> {\n    Some(Self::address_internal(key, BRANCH_SUBADDRESS))\n  }\n\n  fn change_address(key: EdwardsPoint) -> Option<Address> {\n    Some(Self::address_internal(key, CHANGE_SUBADDRESS))\n  }\n\n  fn forward_address(key: EdwardsPoint) -> Option<Address> {\n    Some(Self::address_internal(key, FORWARD_SUBADDRESS))\n  }\n\n  async fn get_latest_block_number(&self) -> Result<usize, NetworkError> {\n    // Monero defines height as chain length, so subtract 1 for block number\n    Ok(self.rpc.get_height().await.map_err(map_rpc_err)? 
- 1)\n  }\n\n  async fn get_block(&self, number: usize) -> Result<Self::Block, NetworkError> {\n    Ok(\n      self\n        .rpc\n        .get_block(self.rpc.get_block_hash(number).await.map_err(map_rpc_err)?)\n        .await\n        .map_err(map_rpc_err)?,\n    )\n  }\n\n  async fn get_outputs(&self, block: &Block, key: EdwardsPoint) -> Vec<Output> {\n    let outputs = loop {\n      match self\n        .rpc\n        .get_scannable_block(block.clone())\n        .await\n        .map_err(|e| format!(\"{e:?}\"))\n        .and_then(|block| Self::scanner(key).scan(block).map_err(|e| format!(\"{e:?}\")))\n      {\n        Ok(outputs) => break outputs,\n        Err(e) => {\n          log::error!(\"couldn't scan block {}: {e:?}\", hex::encode(block.id()));\n          sleep(Duration::from_secs(60)).await;\n          continue;\n        }\n      }\n    };\n\n    // Miner transactions are required to explicitly state their timelock, so this does exclude\n    // those (which have an extended timelock we don't want to deal with)\n    let raw_outputs = outputs.not_additionally_locked();\n    let mut outputs = Vec::with_capacity(raw_outputs.len());\n    for output in raw_outputs {\n      // This should be pointless as we shouldn't be able to scan for any other subaddress\n      // This just helps ensures nothing invalid makes it through\n      assert!([EXTERNAL_SUBADDRESS, BRANCH_SUBADDRESS, CHANGE_SUBADDRESS, FORWARD_SUBADDRESS]\n        .contains(&output.subaddress()));\n\n      outputs.push(Output(output));\n    }\n\n    outputs\n  }\n\n  async fn get_eventuality_completions(\n    &self,\n    eventualities: &mut EventualitiesTracker<Eventuality>,\n    block: &Block,\n  ) -> HashMap<[u8; 32], (usize, [u8; 32], Transaction)> {\n    let mut res = HashMap::new();\n    if eventualities.map.is_empty() {\n      return res;\n    }\n\n    async fn check_block(\n      network: &Monero,\n      eventualities: &mut EventualitiesTracker<Eventuality>,\n      block: &Block,\n      res: &mut 
HashMap<[u8; 32], (usize, [u8; 32], Transaction)>,\n    ) {\n      for hash in &block.transactions {\n        let tx = {\n          let mut tx;\n          while {\n            tx = network.rpc.get_transaction(*hash).await;\n            tx.is_err()\n          } {\n            log::error!(\"couldn't get transaction {}: {}\", hex::encode(hash), tx.err().unwrap());\n            sleep(Duration::from_secs(60)).await;\n          }\n          tx.unwrap()\n        };\n\n        if let Some((_, eventuality)) = eventualities.map.get(&tx.prefix().extra) {\n          if eventuality.matches(&tx.clone().into()) {\n            res.insert(\n              eventualities.map.remove(&tx.prefix().extra).unwrap().0,\n              (block.number().unwrap(), tx.id(), tx),\n            );\n          }\n        }\n      }\n\n      eventualities.block_number += 1;\n      assert_eq!(eventualities.block_number, block.number().unwrap());\n    }\n\n    for block_num in (eventualities.block_number + 1) .. block.number().unwrap() {\n      let block = {\n        let mut block;\n        while {\n          block = self.get_block(block_num).await;\n          block.is_err()\n        } {\n          log::error!(\"couldn't get block {}: {}\", block_num, block.err().unwrap());\n          sleep(Duration::from_secs(60)).await;\n        }\n        block.unwrap()\n      };\n\n      check_block(self, eventualities, &block, &mut res).await;\n    }\n\n    // Also check the current block\n    check_block(self, eventualities, block, &mut res).await;\n    assert_eq!(eventualities.block_number, block.number().unwrap());\n\n    res\n  }\n\n  async fn needed_fee(\n    &self,\n    block_number: usize,\n    inputs: &[Output],\n    payments: &[Payment<Self>],\n    change: &Option<Address>,\n  ) -> Result<Option<u64>, NetworkError> {\n    let res = self\n      .make_signable_transaction(block_number, &[0; 32], inputs, payments, change, true)\n      .await?;\n    let Some(res) = res else { return Ok(None) };\n    let 
MakeSignableTransactionResult::Fee(fee) = res else {\n      panic!(\"told make_signable_transaction calculating_fee and got transaction\")\n    };\n    Ok(Some(fee))\n  }\n\n  async fn signable_transaction(\n    &self,\n    block_number: usize,\n    plan_id: &[u8; 32],\n    _key: EdwardsPoint,\n    inputs: &[Output],\n    payments: &[Payment<Self>],\n    change: &Option<Address>,\n    (): &(),\n  ) -> Result<Option<(Self::SignableTransaction, Self::Eventuality)>, NetworkError> {\n    let res = self\n      .make_signable_transaction(block_number, plan_id, inputs, payments, change, false)\n      .await?;\n    let Some(res) = res else { return Ok(None) };\n    let MakeSignableTransactionResult::SignableTransaction(signable) = res else {\n      panic!(\"told make_signable_transaction not calculating_fee and got fee\")\n    };\n\n    let signable = SignableTransaction(signable);\n    let eventuality = signable.0.clone().into();\n    Ok(Some((signable, eventuality)))\n  }\n\n  async fn attempt_sign(\n    &self,\n    keys: ThresholdKeys<Self::Curve>,\n    transaction: SignableTransaction,\n  ) -> Result<Self::TransactionMachine, NetworkError> {\n    match transaction.0.clone().multisig(keys) {\n      Ok(machine) => Ok(machine),\n      Err(e) => panic!(\"failed to create a multisig machine for TX: {e}\"),\n    }\n  }\n\n  async fn publish_completion(&self, tx: &Transaction) -> Result<(), NetworkError> {\n    match self.rpc.publish_transaction(tx).await {\n      Ok(()) => Ok(()),\n      Err(RpcError::ConnectionError(e)) => {\n        log::debug!(\"Monero ConnectionError: {e}\");\n        Err(NetworkError::ConnectionError)?\n      }\n      // TODO: Distinguish already in pool vs double spend (other signing attempt succeeded) vs\n      // invalid transaction\n      Err(e) => panic!(\"failed to publish TX {}: {e}\", hex::encode(tx.hash())),\n    }\n  }\n\n  async fn confirm_completion(\n    &self,\n    eventuality: &Eventuality,\n    id: &[u8; 32],\n  ) -> 
Result<Option<Transaction>, NetworkError> {\n    let tx = self.rpc.get_transaction(*id).await.map_err(map_rpc_err)?;\n    if eventuality.matches(&tx.clone().into()) {\n      Ok(Some(tx))\n    } else {\n      Ok(None)\n    }\n  }\n\n  #[cfg(test)]\n  async fn get_block_number(&self, id: &[u8; 32]) -> usize {\n    self.rpc.get_block(*id).await.unwrap().number().unwrap()\n  }\n\n  #[cfg(test)]\n  async fn check_eventuality_by_claim(\n    &self,\n    eventuality: &Self::Eventuality,\n    claim: &[u8; 32],\n  ) -> bool {\n    return eventuality.matches(&self.rpc.get_pruned_transaction(*claim).await.unwrap());\n  }\n\n  #[cfg(test)]\n  async fn get_transaction_by_eventuality(\n    &self,\n    block: usize,\n    eventuality: &Eventuality,\n  ) -> Transaction {\n    let block = self.rpc.get_block_by_number(block).await.unwrap();\n    for tx in &block.transactions {\n      let tx = self.rpc.get_transaction(*tx).await.unwrap();\n      if eventuality.matches(&tx.clone().into()) {\n        return tx;\n      }\n    }\n    panic!(\"block didn't have a transaction for this eventuality\")\n  }\n\n  #[cfg(test)]\n  async fn mine_block(&self) {\n    // https://github.com/serai-dex/serai/issues/198\n    sleep(std::time::Duration::from_millis(100)).await;\n    self.rpc.generate_blocks(&Self::test_address().into(), 1).await.unwrap();\n  }\n\n  #[cfg(test)]\n  async fn test_send(&self, address: Address) -> Block {\n    use zeroize::Zeroizing;\n    use rand_core::{RngCore, OsRng};\n    use monero_wallet::rpc::FeePriority;\n\n    let new_block = self.get_latest_block_number().await.unwrap() + 1;\n    for _ in 0 .. 
80 {\n      self.mine_block().await;\n    }\n\n    let new_block = self.rpc.get_block_by_number(new_block).await.unwrap();\n    let mut outputs = Self::test_scanner()\n      .scan(self.rpc.get_scannable_block(new_block.clone()).await.unwrap())\n      .unwrap()\n      .ignore_additional_timelock();\n    let output = outputs.swap_remove(0);\n\n    let amount = output.commitment().amount;\n    // The dust should always be sufficient for the fee\n    let fee = Monero::DUST;\n\n    let rct_type = match new_block.header.hardfork_version {\n      14 => RctType::ClsagBulletproof,\n      15 | 16 => RctType::ClsagBulletproofPlus,\n      _ => panic!(\"Monero hard forked and the processor wasn't updated for it\"),\n    };\n\n    let output = OutputWithDecoys::fingerprintable_deterministic_new(\n      &mut OsRng,\n      &self.rpc,\n      match rct_type {\n        RctType::ClsagBulletproof => 11,\n        RctType::ClsagBulletproofPlus => 16,\n        _ => panic!(\"selecting decoys for an unsupported RctType\"),\n      },\n      self.rpc.get_height().await.unwrap(),\n      output,\n    )\n    .await\n    .unwrap();\n\n    let mut outgoing_view_key = Zeroizing::new([0; 32]);\n    OsRng.fill_bytes(outgoing_view_key.as_mut());\n    let tx = MSignableTransaction::new(\n      rct_type,\n      outgoing_view_key,\n      vec![output],\n      vec![(address.into(), amount - fee)],\n      Change::fingerprintable(Some(Self::test_address().into())),\n      vec![],\n      self.rpc.get_fee_rate(FeePriority::Unimportant).await.unwrap(),\n    )\n    .unwrap()\n    .sign(&mut OsRng, &Zeroizing::new(Scalar::ONE.0))\n    .unwrap();\n\n    let block = self.get_latest_block_number().await.unwrap() + 1;\n    self.rpc.publish_transaction(&tx).await.unwrap();\n    for _ in 0 .. 
10 {\n      self.mine_block().await;\n    }\n    self.get_block(block).await.unwrap()\n  }\n}\n\nimpl UtxoNetwork for Monero {\n  // wallet2 will not create a transaction larger than 100kb, and Monero won't relay a transaction\n  // larger than 150kb. This fits within the 100kb mark\n  // Technically, it can be ~124, yet a small bit of buffer is appreciated\n  // TODO: Test creating a TX this big\n  const MAX_INPUTS: usize = 120;\n}\n"
  },
  {
    "path": "processor/src/plan.rs",
    "content": "use std::io;\n\nuse scale::{Encode, Decode};\n\nuse transcript::{Transcript, RecommendedTranscript};\nuse ciphersuite::group::GroupEncoding;\nuse frost::curve::Ciphersuite;\n\nuse serai_client::primitives::ExternalBalance;\n\nuse crate::{\n  networks::{Output, Network},\n  multisigs::scheduler::{SchedulerAddendum, Scheduler},\n};\n\n#[derive(Clone, PartialEq, Eq, Debug)]\npub struct Payment<N: Network> {\n  pub address: N::Address,\n  pub data: Option<Vec<u8>>,\n  pub balance: ExternalBalance,\n}\n\nimpl<N: Network> Payment<N> {\n  pub fn transcript<T: Transcript>(&self, transcript: &mut T) {\n    transcript.domain_separate(b\"payment\");\n    transcript.append_message(b\"address\", self.address.to_string().as_bytes());\n    if let Some(data) = self.data.as_ref() {\n      transcript.append_message(b\"data\", data);\n    }\n    transcript.append_message(b\"coin\", self.balance.coin.encode());\n    transcript.append_message(b\"amount\", self.balance.amount.0.to_le_bytes());\n  }\n\n  pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {\n    // TODO: Don't allow creating Payments with an Address which can't be serialized\n    let address: Vec<u8> = self\n      .address\n      .clone()\n      .try_into()\n      .map_err(|_| io::Error::other(\"address couldn't be serialized\"))?;\n    writer.write_all(&u32::try_from(address.len()).unwrap().to_le_bytes())?;\n    writer.write_all(&address)?;\n\n    writer.write_all(&[u8::from(self.data.is_some())])?;\n    if let Some(data) = &self.data {\n      writer.write_all(&u32::try_from(data.len()).unwrap().to_le_bytes())?;\n      writer.write_all(data)?;\n    }\n\n    writer.write_all(&self.balance.encode())\n  }\n\n  pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {\n    let mut buf = [0; 4];\n    reader.read_exact(&mut buf)?;\n    let mut address = vec![0; usize::try_from(u32::from_le_bytes(buf)).unwrap()];\n    reader.read_exact(&mut address)?;\n    let address = 
N::Address::try_from(address).map_err(|_| io::Error::other(\"invalid address\"))?;\n\n    let mut buf = [0; 1];\n    reader.read_exact(&mut buf)?;\n    let data = if buf[0] == 1 {\n      let mut buf = [0; 4];\n      reader.read_exact(&mut buf)?;\n      let mut data = vec![0; usize::try_from(u32::from_le_bytes(buf)).unwrap()];\n      reader.read_exact(&mut data)?;\n      Some(data)\n    } else {\n      None\n    };\n\n    let balance = ExternalBalance::decode(&mut scale::IoReader(reader))\n      .map_err(|_| io::Error::other(\"invalid balance\"))?;\n\n    Ok(Payment { address, data, balance })\n  }\n}\n\n#[derive(Clone, PartialEq)]\npub struct Plan<N: Network> {\n  pub key: <N::Curve as Ciphersuite>::G,\n  pub inputs: Vec<N::Output>,\n  /// The payments this Plan is intended to create.\n  ///\n  /// This should only contain payments leaving Serai. While it is acceptable for users to enter\n  /// Serai's address(es) as the payment address, as that'll be handled by anything which expects\n  /// certain properties, Serai as a system MUST NOT use payments for internal transfers. Doing\n  /// so will cause a reduction in their value by the TX fee/operating costs, creating an\n  /// incomplete transfer.\n  pub payments: Vec<Payment<N>>,\n  /// The change this Plan should use.\n  ///\n  /// This MUST contain a Serai address. 
Operating costs may be deducted from the payments in this\n  /// Plan on the premise that the change address is Serai's, and accordingly, Serai will recoup\n  /// the operating costs.\n  //\n  // TODO: Consider moving to ::G?\n  pub change: Option<N::Address>,\n  /// The scheduler's additional data.\n  pub scheduler_addendum: <N::Scheduler as Scheduler<N>>::Addendum,\n}\nimpl<N: Network> core::fmt::Debug for Plan<N> {\n  fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {\n    fmt\n      .debug_struct(\"Plan\")\n      .field(\"key\", &hex::encode(self.key.to_bytes()))\n      .field(\"inputs\", &self.inputs)\n      .field(\"payments\", &self.payments)\n      .field(\"change\", &self.change.as_ref().map(ToString::to_string))\n      .field(\"scheduler_addendum\", &self.scheduler_addendum)\n      .finish()\n  }\n}\n\nimpl<N: Network> Plan<N> {\n  pub fn transcript(&self) -> RecommendedTranscript {\n    let mut transcript = RecommendedTranscript::new(b\"Serai Processor Plan ID\");\n    transcript.domain_separate(b\"meta\");\n    transcript.append_message(b\"network\", N::ID);\n    transcript.append_message(b\"key\", self.key.to_bytes());\n\n    transcript.domain_separate(b\"inputs\");\n    for input in &self.inputs {\n      transcript.append_message(b\"input\", input.id());\n    }\n\n    transcript.domain_separate(b\"payments\");\n    for payment in &self.payments {\n      payment.transcript(&mut transcript);\n    }\n\n    if let Some(change) = &self.change {\n      transcript.append_message(b\"change\", change.to_string());\n    }\n\n    let mut addendum_bytes = vec![];\n    self.scheduler_addendum.write(&mut addendum_bytes).unwrap();\n    transcript.append_message(b\"scheduler_addendum\", addendum_bytes);\n\n    transcript\n  }\n\n  pub fn id(&self) -> [u8; 32] {\n    let challenge = self.transcript().challenge(b\"id\");\n    let mut res = [0; 32];\n    res.copy_from_slice(&challenge[.. 
32]);\n    res\n  }\n\n  pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {\n    writer.write_all(self.key.to_bytes().as_ref())?;\n\n    writer.write_all(&u32::try_from(self.inputs.len()).unwrap().to_le_bytes())?;\n    for input in &self.inputs {\n      input.write(writer)?;\n    }\n\n    writer.write_all(&u32::try_from(self.payments.len()).unwrap().to_le_bytes())?;\n    for payment in &self.payments {\n      payment.write(writer)?;\n    }\n\n    // TODO: Have Plan construction fail if change cannot be serialized\n    let change = if let Some(change) = &self.change {\n      change.clone().try_into().map_err(|_| {\n        io::Error::other(format!(\n          \"an address we said to use as change couldn't be converted to a Vec<u8>: {}\",\n          change.to_string(),\n        ))\n      })?\n    } else {\n      vec![]\n    };\n    assert!(serai_client::primitives::MAX_ADDRESS_LEN <= u8::MAX.into());\n    writer.write_all(&[u8::try_from(change.len()).unwrap()])?;\n    writer.write_all(&change)?;\n    self.scheduler_addendum.write(writer)\n  }\n\n  pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {\n    let key = N::Curve::read_G(reader)?;\n\n    let mut inputs = vec![];\n    let mut buf = [0; 4];\n    reader.read_exact(&mut buf)?;\n    for _ in 0 .. u32::from_le_bytes(buf) {\n      inputs.push(N::Output::read(reader)?);\n    }\n\n    let mut payments = vec![];\n    reader.read_exact(&mut buf)?;\n    for _ in 0 .. 
u32::from_le_bytes(buf) {\n      payments.push(Payment::<N>::read(reader)?);\n    }\n\n    let mut len = [0; 1];\n    reader.read_exact(&mut len)?;\n    let mut change = vec![0; usize::from(len[0])];\n    reader.read_exact(&mut change)?;\n    let change =\n      if change.is_empty() {\n        None\n      } else {\n        Some(N::Address::try_from(change).map_err(|_| {\n          io::Error::other(\"couldn't deserialize an Address serialized into a Plan\")\n        })?)\n      };\n\n    let scheduler_addendum = <N::Scheduler as Scheduler<N>>::Addendum::read(reader)?;\n    Ok(Plan { key, inputs, payments, change, scheduler_addendum })\n  }\n}\n"
  },
  {
    "path": "processor/src/signer.rs",
    "content": "use core::{marker::PhantomData, fmt};\nuse std::collections::HashMap;\n\nuse rand_core::OsRng;\nuse frost::{\n  ThresholdKeys, FrostError,\n  sign::{Writable, PreprocessMachine, SignMachine, SignatureMachine},\n};\n\nuse log::{info, debug, warn, error};\n\nuse serai_client::validator_sets::primitives::Session;\nuse messages::sign::*;\n\npub use serai_db::*;\n\nuse crate::{\n  Get, DbTxn, Db,\n  networks::{Eventuality, Network},\n};\n\ncreate_db!(\n  SignerDb {\n    CompletionsDb: (id: [u8; 32]) -> Vec<u8>,\n    EventualityDb: (id: [u8; 32]) -> Vec<u8>,\n    AttemptDb: (id: &SignId) -> (),\n    CompletionDb: (claim: &[u8]) -> Vec<u8>,\n    ActiveSignsDb: () -> Vec<[u8; 32]>,\n    CompletedOnChainDb: (id: &[u8; 32]) -> (),\n  }\n);\n\nimpl ActiveSignsDb {\n  fn add_active_sign(txn: &mut impl DbTxn, id: &[u8; 32]) {\n    if CompletedOnChainDb::get(txn, id).is_some() {\n      return;\n    }\n    let mut active = ActiveSignsDb::get(txn).unwrap_or_default();\n    active.push(*id);\n    ActiveSignsDb::set(txn, &active);\n  }\n}\n\nimpl CompletedOnChainDb {\n  fn complete_on_chain(txn: &mut impl DbTxn, id: &[u8; 32]) {\n    CompletedOnChainDb::set(txn, id, &());\n    ActiveSignsDb::set(\n      txn,\n      &ActiveSignsDb::get(txn)\n        .unwrap_or_default()\n        .into_iter()\n        .filter(|active| active != id)\n        .collect::<Vec<_>>(),\n    );\n  }\n}\nimpl CompletionsDb {\n  fn completions<N: Network>(\n    getter: &impl Get,\n    id: [u8; 32],\n  ) -> Vec<<N::Eventuality as Eventuality>::Claim> {\n    let Some(completions) = Self::get(getter, id) else { return vec![] };\n\n    // If this was set yet is empty, it's because it's the encoding of a claim with a length of 0\n    if completions.is_empty() {\n      let default = <N::Eventuality as Eventuality>::Claim::default();\n      assert_eq!(default.as_ref().len(), 0);\n      return vec![default];\n    }\n\n    let mut completions_ref = completions.as_slice();\n    let mut res = vec![];\n    
while !completions_ref.is_empty() {\n      let mut id = <N::Eventuality as Eventuality>::Claim::default();\n      let id_len = id.as_ref().len();\n      id.as_mut().copy_from_slice(&completions_ref[.. id_len]);\n      completions_ref = &completions_ref[id_len ..];\n      res.push(id);\n    }\n    res\n  }\n\n  fn complete<N: Network>(\n    txn: &mut impl DbTxn,\n    id: [u8; 32],\n    completion: &<N::Eventuality as Eventuality>::Completion,\n  ) {\n    // Completions can be completed by multiple signatures\n    // Save every solution in order to be robust\n    CompletionDb::save_completion::<N>(txn, completion);\n\n    let claim = N::Eventuality::claim(completion);\n    let claim: &[u8] = claim.as_ref();\n\n    // If claim has a 0-byte encoding, the set key, even if empty, is the claim\n    if claim.is_empty() {\n      Self::set(txn, id, &vec![]);\n      return;\n    }\n\n    let mut existing = Self::get(txn, id).unwrap_or_default();\n    assert_eq!(existing.len() % claim.len(), 0);\n\n    // Don't add this completion if it's already present\n    let mut i = 0;\n    while i < existing.len() {\n      if &existing[i .. 
(i + claim.len())] == claim {\n        return;\n      }\n      i += claim.len();\n    }\n\n    existing.extend(claim);\n    Self::set(txn, id, &existing);\n  }\n}\n\nimpl EventualityDb {\n  fn save_eventuality<N: Network>(\n    txn: &mut impl DbTxn,\n    id: [u8; 32],\n    eventuality: &N::Eventuality,\n  ) {\n    txn.put(Self::key(id), eventuality.serialize());\n  }\n\n  fn eventuality<N: Network>(getter: &impl Get, id: [u8; 32]) -> Option<N::Eventuality> {\n    Some(N::Eventuality::read(&mut getter.get(Self::key(id))?.as_slice()).unwrap())\n  }\n}\n\nimpl CompletionDb {\n  fn save_completion<N: Network>(\n    txn: &mut impl DbTxn,\n    completion: &<N::Eventuality as Eventuality>::Completion,\n  ) {\n    let claim = N::Eventuality::claim(completion);\n    let claim: &[u8] = claim.as_ref();\n    Self::set(txn, claim, &N::Eventuality::serialize_completion(completion));\n  }\n\n  fn completion<N: Network>(\n    getter: &impl Get,\n    claim: &<N::Eventuality as Eventuality>::Claim,\n  ) -> Option<<N::Eventuality as Eventuality>::Completion> {\n    Self::get(getter, claim.as_ref())\n      .map(|completion| N::Eventuality::read_completion::<&[u8]>(&mut completion.as_ref()).unwrap())\n  }\n}\n\ntype PreprocessFor<N> = <<N as Network>::TransactionMachine as PreprocessMachine>::Preprocess;\ntype SignMachineFor<N> = <<N as Network>::TransactionMachine as PreprocessMachine>::SignMachine;\ntype SignatureShareFor<N> = <SignMachineFor<N> as SignMachine<\n  <<N as Network>::Eventuality as Eventuality>::Completion,\n>>::SignatureShare;\ntype SignatureMachineFor<N> = <SignMachineFor<N> as SignMachine<\n  <<N as Network>::Eventuality as Eventuality>::Completion,\n>>::SignatureMachine;\n\npub struct Signer<N: Network, D: Db> {\n  db: PhantomData<D>,\n\n  network: N,\n\n  session: Session,\n  keys: Vec<ThresholdKeys<N::Curve>>,\n\n  signable: HashMap<[u8; 32], N::SignableTransaction>,\n  attempt: HashMap<[u8; 32], u32>,\n  #[allow(clippy::type_complexity)]\n  preprocessing: 
HashMap<[u8; 32], (Vec<SignMachineFor<N>>, Vec<PreprocessFor<N>>)>,\n  #[allow(clippy::type_complexity)]\n  signing: HashMap<[u8; 32], (SignatureMachineFor<N>, Vec<SignatureShareFor<N>>)>,\n}\n\nimpl<N: Network, D: Db> fmt::Debug for Signer<N, D> {\n  fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {\n    fmt\n      .debug_struct(\"Signer\")\n      .field(\"network\", &self.network)\n      .field(\"signable\", &self.signable)\n      .field(\"attempt\", &self.attempt)\n      .finish_non_exhaustive()\n  }\n}\n\nimpl<N: Network, D: Db> Signer<N, D> {\n  /// Rebroadcast already signed TXs which haven't had their completions mined into a sufficiently\n  /// confirmed block.\n  pub async fn rebroadcast_task(db: D, network: N) {\n    log::info!(\"rebroadcasting transactions for plans whose completions yet to be confirmed...\");\n    loop {\n      for active in ActiveSignsDb::get(&db).unwrap_or_default() {\n        for claim in CompletionsDb::completions::<N>(&db, active) {\n          log::info!(\"rebroadcasting completion with claim {}\", hex::encode(claim.as_ref()));\n          // TODO: Don't drop the error entirely. 
Check for invariants\n          let _ =\n            network.publish_completion(&CompletionDb::completion::<N>(&db, &claim).unwrap()).await;\n        }\n      }\n      // Only run every five minutes so we aren't frequently loading tens to hundreds of KB from\n      // the DB\n      tokio::time::sleep(core::time::Duration::from_secs(5 * 60)).await;\n    }\n  }\n  pub fn new(network: N, session: Session, keys: Vec<ThresholdKeys<N::Curve>>) -> Signer<N, D> {\n    assert!(!keys.is_empty());\n    Signer {\n      db: PhantomData,\n\n      network,\n\n      session,\n      keys,\n\n      signable: HashMap::new(),\n      attempt: HashMap::new(),\n      preprocessing: HashMap::new(),\n      signing: HashMap::new(),\n    }\n  }\n\n  fn verify_id(&self, id: &SignId) -> Result<(), ()> {\n    // Check the attempt lines up\n    match self.attempt.get(&id.id) {\n      // If we don't have an attempt logged, it's because the coordinator is faulty OR because we\n      // rebooted OR we detected the signed transaction on chain, so there's notable network\n      // latency/a malicious validator\n      None => {\n        warn!(\n          \"not attempting {} #{}. 
this is an error if we didn't reboot\",\n          hex::encode(id.id),\n          id.attempt\n        );\n        Err(())?;\n      }\n      Some(attempt) => {\n        if attempt != &id.attempt {\n          warn!(\n            \"sent signing data for {} #{} yet we have attempt #{}\",\n            hex::encode(id.id),\n            id.attempt,\n            attempt\n          );\n          Err(())?;\n        }\n      }\n    }\n\n    Ok(())\n  }\n\n  #[must_use]\n  fn already_completed(txn: &mut D::Transaction<'_>, id: [u8; 32]) -> bool {\n    if !CompletionsDb::completions::<N>(txn, id).is_empty() {\n      debug!(\n        \"SignTransaction/Reattempt order for {}, which we've already completed signing\",\n        hex::encode(id)\n      );\n\n      true\n    } else {\n      false\n    }\n  }\n\n  #[must_use]\n  fn complete(\n    &mut self,\n    id: [u8; 32],\n    claim: &<N::Eventuality as Eventuality>::Claim,\n  ) -> ProcessorMessage {\n    // Assert we're actively signing for this TX\n    assert!(self.signable.remove(&id).is_some(), \"completed a TX we weren't signing for\");\n    assert!(self.attempt.remove(&id).is_some(), \"attempt had an ID signable didn't have\");\n    // If we weren't selected to participate, we'll have a preprocess\n    self.preprocessing.remove(&id);\n    // If we were selected, the signature will only go through if we contributed a share\n    // Despite this, we then need to get everyone's shares, and we may get a completion before\n    // we get everyone's shares\n    // This would be if the coordinator fails and we find the eventuality completion on-chain\n    self.signing.remove(&id);\n\n    // Emit the event for it\n    ProcessorMessage::Completed { session: self.session, id, tx: claim.as_ref().to_vec() }\n  }\n\n  #[must_use]\n  pub fn completed(\n    &mut self,\n    txn: &mut D::Transaction<'_>,\n    id: [u8; 32],\n    completion: &<N::Eventuality as Eventuality>::Completion,\n  ) -> Option<ProcessorMessage> {\n    let first_completion = 
!Self::already_completed(txn, id);\n\n    // Save this completion to the DB\n    CompletedOnChainDb::complete_on_chain(txn, &id);\n    CompletionsDb::complete::<N>(txn, id, completion);\n\n    if first_completion {\n      Some(self.complete(id, &N::Eventuality::claim(completion)))\n    } else {\n      None\n    }\n  }\n\n  /// Returns Some if the first completion.\n  // Doesn't use any loops/retries since we'll eventually get this from the Scanner anyways\n  #[must_use]\n  async fn claimed_eventuality_completion(\n    &mut self,\n    txn: &mut D::Transaction<'_>,\n    id: [u8; 32],\n    claim: &<N::Eventuality as Eventuality>::Claim,\n  ) -> Option<ProcessorMessage> {\n    if let Some(eventuality) = EventualityDb::eventuality::<N>(txn, id) {\n      match self.network.confirm_completion(&eventuality, claim).await {\n        Ok(Some(completion)) => {\n          info!(\n            \"signer eventuality for {} resolved in {}\",\n            hex::encode(id),\n            hex::encode(claim.as_ref())\n          );\n\n          let first_completion = !Self::already_completed(txn, id);\n\n          // Save this completion to the DB\n          CompletionsDb::complete::<N>(txn, id, &completion);\n\n          if first_completion {\n            return Some(self.complete(id, claim));\n          }\n        }\n        Ok(None) => {\n          warn!(\n            \"a validator claimed {} completed {} when it did not\",\n            hex::encode(claim.as_ref()),\n            hex::encode(id),\n          );\n        }\n        Err(_) => {\n          // Transaction hasn't hit our mempool/was dropped for a different signature\n          // The latter can happen given certain latency conditions/a single malicious signer\n          // In the case of a single malicious signer, they can drag multiple honest validators down\n          // with them, so we unfortunately can't slash on this case\n          warn!(\n            \"a validator claimed {} completed {} yet we couldn't check that 
claim\",\n            hex::encode(claim.as_ref()),\n            hex::encode(id),\n          );\n        }\n      }\n    } else {\n      warn!(\n        \"informed of completion {} for eventuality {}, when we didn't have that eventuality\",\n        hex::encode(claim.as_ref()),\n        hex::encode(id),\n      );\n    }\n    None\n  }\n\n  #[must_use]\n  async fn attempt(\n    &mut self,\n    txn: &mut D::Transaction<'_>,\n    id: [u8; 32],\n    attempt: u32,\n  ) -> Option<ProcessorMessage> {\n    if Self::already_completed(txn, id) {\n      return None;\n    }\n\n    // Check if we're already working on this attempt\n    if let Some(curr_attempt) = self.attempt.get(&id) {\n      if curr_attempt >= &attempt {\n        warn!(\n          \"told to attempt {} #{} yet we're already working on {}\",\n          hex::encode(id),\n          attempt,\n          curr_attempt\n        );\n        return None;\n      }\n    }\n\n    // Start this attempt\n    // Clone the TX so we don't have an immutable borrow preventing the below mutable actions\n    // (also because we do need an owned tx anyways)\n    let Some(tx) = self.signable.get(&id).cloned() else {\n      warn!(\"told to attempt a TX we aren't currently signing for\");\n      return None;\n    };\n\n    // Delete any existing machines\n    self.preprocessing.remove(&id);\n    self.signing.remove(&id);\n\n    // Update the attempt number\n    self.attempt.insert(id, attempt);\n\n    let id = SignId { session: self.session, id, attempt };\n\n    info!(\"signing for {} #{}\", hex::encode(id.id), id.attempt);\n\n    // If we reboot mid-sign, the current design has us abort all signs and wait for latter\n    // attempts/new signing protocols\n    // This is distinct from the DKG which will continue DKG sessions, even on reboot\n    // This is because signing is tolerant of failures of up to 1/3rd of the group\n    // The DKG requires 100% participation\n    // While we could apply similar tricks as the DKG (a seeded RNG) 
to achieve support for\n    // reboots, it's not worth the complexity when messing up here leaks our secret share\n    //\n    // Despite this, on reboot, we'll get told of active signing items, and may be in this\n    // branch again for something we've already attempted\n    //\n    // Only run if this hasn't already been attempted\n    // TODO: This isn't complete as this txn may not be committed with the expected timing\n    if AttemptDb::get(txn, &id).is_some() {\n      warn!(\n        \"already attempted {} #{}. this is an error if we didn't reboot\",\n        hex::encode(id.id),\n        id.attempt\n      );\n      return None;\n    }\n    AttemptDb::set(txn, &id, &());\n\n    // Attempt to create the TX\n    let mut machines = vec![];\n    let mut preprocesses = vec![];\n    let mut serialized_preprocesses = vec![];\n    for keys in &self.keys {\n      let machine = match self.network.attempt_sign(keys.clone(), tx.clone()).await {\n        Err(e) => {\n          error!(\"failed to attempt {}, #{}: {:?}\", hex::encode(id.id), id.attempt, e);\n          return None;\n        }\n        Ok(machine) => machine,\n      };\n\n      let (machine, preprocess) = machine.preprocess(&mut OsRng);\n      machines.push(machine);\n      serialized_preprocesses.push(preprocess.serialize());\n      preprocesses.push(preprocess);\n    }\n\n    self.preprocessing.insert(id.id, (machines, preprocesses));\n\n    // Broadcast our preprocess\n    Some(ProcessorMessage::Preprocess { id, preprocesses: serialized_preprocesses })\n  }\n\n  #[must_use]\n  pub async fn sign_transaction(\n    &mut self,\n    txn: &mut D::Transaction<'_>,\n    id: [u8; 32],\n    tx: N::SignableTransaction,\n    eventuality: &N::Eventuality,\n  ) -> Option<ProcessorMessage> {\n    // The caller is expected to re-issue sign orders on reboot\n    // This is solely used by the rebroadcast task\n    ActiveSignsDb::add_active_sign(txn, &id);\n\n    if Self::already_completed(txn, id) {\n      return None;\n    
}\n\n    EventualityDb::save_eventuality::<N>(txn, id, eventuality);\n\n    self.signable.insert(id, tx);\n    self.attempt(txn, id, 0).await\n  }\n\n  #[must_use]\n  pub async fn handle(\n    &mut self,\n    txn: &mut D::Transaction<'_>,\n    msg: CoordinatorMessage,\n  ) -> Option<ProcessorMessage> {\n    match msg {\n      CoordinatorMessage::Preprocesses { id, preprocesses } => {\n        if self.verify_id(&id).is_err() {\n          return None;\n        }\n\n        let (machines, our_preprocesses) = match self.preprocessing.remove(&id.id) {\n          // Either rebooted or RPC error, or some invariant\n          None => {\n            warn!(\n              \"not preprocessing for {}. this is an error if we didn't reboot\",\n              hex::encode(id.id)\n            );\n            return None;\n          }\n          Some(machine) => machine,\n        };\n\n        let mut parsed = HashMap::new();\n        for l in {\n          let mut keys = preprocesses.keys().copied().collect::<Vec<_>>();\n          keys.sort();\n          keys\n        } {\n          let mut preprocess_ref = preprocesses.get(&l).unwrap().as_slice();\n          let Ok(res) = machines[0].read_preprocess(&mut preprocess_ref) else {\n            return Some(ProcessorMessage::InvalidParticipant { id, participant: l });\n          };\n          if !preprocess_ref.is_empty() {\n            return Some(ProcessorMessage::InvalidParticipant { id, participant: l });\n          }\n          parsed.insert(l, res);\n        }\n        let preprocesses = parsed;\n\n        // Only keep a single machine as we only need one to get the signature\n        let mut signature_machine = None;\n        let mut shares = vec![];\n        let mut serialized_shares = vec![];\n        for (m, machine) in machines.into_iter().enumerate() {\n          let mut preprocesses = preprocesses.clone();\n          for (i, our_preprocess) in our_preprocesses.clone().into_iter().enumerate() {\n            if i != m {\n       
       assert!(preprocesses.insert(self.keys[i].params().i(), our_preprocess).is_none());\n            }\n          }\n\n          // Use an empty message, as expected of TransactionMachines\n          let (machine, share) = match machine.sign(preprocesses, &[]) {\n            Ok(res) => res,\n            Err(e) => match e {\n              FrostError::InternalError(_) |\n              FrostError::InvalidParticipant(_, _) |\n              FrostError::InvalidSigningSet(_) |\n              FrostError::InvalidParticipantQuantity(_, _) |\n              FrostError::DuplicatedParticipant(_) |\n              FrostError::MissingParticipant(_) => unreachable!(),\n\n              FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => {\n                return Some(ProcessorMessage::InvalidParticipant { id, participant: l })\n              }\n            },\n          };\n          if m == 0 {\n            signature_machine = Some(machine);\n          }\n          serialized_shares.push(share.serialize());\n          shares.push(share);\n        }\n        self.signing.insert(id.id, (signature_machine.unwrap(), shares));\n\n        // Broadcast our shares\n        Some(ProcessorMessage::Share { id, shares: serialized_shares })\n      }\n\n      CoordinatorMessage::Shares { id, shares } => {\n        if self.verify_id(&id).is_err() {\n          return None;\n        }\n\n        let (machine, our_shares) = match self.signing.remove(&id.id) {\n          // Rebooted, RPC error, or some invariant\n          None => {\n            // If preprocessing has this ID, it means we were never sent the preprocess by the\n            // coordinator\n            if self.preprocessing.contains_key(&id.id) {\n              panic!(\"never preprocessed yet signing?\");\n            }\n\n            warn!(\n              \"not preprocessing for {}. 
this is an error if we didn't reboot\",\n              hex::encode(id.id)\n            );\n            return None;\n          }\n          Some(machine) => machine,\n        };\n\n        let mut parsed = HashMap::new();\n        for l in {\n          let mut keys = shares.keys().copied().collect::<Vec<_>>();\n          keys.sort();\n          keys\n        } {\n          let mut share_ref = shares.get(&l).unwrap().as_slice();\n          let Ok(res) = machine.read_share(&mut share_ref) else {\n            return Some(ProcessorMessage::InvalidParticipant { id, participant: l });\n          };\n          if !share_ref.is_empty() {\n            return Some(ProcessorMessage::InvalidParticipant { id, participant: l });\n          }\n          parsed.insert(l, res);\n        }\n        let mut shares = parsed;\n\n        for (i, our_share) in our_shares.into_iter().enumerate().skip(1) {\n          assert!(shares.insert(self.keys[i].params().i(), our_share).is_none());\n        }\n\n        let completion = match machine.complete(shares) {\n          Ok(res) => res,\n          Err(e) => match e {\n            FrostError::InternalError(_) |\n            FrostError::InvalidParticipant(_, _) |\n            FrostError::InvalidSigningSet(_) |\n            FrostError::InvalidParticipantQuantity(_, _) |\n            FrostError::DuplicatedParticipant(_) |\n            FrostError::MissingParticipant(_) => unreachable!(),\n\n            FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => {\n              return Some(ProcessorMessage::InvalidParticipant { id, participant: l })\n            }\n          },\n        };\n\n        // Save the completion in case it's needed for recovery\n        CompletionsDb::complete::<N>(txn, id.id, &completion);\n\n        // Publish it\n        if let Err(e) = self.network.publish_completion(&completion).await {\n          error!(\"couldn't publish completion for plan {}: {:?}\", hex::encode(id.id), e);\n        } else {\n          
info!(\"published completion for plan {}\", hex::encode(id.id));\n        }\n\n        // Stop trying to sign for this TX\n        Some(self.complete(id.id, &N::Eventuality::claim(&completion)))\n      }\n\n      CoordinatorMessage::Reattempt { id } => self.attempt(txn, id.id, id.attempt).await,\n\n      CoordinatorMessage::Completed { session: _, id, tx: mut claim_vec } => {\n        let mut claim = <N::Eventuality as Eventuality>::Claim::default();\n        if claim.as_ref().len() != claim_vec.len() {\n          let true_len = claim_vec.len();\n          claim_vec.truncate(2 * claim.as_ref().len());\n          warn!(\n            \"a validator claimed {}... (actual length {}) completed {} yet {}\",\n            hex::encode(&claim_vec),\n            true_len,\n            hex::encode(id),\n            \"that's not a valid Claim\",\n          );\n          return None;\n        }\n        claim.as_mut().copy_from_slice(&claim_vec);\n\n        self.claimed_eventuality_completion(txn, id, &claim).await\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "processor/src/slash_report_signer.rs",
    "content": "use core::fmt;\nuse std::collections::HashMap;\n\nuse rand_core::OsRng;\n\nuse frost::{\n  curve::Ristretto,\n  ThresholdKeys, FrostError,\n  algorithm::Algorithm,\n  sign::{\n    Writable, PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine,\n    AlgorithmSignMachine, AlgorithmSignatureMachine,\n  },\n};\nuse frost_schnorrkel::Schnorrkel;\n\nuse log::{info, warn};\n\nuse serai_client::{\n  primitives::ExternalNetworkId,\n  validator_sets::primitives::{report_slashes_message, ExternalValidatorSet, Session},\n  Public,\n};\n\nuse messages::coordinator::*;\nuse crate::{Get, DbTxn, create_db};\n\ncreate_db! {\n  SlashReportSignerDb {\n    Completed: (session: Session) -> (),\n    Attempt: (session: Session, attempt: u32) -> (),\n  }\n}\n\ntype Preprocess = <AlgorithmMachine<Ristretto, Schnorrkel> as PreprocessMachine>::Preprocess;\ntype SignatureShare = <AlgorithmSignMachine<Ristretto, Schnorrkel> as SignMachine<\n  <Schnorrkel as Algorithm<Ristretto>>::Signature,\n>>::SignatureShare;\n\npub struct SlashReportSigner {\n  network: ExternalNetworkId,\n  session: Session,\n  keys: Vec<ThresholdKeys<Ristretto>>,\n  report: Vec<([u8; 32], u32)>,\n\n  attempt: u32,\n  #[allow(clippy::type_complexity)]\n  preprocessing: Option<(Vec<AlgorithmSignMachine<Ristretto, Schnorrkel>>, Vec<Preprocess>)>,\n  #[allow(clippy::type_complexity)]\n  signing: Option<(AlgorithmSignatureMachine<Ristretto, Schnorrkel>, Vec<SignatureShare>)>,\n}\n\nimpl fmt::Debug for SlashReportSigner {\n  fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {\n    fmt\n      .debug_struct(\"SlashReportSigner\")\n      .field(\"session\", &self.session)\n      .field(\"report\", &self.report)\n      .field(\"attempt\", &self.attempt)\n      .field(\"preprocessing\", &self.preprocessing.is_some())\n      .field(\"signing\", &self.signing.is_some())\n      .finish_non_exhaustive()\n  }\n}\n\nimpl SlashReportSigner {\n  pub fn new(\n    txn: &mut impl DbTxn,\n    network: 
ExternalNetworkId,\n    session: Session,\n    keys: Vec<ThresholdKeys<Ristretto>>,\n    report: Vec<([u8; 32], u32)>,\n    attempt: u32,\n  ) -> Option<(SlashReportSigner, ProcessorMessage)> {\n    assert!(!keys.is_empty());\n\n    if Completed::get(txn, session).is_some() {\n      return None;\n    }\n\n    if Attempt::get(txn, session, attempt).is_some() {\n      warn!(\n        \"already attempted signing slash report for session {:?}, attempt #{}. {}\",\n        session, attempt, \"this is an error if we didn't reboot\",\n      );\n      return None;\n    }\n    Attempt::set(txn, session, attempt, &());\n\n    info!(\"signing slash report for session {:?} with attempt #{}\", session, attempt);\n\n    let mut machines = vec![];\n    let mut preprocesses = vec![];\n    let mut serialized_preprocesses = vec![];\n    for keys in &keys {\n      // b\"substrate\" is a literal from sp-core\n      let machine = AlgorithmMachine::new(Schnorrkel::new(b\"substrate\"), keys.clone());\n\n      let (machine, preprocess) = machine.preprocess(&mut OsRng);\n      machines.push(machine);\n      serialized_preprocesses.push(preprocess.serialize().try_into().unwrap());\n      preprocesses.push(preprocess);\n    }\n    let preprocessing = Some((machines, preprocesses));\n\n    let substrate_sign_id =\n      SubstrateSignId { session, id: SubstrateSignableId::SlashReport, attempt };\n\n    Some((\n      SlashReportSigner { network, session, keys, report, attempt, preprocessing, signing: None },\n      ProcessorMessage::SlashReportPreprocess {\n        id: substrate_sign_id,\n        preprocesses: serialized_preprocesses,\n      },\n    ))\n  }\n\n  #[must_use]\n  pub fn handle(\n    &mut self,\n    txn: &mut impl DbTxn,\n    msg: CoordinatorMessage,\n  ) -> Option<ProcessorMessage> {\n    match msg {\n      CoordinatorMessage::CosignSubstrateBlock { .. 
} => {\n        panic!(\"SlashReportSigner passed CosignSubstrateBlock\")\n      }\n\n      CoordinatorMessage::SignSlashReport { .. } => {\n        panic!(\"SlashReportSigner passed SignSlashReport\")\n      }\n\n      CoordinatorMessage::SubstratePreprocesses { id, preprocesses } => {\n        assert_eq!(id.session, self.session);\n        assert_eq!(id.id, SubstrateSignableId::SlashReport);\n        if id.attempt != self.attempt {\n          panic!(\"given preprocesses for a distinct attempt than SlashReportSigner is signing\")\n        }\n\n        let (machines, our_preprocesses) = match self.preprocessing.take() {\n          // Either rebooted or RPC error, or some invariant\n          None => {\n            warn!(\"not preprocessing. this is an error if we didn't reboot\");\n            return None;\n          }\n          Some(preprocess) => preprocess,\n        };\n\n        let mut parsed = HashMap::new();\n        for l in {\n          let mut keys = preprocesses.keys().copied().collect::<Vec<_>>();\n          keys.sort();\n          keys\n        } {\n          let mut preprocess_ref = preprocesses.get(&l).unwrap().as_slice();\n          let Ok(res) = machines[0].read_preprocess(&mut preprocess_ref) else {\n            return Some(ProcessorMessage::InvalidParticipant { id, participant: l });\n          };\n          if !preprocess_ref.is_empty() {\n            return Some(ProcessorMessage::InvalidParticipant { id, participant: l });\n          }\n          parsed.insert(l, res);\n        }\n        let preprocesses = parsed;\n\n        // Only keep a single machine as we only need one to get the signature\n        let mut signature_machine = None;\n        let mut shares = vec![];\n        let mut serialized_shares = vec![];\n        for (m, machine) in machines.into_iter().enumerate() {\n          let mut preprocesses = preprocesses.clone();\n          for (i, our_preprocess) in our_preprocesses.clone().into_iter().enumerate() {\n            if i != m 
{\n              assert!(preprocesses.insert(self.keys[i].params().i(), our_preprocess).is_none());\n            }\n          }\n\n          let (machine, share) = match machine.sign(\n            preprocesses,\n            &report_slashes_message(\n              &ExternalValidatorSet { network: self.network, session: self.session },\n              &self\n                .report\n                .clone()\n                .into_iter()\n                .map(|(validator, points)| (Public::from(validator), points))\n                .collect::<Vec<_>>(),\n            ),\n          ) {\n            Ok(res) => res,\n            Err(e) => match e {\n              FrostError::InternalError(_) |\n              FrostError::InvalidParticipant(_, _) |\n              FrostError::InvalidSigningSet(_) |\n              FrostError::InvalidParticipantQuantity(_, _) |\n              FrostError::DuplicatedParticipant(_) |\n              FrostError::MissingParticipant(_) => unreachable!(),\n\n              FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => {\n                return Some(ProcessorMessage::InvalidParticipant { id, participant: l })\n              }\n            },\n          };\n          if m == 0 {\n            signature_machine = Some(machine);\n          }\n\n          let mut share_bytes = [0; 32];\n          share_bytes.copy_from_slice(&share.serialize());\n          serialized_shares.push(share_bytes);\n\n          shares.push(share);\n        }\n        self.signing = Some((signature_machine.unwrap(), shares));\n\n        // Broadcast our shares\n        Some(ProcessorMessage::SubstrateShare { id, shares: serialized_shares })\n      }\n\n      CoordinatorMessage::SubstrateShares { id, shares } => {\n        assert_eq!(id.session, self.session);\n        assert_eq!(id.id, SubstrateSignableId::SlashReport);\n        if id.attempt != self.attempt {\n          panic!(\"given preprocesses for a distinct attempt than SlashReportSigner is signing\")\n      
  }\n\n        let (machine, our_shares) = match self.signing.take() {\n          // Rebooted, RPC error, or some invariant\n          None => {\n            // If preprocessing has this ID, it means we were never sent the preprocess by the\n            // coordinator\n            if self.preprocessing.is_some() {\n              panic!(\"never preprocessed yet signing?\");\n            }\n\n            warn!(\"not preprocessing. this is an error if we didn't reboot\");\n            return None;\n          }\n          Some(signing) => signing,\n        };\n\n        let mut parsed = HashMap::new();\n        for l in {\n          let mut keys = shares.keys().copied().collect::<Vec<_>>();\n          keys.sort();\n          keys\n        } {\n          let mut share_ref = shares.get(&l).unwrap().as_slice();\n          let Ok(res) = machine.read_share(&mut share_ref) else {\n            return Some(ProcessorMessage::InvalidParticipant { id, participant: l });\n          };\n          if !share_ref.is_empty() {\n            return Some(ProcessorMessage::InvalidParticipant { id, participant: l });\n          }\n          parsed.insert(l, res);\n        }\n        let mut shares = parsed;\n\n        for (i, our_share) in our_shares.into_iter().enumerate().skip(1) {\n          assert!(shares.insert(self.keys[i].params().i(), our_share).is_none());\n        }\n\n        let sig = match machine.complete(shares) {\n          Ok(res) => res,\n          Err(e) => match e {\n            FrostError::InternalError(_) |\n            FrostError::InvalidParticipant(_, _) |\n            FrostError::InvalidSigningSet(_) |\n            FrostError::InvalidParticipantQuantity(_, _) |\n            FrostError::DuplicatedParticipant(_) |\n            FrostError::MissingParticipant(_) => unreachable!(),\n\n            FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => {\n              return Some(ProcessorMessage::InvalidParticipant { id, participant: l })\n            }\n      
    },\n        };\n\n        info!(\"signed slash report for session {:?} with attempt #{}\", self.session, id.attempt);\n\n        Completed::set(txn, self.session, &());\n\n        Some(ProcessorMessage::SignedSlashReport {\n          session: self.session,\n          signature: sig.to_bytes().to_vec(),\n        })\n      }\n      CoordinatorMessage::BatchReattempt { .. } => {\n        panic!(\"BatchReattempt passed to SlashReportSigner\")\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "processor/src/tests/addresses.rs",
    "content": "use core::{time::Duration, pin::Pin, future::Future};\nuse std::collections::HashMap;\n\nuse rand_core::OsRng;\n\nuse frost::{Participant, ThresholdKeys};\n\nuse tokio::time::timeout;\n\nuse serai_client::validator_sets::primitives::Session;\n\nuse serai_db::{DbTxn, MemDb};\n\nuse crate::{\n  Plan, Db,\n  networks::{OutputType, Output, Block, UtxoNetwork},\n  multisigs::{\n    scheduler::Scheduler,\n    scanner::{ScannerEvent, Scanner, ScannerHandle},\n  },\n  tests::sign,\n};\n\nasync fn spend<N: UtxoNetwork, D: Db>(\n  db: &mut D,\n  network: &N,\n  keys: &HashMap<Participant, ThresholdKeys<N::Curve>>,\n  scanner: &mut ScannerHandle<N, D>,\n  outputs: Vec<N::Output>,\n) where\n  <N::Scheduler as Scheduler<N>>::Addendum: From<()>,\n{\n  let key = keys[&Participant::new(1).unwrap()].group_key();\n\n  let mut keys_txs = HashMap::new();\n  for (i, keys) in keys {\n    keys_txs.insert(\n      *i,\n      (\n        keys.clone(),\n        network\n          .prepare_send(\n            network.get_latest_block_number().await.unwrap() - N::CONFIRMATIONS,\n            // Send to a change output\n            Plan {\n              key,\n              inputs: outputs.clone(),\n              payments: vec![],\n              change: Some(N::change_address(key).unwrap()),\n              scheduler_addendum: ().into(),\n            },\n            0,\n          )\n          .await\n          .unwrap()\n          .tx\n          .unwrap(),\n      ),\n    );\n  }\n  sign(network.clone(), Session(0), keys_txs).await;\n\n  for _ in 0 .. 
N::CONFIRMATIONS {\n    network.mine_block().await;\n  }\n  match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() {\n    ScannerEvent::Block { is_retirement_block, block, outputs } => {\n      scanner.multisig_completed.send(false).unwrap();\n      assert!(!is_retirement_block);\n      assert_eq!(outputs.len(), 1);\n      // Make sure this is actually a change output\n      assert_eq!(outputs[0].kind(), OutputType::Change);\n      assert_eq!(outputs[0].key(), key);\n      let mut txn = db.txn();\n      assert_eq!(scanner.ack_block(&mut txn, block).await.1, outputs);\n      scanner.release_lock().await;\n      txn.commit();\n    }\n    ScannerEvent::Completed(_, _, _, _, _) => {\n      panic!(\"unexpectedly got eventuality completion\");\n    }\n  }\n}\n\npub async fn test_addresses<N: UtxoNetwork>(\n  new_network: impl Fn(MemDb) -> Pin<Box<dyn Send + Future<Output = N>>>,\n) where\n  <N::Scheduler as Scheduler<N>>::Addendum: From<()>,\n{\n  let mut keys = frost::tests::key_gen::<_, N::Curve>(&mut OsRng);\n  for keys in keys.values_mut() {\n    N::tweak_keys(keys);\n  }\n  let key = keys[&Participant::new(1).unwrap()].group_key();\n\n  let mut db = MemDb::new();\n  let network = new_network(db.clone()).await;\n\n  // Mine blocks so there's a confirmed block\n  for _ in 0 .. N::CONFIRMATIONS {\n    network.mine_block().await;\n  }\n\n  let (mut scanner, current_keys) = Scanner::new(network.clone(), db.clone());\n  assert!(current_keys.is_empty());\n  let mut txn = db.txn();\n  scanner.register_key(&mut txn, network.get_latest_block_number().await.unwrap(), key).await;\n  txn.commit();\n  for _ in 0 .. 
N::CONFIRMATIONS {\n    network.mine_block().await;\n  }\n\n  // Receive funds to the various addresses and make sure they're properly identified\n  let mut received_outputs = vec![];\n  for (kind, address) in [\n    (OutputType::External, N::external_address(&network, key).await),\n    (OutputType::Branch, N::branch_address(key).unwrap()),\n    (OutputType::Change, N::change_address(key).unwrap()),\n    (OutputType::Forwarded, N::forward_address(key).unwrap()),\n  ] {\n    let block_id = network.test_send(address).await.id();\n\n    // Verify the Scanner picked them up\n    match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() {\n      ScannerEvent::Block { is_retirement_block, block, outputs } => {\n        scanner.multisig_completed.send(false).unwrap();\n        assert!(!is_retirement_block);\n        assert_eq!(block, block_id);\n        assert_eq!(outputs.len(), 1);\n        assert_eq!(outputs[0].kind(), kind);\n        assert_eq!(outputs[0].key(), key);\n        let mut txn = db.txn();\n        assert_eq!(scanner.ack_block(&mut txn, block).await.1, outputs);\n        scanner.release_lock().await;\n        txn.commit();\n        received_outputs.extend(outputs);\n      }\n      ScannerEvent::Completed(_, _, _, _, _) => {\n        panic!(\"unexpectedly got eventuality completion\");\n      }\n    };\n  }\n\n  // Spend the branch output, creating a change output and ensuring we actually get change\n  spend(&mut db, &network, &keys, &mut scanner, received_outputs).await;\n}\n"
  },
  {
    "path": "processor/src/tests/batch_signer.rs",
    "content": "use std::collections::HashMap;\n\nuse rand_core::{RngCore, OsRng};\n\nuse ciphersuite::group::GroupEncoding;\nuse frost::{\n  curve::Ristretto,\n  Participant,\n  tests::{key_gen, clone_without},\n};\n\nuse sp_application_crypto::{RuntimePublic, sr25519::Public};\n\nuse serai_db::{DbTxn, Db, MemDb};\n\n#[rustfmt::skip]\nuse serai_client::{primitives::*, in_instructions::primitives::*, validator_sets::primitives::Session};\n\nuse messages::{\n  substrate,\n  coordinator::{self, SubstrateSignableId, SubstrateSignId, CoordinatorMessage},\n  ProcessorMessage,\n};\nuse crate::batch_signer::BatchSigner;\n\n#[test]\nfn test_batch_signer() {\n  let keys = key_gen::<_, Ristretto>(&mut OsRng);\n\n  let participant_one = Participant::new(1).unwrap();\n\n  let id: u32 = 5;\n  let block = BlockHash([0xaa; 32]);\n\n  let batch = Batch {\n    network: ExternalNetworkId::Monero,\n    id,\n    block,\n    instructions: vec![\n      InInstructionWithBalance {\n        instruction: InInstruction::Transfer(SeraiAddress([0xbb; 32])),\n        balance: ExternalBalance { coin: ExternalCoin::Bitcoin, amount: Amount(1000) },\n      },\n      InInstructionWithBalance {\n        instruction: InInstruction::Dex(DexCall::SwapAndAddLiquidity(SeraiAddress([0xbb; 32]))),\n        balance: ExternalBalance { coin: ExternalCoin::Monero, amount: Amount(9999999999999999) },\n      },\n    ],\n  };\n\n  let actual_id =\n    SubstrateSignId { session: Session(0), id: SubstrateSignableId::Batch(batch.id), attempt: 0 };\n\n  let mut signing_set = vec![];\n  while signing_set.len() < usize::from(keys.values().next().unwrap().params().t()) {\n    let candidate = Participant::new(\n      u16::try_from((OsRng.next_u64() % u64::try_from(keys.len()).unwrap()) + 1).unwrap(),\n    )\n    .unwrap();\n    if signing_set.contains(&candidate) {\n      continue;\n    }\n    signing_set.push(candidate);\n  }\n\n  let mut signers = HashMap::new();\n  let mut dbs = HashMap::new();\n  let mut preprocesses 
= HashMap::new();\n  for i in 1 ..= keys.len() {\n    let i = Participant::new(u16::try_from(i).unwrap()).unwrap();\n    let keys = keys.get(&i).unwrap().clone();\n\n    let mut signer = BatchSigner::<MemDb>::new(ExternalNetworkId::Monero, Session(0), vec![keys]);\n    let mut db = MemDb::new();\n\n    let mut txn = db.txn();\n    match signer.sign(&mut txn, batch.clone()).unwrap() {\n      // All participants should emit a preprocess\n      coordinator::ProcessorMessage::BatchPreprocess {\n        id,\n        block: batch_block,\n        preprocesses: mut these_preprocesses,\n      } => {\n        assert_eq!(id, actual_id);\n        assert_eq!(batch_block, block);\n        assert_eq!(these_preprocesses.len(), 1);\n        if signing_set.contains(&i) {\n          preprocesses.insert(i, these_preprocesses.swap_remove(0));\n        }\n      }\n      _ => panic!(\"didn't get preprocess back\"),\n    }\n    txn.commit();\n\n    signers.insert(i, signer);\n    dbs.insert(i, db);\n  }\n\n  let mut shares = HashMap::new();\n  for i in &signing_set {\n    let mut txn = dbs.get_mut(i).unwrap().txn();\n    match signers\n      .get_mut(i)\n      .unwrap()\n      .handle(\n        &mut txn,\n        CoordinatorMessage::SubstratePreprocesses {\n          id: actual_id.clone(),\n          preprocesses: clone_without(&preprocesses, i),\n        },\n      )\n      .unwrap()\n    {\n      ProcessorMessage::Coordinator(coordinator::ProcessorMessage::SubstrateShare {\n        id,\n        shares: mut these_shares,\n      }) => {\n        assert_eq!(id, actual_id);\n        assert_eq!(these_shares.len(), 1);\n        shares.insert(*i, these_shares.swap_remove(0));\n      }\n      _ => panic!(\"didn't get share back\"),\n    }\n    txn.commit();\n  }\n\n  for i in &signing_set {\n    let mut txn = dbs.get_mut(i).unwrap().txn();\n    match signers\n      .get_mut(i)\n      .unwrap()\n      .handle(\n        &mut txn,\n        CoordinatorMessage::SubstrateShares {\n          id: 
actual_id.clone(),\n          shares: clone_without(&shares, i),\n        },\n      )\n      .unwrap()\n    {\n      ProcessorMessage::Substrate(substrate::ProcessorMessage::SignedBatch {\n        batch: signed_batch,\n      }) => {\n        assert_eq!(signed_batch.batch, batch);\n        assert!(Public::from_raw(keys[&participant_one].group_key().to_bytes())\n          .verify(&batch_message(&batch), &signed_batch.signature));\n      }\n      _ => panic!(\"didn't get signed batch back\"),\n    }\n    txn.commit();\n  }\n}\n"
  },
  {
    "path": "processor/src/tests/cosigner.rs",
    "content": "use std::collections::HashMap;\n\nuse rand_core::{RngCore, OsRng};\n\nuse ciphersuite::group::GroupEncoding;\nuse frost::{\n  curve::Ristretto,\n  Participant,\n  tests::{key_gen, clone_without},\n};\n\nuse sp_application_crypto::{RuntimePublic, sr25519::Public};\n\nuse serai_db::{DbTxn, Db, MemDb};\n\nuse serai_client::{primitives::*, validator_sets::primitives::Session};\n\nuse messages::coordinator::*;\nuse crate::cosigner::Cosigner;\n\n#[test]\nfn test_cosigner() {\n  let keys = key_gen::<_, Ristretto>(&mut OsRng);\n\n  let participant_one = Participant::new(1).unwrap();\n\n  let block_number = OsRng.next_u64();\n  let block = [0xaa; 32];\n\n  let actual_id = SubstrateSignId {\n    session: Session(0),\n    id: SubstrateSignableId::CosigningSubstrateBlock(block),\n    attempt: (OsRng.next_u64() >> 32).try_into().unwrap(),\n  };\n\n  let mut signing_set = vec![];\n  while signing_set.len() < usize::from(keys.values().next().unwrap().params().t()) {\n    let candidate = Participant::new(\n      u16::try_from((OsRng.next_u64() % u64::try_from(keys.len()).unwrap()) + 1).unwrap(),\n    )\n    .unwrap();\n    if signing_set.contains(&candidate) {\n      continue;\n    }\n    signing_set.push(candidate);\n  }\n\n  let mut signers = HashMap::new();\n  let mut dbs = HashMap::new();\n  let mut preprocesses = HashMap::new();\n  for i in 1 ..= keys.len() {\n    let i = Participant::new(u16::try_from(i).unwrap()).unwrap();\n    let keys = keys.get(&i).unwrap().clone();\n\n    let mut db = MemDb::new();\n    let mut txn = db.txn();\n    let (signer, preprocess) =\n      Cosigner::new(&mut txn, Session(0), vec![keys], block_number, block, actual_id.attempt)\n        .unwrap();\n\n    match preprocess {\n      // All participants should emit a preprocess\n      ProcessorMessage::CosignPreprocess { id, preprocesses: mut these_preprocesses } => {\n        assert_eq!(id, actual_id);\n        assert_eq!(these_preprocesses.len(), 1);\n        if 
signing_set.contains(&i) {\n          preprocesses.insert(i, these_preprocesses.swap_remove(0));\n        }\n      }\n      _ => panic!(\"didn't get preprocess back\"),\n    }\n    txn.commit();\n\n    signers.insert(i, signer);\n    dbs.insert(i, db);\n  }\n\n  let mut shares = HashMap::new();\n  for i in &signing_set {\n    let mut txn = dbs.get_mut(i).unwrap().txn();\n    match signers\n      .get_mut(i)\n      .unwrap()\n      .handle(\n        &mut txn,\n        CoordinatorMessage::SubstratePreprocesses {\n          id: actual_id.clone(),\n          preprocesses: clone_without(&preprocesses, i),\n        },\n      )\n      .unwrap()\n    {\n      ProcessorMessage::SubstrateShare { id, shares: mut these_shares } => {\n        assert_eq!(id, actual_id);\n        assert_eq!(these_shares.len(), 1);\n        shares.insert(*i, these_shares.swap_remove(0));\n      }\n      _ => panic!(\"didn't get share back\"),\n    }\n    txn.commit();\n  }\n\n  for i in &signing_set {\n    let mut txn = dbs.get_mut(i).unwrap().txn();\n    match signers\n      .get_mut(i)\n      .unwrap()\n      .handle(\n        &mut txn,\n        CoordinatorMessage::SubstrateShares {\n          id: actual_id.clone(),\n          shares: clone_without(&shares, i),\n        },\n      )\n      .unwrap()\n    {\n      ProcessorMessage::CosignedBlock { block_number, block: signed_block, signature } => {\n        assert_eq!(signed_block, block);\n        assert!(Public::from_raw(keys[&participant_one].group_key().to_bytes()).verify(\n          &cosign_block_msg(block_number, block),\n          &Signature::from(<[u8; 64]>::try_from(signature).unwrap())\n        ));\n      }\n      _ => panic!(\"didn't get cosigned block back\"),\n    }\n    txn.commit();\n  }\n}\n"
  },
  {
    "path": "processor/src/tests/key_gen.rs",
    "content": "use std::collections::HashMap;\n\nuse zeroize::Zeroizing;\n\nuse rand_core::{RngCore, OsRng};\n\nuse ciphersuite::group::GroupEncoding;\nuse frost::{Participant, ThresholdParams, tests::clone_without};\n\nuse serai_db::{DbTxn, Db, MemDb};\n\nuse sp_application_crypto::sr25519;\nuse serai_client::validator_sets::primitives::{Session, KeyPair};\n\nuse messages::key_gen::*;\nuse crate::{\n  networks::Network,\n  key_gen::{KeyConfirmed, KeyGen},\n};\n\nconst ID: KeyGenId = KeyGenId { session: Session(1), attempt: 3 };\n\npub fn test_key_gen<N: Network>() {\n  let mut entropies = HashMap::new();\n  let mut dbs = HashMap::new();\n  let mut key_gens = HashMap::new();\n  for i in 1 ..= 5 {\n    let mut entropy = Zeroizing::new([0; 32]);\n    OsRng.fill_bytes(entropy.as_mut());\n    entropies.insert(i, entropy);\n    let db = MemDb::new();\n    dbs.insert(i, db.clone());\n    key_gens.insert(i, KeyGen::<N, MemDb>::new(db, entropies[&i].clone()));\n  }\n\n  let mut all_commitments = HashMap::new();\n  for i in 1 ..= 5 {\n    let key_gen = key_gens.get_mut(&i).unwrap();\n    let mut txn = dbs.get_mut(&i).unwrap().txn();\n    if let ProcessorMessage::Commitments { id, mut commitments } = key_gen.handle(\n      &mut txn,\n      CoordinatorMessage::GenerateKey {\n        id: ID,\n        params: ThresholdParams::new(3, 5, Participant::new(u16::try_from(i).unwrap()).unwrap())\n          .unwrap(),\n        shares: 1,\n      },\n    ) {\n      assert_eq!(id, ID);\n      assert_eq!(commitments.len(), 1);\n      all_commitments\n        .insert(Participant::new(u16::try_from(i).unwrap()).unwrap(), commitments.swap_remove(0));\n    } else {\n      panic!(\"didn't get commitments back\");\n    }\n    txn.commit();\n  }\n\n  // 1 is rebuilt on every step\n  // 2 is rebuilt here\n  // 3 ... 
are rebuilt once, one at each of the following steps\n  let rebuild = |key_gens: &mut HashMap<_, _>, dbs: &HashMap<_, MemDb>, i| {\n    key_gens.remove(&i);\n    key_gens.insert(i, KeyGen::<N, _>::new(dbs[&i].clone(), entropies[&i].clone()));\n  };\n  rebuild(&mut key_gens, &dbs, 1);\n  rebuild(&mut key_gens, &dbs, 2);\n\n  let mut all_shares = HashMap::new();\n  for i in 1 ..= 5 {\n    let key_gen = key_gens.get_mut(&i).unwrap();\n    let mut txn = dbs.get_mut(&i).unwrap().txn();\n    let i = Participant::new(u16::try_from(i).unwrap()).unwrap();\n    if let ProcessorMessage::Shares { id, mut shares } = key_gen.handle(\n      &mut txn,\n      CoordinatorMessage::Commitments { id: ID, commitments: clone_without(&all_commitments, &i) },\n    ) {\n      assert_eq!(id, ID);\n      assert_eq!(shares.len(), 1);\n      all_shares.insert(i, shares.swap_remove(0));\n    } else {\n      panic!(\"didn't get shares back\");\n    }\n    txn.commit();\n  }\n\n  // Rebuild 1 and 3\n  rebuild(&mut key_gens, &dbs, 1);\n  rebuild(&mut key_gens, &dbs, 3);\n\n  let mut res = None;\n  for i in 1 ..= 5 {\n    let key_gen = key_gens.get_mut(&i).unwrap();\n    let mut txn = dbs.get_mut(&i).unwrap().txn();\n    let i = Participant::new(u16::try_from(i).unwrap()).unwrap();\n    if let ProcessorMessage::GeneratedKeyPair { id, substrate_key, network_key } = key_gen.handle(\n      &mut txn,\n      CoordinatorMessage::Shares {\n        id: ID,\n        shares: vec![all_shares\n          .iter()\n          .filter_map(|(l, shares)| if i == *l { None } else { Some((*l, shares[&i].clone())) })\n          .collect()],\n      },\n    ) {\n      assert_eq!(id, ID);\n      if res.is_none() {\n        res = Some((substrate_key, network_key.clone()));\n      }\n      assert_eq!(res.as_ref().unwrap(), &(substrate_key, network_key));\n    } else {\n      panic!(\"didn't get key back\");\n    }\n    txn.commit();\n  }\n  let res = res.unwrap();\n\n  // Rebuild 1 and 4\n  rebuild(&mut key_gens, &dbs, 1);\n  
rebuild(&mut key_gens, &dbs, 4);\n\n  for i in 1 ..= 5 {\n    let key_gen = key_gens.get_mut(&i).unwrap();\n    let mut txn = dbs.get_mut(&i).unwrap().txn();\n    let KeyConfirmed { mut substrate_keys, mut network_keys } = key_gen.confirm(\n      &mut txn,\n      ID.session,\n      &KeyPair(sr25519::Public::from(res.0), res.1.clone().try_into().unwrap()),\n    );\n    txn.commit();\n\n    assert_eq!(substrate_keys.len(), 1);\n    let substrate_keys = substrate_keys.swap_remove(0);\n    assert_eq!(network_keys.len(), 1);\n    let network_keys = network_keys.swap_remove(0);\n\n    let params =\n      ThresholdParams::new(3, 5, Participant::new(u16::try_from(i).unwrap()).unwrap()).unwrap();\n    assert_eq!(substrate_keys.params(), params);\n    assert_eq!(network_keys.params(), params);\n    assert_eq!(\n      (\n        substrate_keys.group_key().to_bytes(),\n        network_keys.group_key().to_bytes().as_ref().to_vec()\n      ),\n      res\n    );\n  }\n}\n"
  },
  {
    "path": "processor/src/tests/literal/mod.rs",
    "content": "use dockertest::{\n  PullPolicy, StartPolicy, LogOptions, LogAction, LogPolicy, LogSource, Image,\n  TestBodySpecification, DockerOperations, DockerTest,\n};\n\nuse serai_db::MemDb;\n\n#[cfg(feature = \"bitcoin\")]\nmod bitcoin {\n  use std::sync::Arc;\n\n  use rand_core::OsRng;\n\n  use frost::Participant;\n\n  use bitcoin_serai::bitcoin::{\n    secp256k1::{SECP256K1, SecretKey, Message},\n    PrivateKey, PublicKey,\n    hashes::{HashEngine, Hash, sha256::Hash as Sha256},\n    sighash::{SighashCache, EcdsaSighashType},\n    absolute::LockTime,\n    Amount as BAmount, Sequence, Script, Witness, OutPoint,\n    address::Address as BAddress,\n    transaction::{Version, Transaction, TxIn, TxOut},\n    Network as BNetwork, ScriptBuf,\n    opcodes::all::{OP_SHA256, OP_EQUALVERIFY},\n  };\n\n  use scale::Encode;\n  use sp_application_crypto::Pair;\n  use serai_client::{in_instructions::primitives::Shorthand, primitives::insecure_pair_from_name};\n\n  use tokio::{\n    time::{timeout, Duration},\n    sync::Mutex,\n  };\n\n  use super::*;\n  use crate::{\n    networks::{Network, Bitcoin, Output, OutputType, Block},\n    tests::scanner::new_scanner,\n    multisigs::scanner::ScannerEvent,\n  };\n\n  #[test]\n  fn test_dust_constant() {\n    struct IsTrue<const V: bool>;\n    trait True {}\n    impl True for IsTrue<true> {}\n    fn check<T: True>() {\n      core::hint::black_box(());\n    }\n    check::<IsTrue<{ Bitcoin::DUST >= bitcoin_serai::wallet::DUST }>>();\n  }\n\n  #[test]\n  fn test_receive_data_from_input() {\n    let docker = spawn_bitcoin();\n    docker.run(|ops| async move {\n      let btc = bitcoin(&ops).await(MemDb::new()).await;\n\n      // generate a multisig address to receive the coins\n      let mut keys = frost::tests::key_gen::<_, <Bitcoin as Network>::Curve>(&mut OsRng)\n        .remove(&Participant::new(1).unwrap())\n        .unwrap();\n      <Bitcoin as Network>::tweak_keys(&mut keys);\n      let group_key = keys.group_key();\n      let 
serai_btc_address = <Bitcoin as Network>::external_address(&btc, group_key).await;\n\n      // btc key pair to send from\n      let private_key = PrivateKey::new(SecretKey::new(&mut rand_core::OsRng), BNetwork::Regtest);\n      let public_key = PublicKey::from_private_key(SECP256K1, &private_key);\n      let main_addr = BAddress::p2pkh(public_key, BNetwork::Regtest);\n\n      // get unlocked coins\n      let new_block = btc.get_latest_block_number().await.unwrap() + 1;\n      btc\n        .rpc\n        .rpc_call::<Vec<String>>(\"generatetoaddress\", serde_json::json!([100, main_addr]))\n        .await\n        .unwrap();\n\n      // create a scanner\n      let db = MemDb::new();\n      let mut scanner = new_scanner(&btc, &db, group_key, &Arc::new(Mutex::new(true))).await;\n\n      // make a transfer instruction & hash it for script.\n      let serai_address = insecure_pair_from_name(\"alice\").public();\n      let message = Shorthand::transfer(None, serai_address.into()).encode();\n      let mut data = Sha256::engine();\n      data.input(&message);\n\n      // make the output script => msg_script(OP_SHA256 PUSH MSG_HASH OP_EQUALVERIFY) + any_script\n      let mut script = ScriptBuf::builder()\n        .push_opcode(OP_SHA256)\n        .push_slice(Sha256::from_engine(data).as_byte_array())\n        .push_opcode(OP_EQUALVERIFY)\n        .into_script();\n      // append a regular spend script\n      for i in main_addr.script_pubkey().instructions() {\n        script.push_instruction(i.unwrap());\n      }\n\n      // Create the first transaction\n      let tx = btc.get_block(new_block).await.unwrap().txdata.swap_remove(0);\n      let mut tx = Transaction {\n        version: Version(2),\n        lock_time: LockTime::ZERO,\n        input: vec![TxIn {\n          previous_output: OutPoint { txid: tx.compute_txid(), vout: 0 },\n          script_sig: Script::new().into(),\n          sequence: Sequence(u32::MAX),\n          witness: Witness::default(),\n        }],\n        
output: vec![TxOut {\n          value: tx.output[0].value - BAmount::from_sat(10000),\n          script_pubkey: ScriptBuf::new_p2wsh(&script.wscript_hash()),\n        }],\n      };\n      tx.input[0].script_sig = Bitcoin::sign_btc_input_for_p2pkh(&tx, 0, &private_key);\n      let initial_output_value = tx.output[0].value;\n\n      // send it\n      btc.rpc.send_raw_transaction(&tx).await.unwrap();\n\n      // Chain a transaction spending it with the InInstruction embedded in the input\n      let mut tx = Transaction {\n        version: Version(2),\n        lock_time: LockTime::ZERO,\n        input: vec![TxIn {\n          previous_output: OutPoint { txid: tx.compute_txid(), vout: 0 },\n          script_sig: Script::new().into(),\n          sequence: Sequence(u32::MAX),\n          witness: Witness::new(),\n        }],\n        output: vec![TxOut {\n          value: tx.output[0].value - BAmount::from_sat(10000),\n          script_pubkey: serai_btc_address.into(),\n        }],\n      };\n\n      // add the witness script\n      // This is the standard script with an extra argument of the InInstruction\n      let mut sig = SECP256K1\n        .sign_ecdsa_low_r(\n          &Message::from_digest_slice(\n            SighashCache::new(&tx)\n              .p2wsh_signature_hash(0, &script, initial_output_value, EcdsaSighashType::All)\n              .unwrap()\n              .to_raw_hash()\n              .as_ref(),\n          )\n          .unwrap(),\n          &private_key.inner,\n        )\n        .serialize_der()\n        .to_vec();\n      sig.push(1);\n      tx.input[0].witness.push(sig);\n      tx.input[0].witness.push(public_key.inner.serialize());\n      tx.input[0].witness.push(message.clone());\n      tx.input[0].witness.push(script);\n\n      // Send it immediately, as Bitcoin allows mempool chaining\n      btc.rpc.send_raw_transaction(&tx).await.unwrap();\n\n      // Mine enough confirmations\n      let block_number = btc.get_latest_block_number().await.unwrap() + 
1;\n      for _ in 0 .. <Bitcoin as Network>::CONFIRMATIONS {\n        btc.mine_block().await;\n      }\n      let tx_block = btc.get_block(block_number).await.unwrap();\n\n      // verify that scanner picked up the output\n      let outputs =\n        match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() {\n          ScannerEvent::Block { is_retirement_block, block, outputs } => {\n            scanner.multisig_completed.send(false).unwrap();\n            assert!(!is_retirement_block);\n            assert_eq!(block, tx_block.id());\n            assert_eq!(outputs.len(), 1);\n            assert_eq!(outputs[0].kind(), OutputType::External);\n            outputs\n          }\n          _ => panic!(\"unexpectedly got eventuality completion\"),\n        };\n\n      // verify that the amount and message are correct\n      assert_eq!(outputs[0].balance().amount.0, tx.output[0].value.to_sat());\n      assert_eq!(outputs[0].data(), message);\n    });\n  }\n\n  fn spawn_bitcoin() -> DockerTest {\n    serai_docker_tests::build(\"bitcoin\".to_string());\n\n    let composition = TestBodySpecification::with_image(\n      Image::with_repository(\"serai-dev-bitcoin\").pull_policy(PullPolicy::Never),\n    )\n    .set_start_policy(StartPolicy::Strict)\n    .set_log_options(Some(LogOptions {\n      action: LogAction::Forward,\n      policy: LogPolicy::OnError,\n      source: LogSource::Both,\n    }))\n    .set_publish_all_ports(true);\n\n    let mut test = DockerTest::new().with_network(dockertest::Network::Isolated);\n    test.provide_container(composition);\n    test\n  }\n\n  async fn bitcoin(\n    ops: &DockerOperations,\n  ) -> impl Fn(MemDb) -> Pin<Box<dyn Send + Future<Output = Bitcoin>>> {\n    let handle = ops.handle(\"serai-dev-bitcoin\").host_port(8332).unwrap();\n    let url = format!(\"http://serai:seraidex@{}:{}\", handle.0, handle.1);\n    let bitcoin = Bitcoin::new(url.clone()).await;\n    bitcoin.fresh_chain().await;\n    move |_db| 
Box::pin(Bitcoin::new(url.clone()))\n  }\n\n  test_utxo_network!(\n    Bitcoin,\n    spawn_bitcoin,\n    bitcoin,\n    bitcoin_key_gen,\n    bitcoin_scanner,\n    bitcoin_no_deadlock_in_multisig_completed,\n    bitcoin_signer,\n    bitcoin_wallet,\n    bitcoin_addresses,\n  );\n}\n\n#[cfg(feature = \"monero\")]\nmod monero {\n  use super::*;\n  use crate::networks::{Network, Monero};\n\n  fn spawn_monero() -> DockerTest {\n    serai_docker_tests::build(\"monero\".to_string());\n\n    let composition = TestBodySpecification::with_image(\n      Image::with_repository(\"serai-dev-monero\").pull_policy(PullPolicy::Never),\n    )\n    .set_start_policy(StartPolicy::Strict)\n    .set_log_options(Some(LogOptions {\n      action: LogAction::Forward,\n      policy: LogPolicy::OnError,\n      source: LogSource::Both,\n    }))\n    .set_publish_all_ports(true);\n\n    let mut test = DockerTest::new();\n    test.provide_container(composition);\n    test\n  }\n\n  async fn monero(\n    ops: &DockerOperations,\n  ) -> impl Fn(MemDb) -> Pin<Box<dyn Send + Future<Output = Monero>>> {\n    let handle = ops.handle(\"serai-dev-monero\").host_port(18081).unwrap();\n    let url = format!(\"http://serai:seraidex@{}:{}\", handle.0, handle.1);\n    let monero = Monero::new(url.clone()).await;\n    while monero.get_latest_block_number().await.unwrap() < 150 {\n      monero.mine_block().await;\n    }\n    move |_db| Box::pin(Monero::new(url.clone()))\n  }\n\n  test_utxo_network!(\n    Monero,\n    spawn_monero,\n    monero,\n    monero_key_gen,\n    monero_scanner,\n    monero_no_deadlock_in_multisig_completed,\n    monero_signer,\n    monero_wallet,\n    monero_addresses,\n  );\n}\n\n#[cfg(feature = \"ethereum\")]\nmod ethereum {\n  use super::*;\n\n  use ciphersuite::Ciphersuite;\n  use ciphersuite_kp256::Secp256k1;\n\n  use serai_client::validator_sets::primitives::Session;\n\n  use crate::networks::Ethereum;\n\n  fn spawn_ethereum() -> DockerTest {\n    
serai_docker_tests::build(\"ethereum\".to_string());\n\n    let composition = TestBodySpecification::with_image(\n      Image::with_repository(\"serai-dev-ethereum\").pull_policy(PullPolicy::Never),\n    )\n    .set_start_policy(StartPolicy::Strict)\n    .set_log_options(Some(LogOptions {\n      action: LogAction::Forward,\n      policy: LogPolicy::OnError,\n      source: LogSource::Both,\n    }))\n    .set_publish_all_ports(true);\n\n    let mut test = DockerTest::new();\n    test.provide_container(composition);\n    test\n  }\n\n  async fn ethereum(\n    ops: &DockerOperations,\n  ) -> impl Fn(MemDb) -> Pin<Box<dyn Send + Future<Output = Ethereum<MemDb>>>> {\n    use std::sync::Arc;\n    use ethereum_serai::{\n      alloy::{\n        primitives::U256,\n        simple_request_transport::SimpleRequest,\n        rpc_client::ClientBuilder,\n        provider::{Provider, RootProvider},\n      },\n      deployer::Deployer,\n    };\n\n    let handle = ops.handle(\"serai-dev-ethereum\").host_port(8545).unwrap();\n    let url = format!(\"http://{}:{}\", handle.0, handle.1);\n    tokio::time::sleep(core::time::Duration::from_secs(15)).await;\n\n    {\n      let provider = Arc::new(RootProvider::new(\n        ClientBuilder::default().transport(SimpleRequest::new(url.clone()), true),\n      ));\n      provider.raw_request::<_, ()>(\"evm_setAutomine\".into(), [false]).await.unwrap();\n      provider.raw_request::<_, ()>(\"anvil_mine\".into(), [96]).await.unwrap();\n\n      // Perform deployment\n      {\n        // Make sure the Deployer constructor returns None, as it doesn't exist yet\n        assert!(Deployer::new(provider.clone()).await.unwrap().is_none());\n\n        // Deploy the Deployer\n        let tx = Deployer::deployment_tx();\n\n        provider\n          .raw_request::<_, ()>(\n            \"anvil_setBalance\".into(),\n            [\n              tx.recover_signer().unwrap().to_string(),\n              (U256::from(tx.tx().gas_limit) * 
U256::from(tx.tx().gas_price)).to_string(),\n            ],\n          )\n          .await\n          .unwrap();\n\n        let (tx, sig, _) = tx.into_parts();\n        let mut bytes = vec![];\n        tx.encode_with_signature_fields(&sig, &mut bytes);\n\n        let pending_tx = provider.send_raw_transaction(&bytes).await.unwrap();\n        provider.raw_request::<_, ()>(\"anvil_mine\".into(), [96]).await.unwrap();\n        //tokio::time::sleep(core::time::Duration::from_secs(15)).await;\n        let receipt = pending_tx.get_receipt().await.unwrap();\n        assert!(receipt.status());\n\n        let _ = Deployer::new(provider.clone())\n          .await\n          .expect(\"network error\")\n          .expect(\"deployer wasn't deployed\");\n      }\n    }\n\n    move |db| {\n      let url = url.clone();\n      Box::pin(async move {\n        {\n          let db = db.clone();\n          let url = url.clone();\n          // Spawn a task to deploy the proper Router when the time comes\n          tokio::spawn(async move {\n            let key = loop {\n              let Some(key) = crate::key_gen::NetworkKeyDb::get(&db, Session(0)) else {\n                tokio::time::sleep(core::time::Duration::from_secs(1)).await;\n                continue;\n              };\n              break ethereum_serai::crypto::PublicKey::new(\n                Secp256k1::read_G(&mut key.as_slice()).unwrap(),\n              )\n              .unwrap();\n            };\n            let provider = Arc::new(RootProvider::new(\n              ClientBuilder::default().transport(SimpleRequest::new(url.clone()), true),\n            ));\n            let deployer = Deployer::new(provider.clone()).await.unwrap().unwrap();\n\n            let mut tx = deployer.deploy_router(&key);\n            tx.gas_limit = 1_000_000u64;\n            tx.gas_price = 1_000_000_000u64.into();\n            let tx = ethereum_serai::crypto::deterministically_sign(&tx);\n\n            provider\n              .raw_request::<_, 
()>(\n                \"anvil_setBalance\".into(),\n                [\n                  tx.recover_signer().unwrap().to_string(),\n                  (U256::from(tx.tx().gas_limit) * U256::from(tx.tx().gas_price)).to_string(),\n                ],\n              )\n              .await\n              .unwrap();\n\n            let (tx, sig, _) = tx.into_parts();\n            let mut bytes = vec![];\n            tx.encode_with_signature_fields(&sig, &mut bytes);\n            let pending_tx = provider.send_raw_transaction(&bytes).await.unwrap();\n            provider.raw_request::<_, ()>(\"anvil_mine\".into(), [96]).await.unwrap();\n            let receipt = pending_tx.get_receipt().await.unwrap();\n            assert!(receipt.status());\n\n            let _router = deployer.find_router(provider.clone(), &key).await.unwrap().unwrap();\n          });\n        }\n\n        Ethereum::new(db, url.clone(), String::new()).await\n      })\n    }\n  }\n\n  test_network!(\n    Ethereum<MemDb>,\n    spawn_ethereum,\n    ethereum,\n    ethereum_key_gen,\n    ethereum_scanner,\n    ethereum_no_deadlock_in_multisig_completed,\n    ethereum_signer,\n    ethereum_wallet,\n  );\n}\n"
  },
  {
    "path": "processor/src/tests/mod.rs",
    "content": "use std::sync::OnceLock;\n\nmod key_gen;\n\nmod scanner;\n\nmod signer;\npub(crate) use signer::sign;\n\nmod cosigner;\nmod batch_signer;\n\nmod wallet;\n\nmod addresses;\n\n// Effective Once\nstatic INIT_LOGGER_CELL: OnceLock<()> = OnceLock::new();\nfn init_logger() {\n  *INIT_LOGGER_CELL.get_or_init(env_logger::init)\n}\n\n#[macro_export]\nmacro_rules! test_network {\n  (\n    $N: ty,\n    $docker: ident,\n    $network: ident,\n    $key_gen: ident,\n    $scanner: ident,\n    $no_deadlock_in_multisig_completed: ident,\n    $signer: ident,\n    $wallet: ident,\n  ) => {\n    use core::{pin::Pin, future::Future};\n    use $crate::tests::{\n      init_logger,\n      key_gen::test_key_gen,\n      scanner::{test_scanner, test_no_deadlock_in_multisig_completed},\n      signer::test_signer,\n      wallet::test_wallet,\n    };\n\n    // This doesn't interact with a node and accordingly doesn't need to be spawn one\n    #[tokio::test]\n    async fn $key_gen() {\n      init_logger();\n      test_key_gen::<$N>();\n    }\n\n    #[test]\n    fn $scanner() {\n      init_logger();\n      let docker = $docker();\n      docker.run(|ops| async move {\n        let new_network = $network(&ops).await;\n        test_scanner(new_network).await;\n      });\n    }\n\n    #[test]\n    fn $no_deadlock_in_multisig_completed() {\n      init_logger();\n      let docker = $docker();\n      docker.run(|ops| async move {\n        let new_network = $network(&ops).await;\n        test_no_deadlock_in_multisig_completed(new_network).await;\n      });\n    }\n\n    #[test]\n    fn $signer() {\n      init_logger();\n      let docker = $docker();\n      docker.run(|ops| async move {\n        let new_network = $network(&ops).await;\n        test_signer(new_network).await;\n      });\n    }\n\n    #[test]\n    fn $wallet() {\n      init_logger();\n      let docker = $docker();\n      docker.run(|ops| async move {\n        let new_network = $network(&ops).await;\n        
test_wallet(new_network).await;\n      });\n    }\n  };\n}\n\n#[macro_export]\nmacro_rules! test_utxo_network {\n  (\n    $N: ty,\n    $docker: ident,\n    $network: ident,\n    $key_gen: ident,\n    $scanner: ident,\n    $no_deadlock_in_multisig_completed: ident,\n    $signer: ident,\n    $wallet: ident,\n    $addresses: ident,\n  ) => {\n    use $crate::tests::addresses::test_addresses;\n\n    test_network!(\n      $N,\n      $docker,\n      $network,\n      $key_gen,\n      $scanner,\n      $no_deadlock_in_multisig_completed,\n      $signer,\n      $wallet,\n    );\n\n    #[test]\n    fn $addresses() {\n      init_logger();\n      let docker = $docker();\n      docker.run(|ops| async move {\n        let new_network = $network(&ops).await;\n        test_addresses(new_network).await;\n      });\n    }\n  };\n}\n\nmod literal;\n"
  },
  {
    "path": "processor/src/tests/scanner.rs",
    "content": "use core::{pin::Pin, time::Duration, future::Future};\nuse std::sync::Arc;\n\nuse rand_core::OsRng;\n\nuse ciphersuite::{group::GroupEncoding, Ciphersuite};\nuse frost::{Participant, tests::key_gen};\n\nuse tokio::{sync::Mutex, time::timeout};\n\nuse serai_db::{DbTxn, Db, MemDb};\nuse serai_client::validator_sets::primitives::Session;\n\nuse crate::{\n  networks::{OutputType, Output, Block, Network},\n  key_gen::NetworkKeyDb,\n  multisigs::scanner::{ScannerEvent, Scanner, ScannerHandle},\n};\n\npub async fn new_scanner<N: Network, D: Db>(\n  network: &N,\n  db: &D,\n  group_key: <N::Curve as Ciphersuite>::G,\n  first: &Arc<Mutex<bool>>,\n) -> ScannerHandle<N, D> {\n  let activation_number = network.get_latest_block_number().await.unwrap();\n  let mut db = db.clone();\n  let (mut scanner, current_keys) = Scanner::new(network.clone(), db.clone());\n  let mut first = first.lock().await;\n  if *first {\n    assert!(current_keys.is_empty());\n    let mut txn = db.txn();\n    scanner.register_key(&mut txn, activation_number, group_key).await;\n    txn.commit();\n    for _ in 0 .. N::CONFIRMATIONS {\n      network.mine_block().await;\n    }\n    *first = false;\n  } else {\n    assert_eq!(current_keys.len(), 1);\n  }\n  scanner\n}\n\npub async fn test_scanner<N: Network>(\n  new_network: impl Fn(MemDb) -> Pin<Box<dyn Send + Future<Output = N>>>,\n) {\n  let mut keys =\n    frost::tests::key_gen::<_, N::Curve>(&mut OsRng).remove(&Participant::new(1).unwrap()).unwrap();\n  N::tweak_keys(&mut keys);\n  let group_key = keys.group_key();\n\n  let mut db = MemDb::new();\n  {\n    let mut txn = db.txn();\n    NetworkKeyDb::set(&mut txn, Session(0), &group_key.to_bytes().as_ref().to_vec());\n    txn.commit();\n  }\n  let network = new_network(db.clone()).await;\n\n  // Mine blocks so there's a confirmed block\n  for _ in 0 .. 
N::CONFIRMATIONS {\n    network.mine_block().await;\n  }\n\n  let first = Arc::new(Mutex::new(true));\n  let scanner = new_scanner(&network, &db, group_key, &first).await;\n\n  // Receive funds\n  let block = network.test_send(N::external_address(&network, keys.group_key()).await).await;\n  let block_id = block.id();\n\n  // Verify the Scanner picked them up\n  let verify_event = |mut scanner: ScannerHandle<N, MemDb>| async {\n    let outputs =\n      match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() {\n        ScannerEvent::Block { is_retirement_block, block, outputs } => {\n          scanner.multisig_completed.send(false).unwrap();\n          assert!(!is_retirement_block);\n          assert_eq!(block, block_id);\n          assert_eq!(outputs.len(), 1);\n          assert_eq!(outputs[0].kind(), OutputType::External);\n          outputs\n        }\n        ScannerEvent::Completed(_, _, _, _, _) => {\n          panic!(\"unexpectedly got eventuality completion\");\n        }\n      };\n    (scanner, outputs)\n  };\n  let (mut scanner, outputs) = verify_event(scanner).await;\n\n  // Create a new scanner off the current DB and verify it re-emits the above events\n  verify_event(new_scanner(&network, &db, group_key, &first).await).await;\n\n  // Acknowledge the block\n  let mut cloned_db = db.clone();\n  let mut txn = cloned_db.txn();\n  assert_eq!(scanner.ack_block(&mut txn, block_id).await.1, outputs);\n  scanner.release_lock().await;\n  txn.commit();\n\n  // There should be no more events\n  assert!(timeout(Duration::from_secs(30), scanner.events.recv()).await.is_err());\n\n  // Create a new scanner off the current DB and make sure it also does nothing\n  assert!(timeout(\n    Duration::from_secs(30),\n    new_scanner(&network, &db, group_key, &first).await.events.recv()\n  )\n  .await\n  .is_err());\n}\n\npub async fn test_no_deadlock_in_multisig_completed<N: Network>(\n  new_network: impl Fn(MemDb) -> Pin<Box<dyn Send + 
Future<Output = N>>>,\n) {\n  // This test scans two blocks then acknowledges one, yet a network with one confirm won't scan\n  // two blocks before the first is acknowledged (due to the look-ahead limit)\n  if N::CONFIRMATIONS <= 1 {\n    return;\n  }\n\n  let mut db = MemDb::new();\n  let network = new_network(db.clone()).await;\n\n  // Mine blocks so there's a confirmed block\n  for _ in 0 .. N::CONFIRMATIONS {\n    network.mine_block().await;\n  }\n\n  let (mut scanner, current_keys) = Scanner::new(network.clone(), db.clone());\n  assert!(current_keys.is_empty());\n\n  // Register keys to cause Block events at CONFIRMATIONS (dropped since first keys),\n  // CONFIRMATIONS + 1, and CONFIRMATIONS + 2\n  for i in 0 .. 3 {\n    let key = {\n      let mut keys = key_gen(&mut OsRng);\n      for keys in keys.values_mut() {\n        N::tweak_keys(keys);\n      }\n      let key = keys[&Participant::new(1).unwrap()].group_key();\n      if i == 0 {\n        let mut txn = db.txn();\n        NetworkKeyDb::set(&mut txn, Session(0), &key.to_bytes().as_ref().to_vec());\n        txn.commit();\n\n        // Sleep for 5 seconds as setting the Network key value will trigger an async task for\n        // Ethereum\n        tokio::time::sleep(Duration::from_secs(5)).await;\n      }\n      key\n    };\n\n    let mut txn = db.txn();\n    scanner\n      .register_key(\n        &mut txn,\n        network.get_latest_block_number().await.unwrap() + N::CONFIRMATIONS + i,\n        key,\n      )\n      .await;\n    txn.commit();\n  }\n\n  for _ in 0 .. 
(3 * N::CONFIRMATIONS) {\n    network.mine_block().await;\n  }\n\n  // Block for the second set of keys registered\n  let block_id =\n    match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() {\n      ScannerEvent::Block { is_retirement_block, block, outputs: _ } => {\n        scanner.multisig_completed.send(false).unwrap();\n        assert!(!is_retirement_block);\n        block\n      }\n      ScannerEvent::Completed(_, _, _, _, _) => {\n        panic!(\"unexpectedly got eventuality completion\");\n      }\n    };\n\n  // Block for the third set of keys registered\n  match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() {\n    ScannerEvent::Block { .. } => {}\n    ScannerEvent::Completed(_, _, _, _, _) => {\n      panic!(\"unexpectedly got eventuality completion\");\n    }\n  };\n\n  // The ack_block acquisition shows the Scanner isn't maintaining the lock on its own thread after\n  // emitting the Block event\n  // TODO: This is incomplete. Also test after emitting Completed\n  let mut txn = db.txn();\n  assert_eq!(scanner.ack_block(&mut txn, block_id).await.1, vec![]);\n  scanner.release_lock().await;\n  txn.commit();\n\n  scanner.multisig_completed.send(false).unwrap();\n}\n"
  },
  {
    "path": "processor/src/tests/signer.rs",
    "content": "use core::{pin::Pin, future::Future};\nuse std::collections::HashMap;\n\nuse rand_core::{RngCore, OsRng};\n\nuse ciphersuite::group::GroupEncoding;\nuse frost::{\n  Participant, ThresholdKeys,\n  tests::{key_gen, clone_without},\n};\n\nuse serai_db::{DbTxn, Db, MemDb};\n\nuse serai_client::{\n  primitives::{ExternalNetworkId, ExternalCoin, Amount, ExternalBalance},\n  validator_sets::primitives::Session,\n};\n\nuse messages::sign::*;\nuse crate::{\n  Payment,\n  networks::{Output, Transaction, Eventuality, Network},\n  key_gen::NetworkKeyDb,\n  multisigs::scheduler::Scheduler,\n  signer::Signer,\n};\n\n#[allow(clippy::type_complexity)]\npub async fn sign<N: Network>(\n  network: N,\n  session: Session,\n  mut keys_txs: HashMap<\n    Participant,\n    (ThresholdKeys<N::Curve>, (N::SignableTransaction, N::Eventuality)),\n  >,\n) -> <N::Eventuality as Eventuality>::Claim {\n  let actual_id = SignId { session, id: [0xaa; 32], attempt: 0 };\n\n  let mut keys = HashMap::new();\n  let mut txs = HashMap::new();\n  for (i, (these_keys, this_tx)) in keys_txs.drain() {\n    keys.insert(i, these_keys);\n    txs.insert(i, this_tx);\n  }\n\n  let mut signers = HashMap::new();\n  let mut dbs = HashMap::new();\n  let mut t = 0;\n  for i in 1 ..= keys.len() {\n    let i = Participant::new(u16::try_from(i).unwrap()).unwrap();\n    let keys = keys.remove(&i).unwrap();\n    t = keys.params().t();\n    signers.insert(i, Signer::<_, MemDb>::new(network.clone(), Session(0), vec![keys]));\n    dbs.insert(i, MemDb::new());\n  }\n  drop(keys);\n\n  let mut signing_set = vec![];\n  while signing_set.len() < usize::from(t) {\n    let candidate = Participant::new(\n      u16::try_from((OsRng.next_u64() % u64::try_from(signers.len()).unwrap()) + 1).unwrap(),\n    )\n    .unwrap();\n    if signing_set.contains(&candidate) {\n      continue;\n    }\n    signing_set.push(candidate);\n  }\n\n  let mut preprocesses = HashMap::new();\n\n  let mut eventuality = None;\n  for i in 1 ..= 
signers.len() {\n    let i = Participant::new(u16::try_from(i).unwrap()).unwrap();\n    let (tx, this_eventuality) = txs.remove(&i).unwrap();\n    let mut txn = dbs.get_mut(&i).unwrap().txn();\n    match signers\n      .get_mut(&i)\n      .unwrap()\n      .sign_transaction(&mut txn, actual_id.id, tx, &this_eventuality)\n      .await\n    {\n      // All participants should emit a preprocess\n      Some(ProcessorMessage::Preprocess { id, preprocesses: mut these_preprocesses }) => {\n        assert_eq!(id, actual_id);\n        assert_eq!(these_preprocesses.len(), 1);\n        if signing_set.contains(&i) {\n          preprocesses.insert(i, these_preprocesses.swap_remove(0));\n        }\n      }\n      _ => panic!(\"didn't get preprocess back\"),\n    }\n    txn.commit();\n\n    if eventuality.is_none() {\n      eventuality = Some(this_eventuality.clone());\n    }\n    assert_eq!(eventuality, Some(this_eventuality));\n  }\n\n  let mut shares = HashMap::new();\n  for i in &signing_set {\n    let mut txn = dbs.get_mut(i).unwrap().txn();\n    match signers\n      .get_mut(i)\n      .unwrap()\n      .handle(\n        &mut txn,\n        CoordinatorMessage::Preprocesses {\n          id: actual_id.clone(),\n          preprocesses: clone_without(&preprocesses, i),\n        },\n      )\n      .await\n      .unwrap()\n    {\n      ProcessorMessage::Share { id, shares: mut these_shares } => {\n        assert_eq!(id, actual_id);\n        assert_eq!(these_shares.len(), 1);\n        shares.insert(*i, these_shares.swap_remove(0));\n      }\n      _ => panic!(\"didn't get share back\"),\n    }\n    txn.commit();\n  }\n\n  let mut tx_id = None;\n  for i in &signing_set {\n    let mut txn = dbs.get_mut(i).unwrap().txn();\n    match signers\n      .get_mut(i)\n      .unwrap()\n      .handle(\n        &mut txn,\n        CoordinatorMessage::Shares { id: actual_id.clone(), shares: clone_without(&shares, i) },\n      )\n      .await\n      .unwrap()\n    {\n      ProcessorMessage::Completed 
{ session, id, tx } => {\n        assert_eq!(session, Session(0));\n        assert_eq!(id, actual_id.id);\n        if tx_id.is_none() {\n          tx_id = Some(tx.clone());\n        }\n        assert_eq!(tx_id, Some(tx));\n      }\n      _ => panic!(\"didn't get TX back\"),\n    }\n    txn.commit();\n  }\n\n  let mut typed_claim = <N::Eventuality as Eventuality>::Claim::default();\n  typed_claim.as_mut().copy_from_slice(tx_id.unwrap().as_ref());\n  assert!(network.check_eventuality_by_claim(&eventuality.unwrap(), &typed_claim).await);\n  typed_claim\n}\n\npub async fn test_signer<N: Network>(\n  new_network: impl Fn(MemDb) -> Pin<Box<dyn Send + Future<Output = N>>>,\n) {\n  let mut keys = key_gen(&mut OsRng);\n  for keys in keys.values_mut() {\n    N::tweak_keys(keys);\n  }\n  let key = keys[&Participant::new(1).unwrap()].group_key();\n\n  let mut db = MemDb::new();\n  {\n    let mut txn = db.txn();\n    NetworkKeyDb::set(&mut txn, Session(0), &key.to_bytes().as_ref().to_vec());\n    txn.commit();\n  }\n  let network = new_network(db.clone()).await;\n\n  let outputs = network\n    .get_outputs(&network.test_send(N::external_address(&network, key).await).await, key)\n    .await;\n  let sync_block = network.get_latest_block_number().await.unwrap() - N::CONFIRMATIONS;\n\n  let amount = (2 * N::DUST) + 1000;\n  let plan = {\n    let mut txn = db.txn();\n    let mut scheduler = N::Scheduler::new::<MemDb>(&mut txn, key, N::NETWORK);\n    let payments = vec![Payment {\n      address: N::external_address(&network, key).await,\n      data: None,\n      balance: ExternalBalance {\n        coin: match N::NETWORK {\n          ExternalNetworkId::Bitcoin => ExternalCoin::Bitcoin,\n          ExternalNetworkId::Ethereum => ExternalCoin::Ether,\n          ExternalNetworkId::Monero => ExternalCoin::Monero,\n        },\n        amount: Amount(amount),\n      },\n    }];\n    let mut plans = scheduler.schedule::<MemDb>(&mut txn, outputs.clone(), payments, key, false);\n    
assert_eq!(plans.len(), 1);\n    plans.swap_remove(0)\n  };\n\n  let mut keys_txs = HashMap::new();\n  let mut eventualities = vec![];\n  for (i, keys) in keys.drain() {\n    let (signable, eventuality) =\n      network.prepare_send(sync_block, plan.clone(), 0).await.unwrap().tx.unwrap();\n\n    eventualities.push(eventuality.clone());\n    keys_txs.insert(i, (keys, (signable, eventuality)));\n  }\n\n  let claim = sign(network.clone(), Session(0), keys_txs).await;\n\n  // Mine a block, and scan it, to ensure that the TX actually made it on chain\n  network.mine_block().await;\n  let block_number = network.get_latest_block_number().await.unwrap();\n  let tx = network.get_transaction_by_eventuality(block_number, &eventualities[0]).await;\n  let outputs = network\n    .get_outputs(\n      &network.get_block(network.get_latest_block_number().await.unwrap()).await.unwrap(),\n      key,\n    )\n    .await;\n  // Don't run if Ethereum as the received output will revert by the contract\n  // (and therefore not actually exist)\n  if N::NETWORK != ExternalNetworkId::Ethereum {\n    assert_eq!(outputs.len(), 1 + usize::from(u8::from(plan.change.is_some())));\n    // Adjust the amount for the fees\n    let amount = amount - tx.fee(&network).await;\n    if plan.change.is_some() {\n      // Check either output since Monero will randomize its output order\n      assert!(\n        (outputs[0].balance().amount.0 == amount) || (outputs[1].balance().amount.0 == amount)\n      );\n    } else {\n      assert!(outputs[0].balance().amount.0 == amount);\n    }\n  }\n\n  // Check the eventualities pass\n  for eventuality in eventualities {\n    let completion = network.confirm_completion(&eventuality, &claim).await.unwrap().unwrap();\n    assert_eq!(N::Eventuality::claim(&completion), claim);\n  }\n}\n"
  },
  {
    "path": "processor/src/tests/wallet.rs",
    "content": "use core::{time::Duration, pin::Pin, future::Future};\nuse std::collections::HashMap;\n\nuse rand_core::OsRng;\n\nuse ciphersuite::group::GroupEncoding;\nuse frost::{Participant, tests::key_gen};\n\nuse tokio::time::timeout;\n\nuse serai_db::{DbTxn, Db, MemDb};\n\nuse serai_client::{\n  primitives::{ExternalNetworkId, ExternalCoin, Amount, ExternalBalance},\n  validator_sets::primitives::Session,\n};\n\nuse crate::{\n  Payment, Plan,\n  networks::{Output, Transaction, Eventuality, Block, Network},\n  key_gen::NetworkKeyDb,\n  multisigs::{\n    scanner::{ScannerEvent, Scanner},\n    scheduler::{self, Scheduler},\n  },\n  tests::sign,\n};\n\n// Tests the Scanner, Scheduler, and Signer together\npub async fn test_wallet<N: Network>(\n  new_network: impl Fn(MemDb) -> Pin<Box<dyn Send + Future<Output = N>>>,\n) {\n  let mut keys = key_gen(&mut OsRng);\n  for keys in keys.values_mut() {\n    N::tweak_keys(keys);\n  }\n  let key = keys[&Participant::new(1).unwrap()].group_key();\n\n  let mut db = MemDb::new();\n  {\n    let mut txn = db.txn();\n    NetworkKeyDb::set(&mut txn, Session(0), &key.to_bytes().as_ref().to_vec());\n    txn.commit();\n  }\n  let network = new_network(db.clone()).await;\n\n  // Mine blocks so there's a confirmed block\n  for _ in 0 .. N::CONFIRMATIONS {\n    network.mine_block().await;\n  }\n\n  let (mut scanner, current_keys) = Scanner::new(network.clone(), db.clone());\n  assert!(current_keys.is_empty());\n  let (block_id, outputs) = {\n    let mut txn = db.txn();\n    scanner.register_key(&mut txn, network.get_latest_block_number().await.unwrap(), key).await;\n    txn.commit();\n    for _ in 0 .. 
N::CONFIRMATIONS {\n      network.mine_block().await;\n    }\n\n    let block = network.test_send(N::external_address(&network, key).await).await;\n    let block_id = block.id();\n\n    match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() {\n      ScannerEvent::Block { is_retirement_block, block, outputs } => {\n        scanner.multisig_completed.send(false).unwrap();\n        assert!(!is_retirement_block);\n        assert_eq!(block, block_id);\n        assert_eq!(outputs.len(), 1);\n        (block_id, outputs)\n      }\n      ScannerEvent::Completed(_, _, _, _, _) => {\n        panic!(\"unexpectedly got eventuality completion\");\n      }\n    }\n  };\n  let mut txn = db.txn();\n  assert_eq!(scanner.ack_block(&mut txn, block_id.clone()).await.1, outputs);\n  scanner.release_lock().await;\n  txn.commit();\n\n  let mut txn = db.txn();\n  let mut scheduler = N::Scheduler::new::<MemDb>(&mut txn, key, N::NETWORK);\n  let amount = 2 * N::DUST;\n  let plans = scheduler.schedule::<MemDb>(\n    &mut txn,\n    outputs.clone(),\n    vec![Payment {\n      address: N::external_address(&network, key).await,\n      data: None,\n      balance: ExternalBalance {\n        coin: match N::NETWORK {\n          ExternalNetworkId::Bitcoin => ExternalCoin::Bitcoin,\n          ExternalNetworkId::Ethereum => ExternalCoin::Ether,\n          ExternalNetworkId::Monero => ExternalCoin::Monero,\n        },\n        amount: Amount(amount),\n      },\n    }],\n    key,\n    false,\n  );\n  txn.commit();\n  assert_eq!(plans.len(), 1);\n  assert_eq!(plans[0].key, key);\n  if std::any::TypeId::of::<N::Scheduler>() ==\n    std::any::TypeId::of::<scheduler::smart_contract::Scheduler<N>>()\n  {\n    assert_eq!(plans[0].inputs, vec![]);\n  } else {\n    assert_eq!(plans[0].inputs, outputs);\n  }\n  assert_eq!(\n    plans[0].payments,\n    vec![Payment {\n      address: N::external_address(&network, key).await,\n      data: None,\n      balance: ExternalBalance {\n        
coin: match N::NETWORK {\n          ExternalNetworkId::Bitcoin => ExternalCoin::Bitcoin,\n          ExternalNetworkId::Ethereum => ExternalCoin::Ether,\n          ExternalNetworkId::Monero => ExternalCoin::Monero,\n        },\n        amount: Amount(amount),\n      }\n    }]\n  );\n  assert_eq!(plans[0].change, N::change_address(key));\n\n  {\n    let mut buf = vec![];\n    plans[0].write(&mut buf).unwrap();\n    assert_eq!(plans[0], Plan::<N>::read::<&[u8]>(&mut buf.as_ref()).unwrap());\n  }\n\n  // Execute the plan\n  let mut keys_txs = HashMap::new();\n  let mut eventualities = vec![];\n  for (i, keys) in keys.drain() {\n    let (signable, eventuality) = network\n      .prepare_send(network.get_block_number(&block_id).await, plans[0].clone(), 0)\n      .await\n      .unwrap()\n      .tx\n      .unwrap();\n\n    eventualities.push(eventuality.clone());\n    keys_txs.insert(i, (keys, (signable, eventuality)));\n  }\n\n  let claim = sign(network.clone(), Session(0), keys_txs).await;\n  network.mine_block().await;\n  let block_number = network.get_latest_block_number().await.unwrap();\n  let tx = network.get_transaction_by_eventuality(block_number, &eventualities[0]).await;\n  let block = network.get_block(block_number).await.unwrap();\n  let outputs = network.get_outputs(&block, key).await;\n\n  // Don't run if Ethereum as the received output will revert by the contract\n  // (and therefore not actually exist)\n  if N::NETWORK != ExternalNetworkId::Ethereum {\n    assert_eq!(outputs.len(), 1 + usize::from(u8::from(plans[0].change.is_some())));\n    // Adjust the amount for the fees\n    let amount = amount - tx.fee(&network).await;\n    if plans[0].change.is_some() {\n      // Check either output since Monero will randomize its output order\n      assert!(\n        (outputs[0].balance().amount.0 == amount) || (outputs[1].balance().amount.0 == amount)\n      );\n    } else {\n      assert!(outputs[0].balance().amount.0 == amount);\n    }\n  }\n\n  for eventuality in 
eventualities {\n    let completion = network.confirm_completion(&eventuality, &claim).await.unwrap().unwrap();\n    assert_eq!(N::Eventuality::claim(&completion), claim);\n  }\n\n  for _ in 1 .. N::CONFIRMATIONS {\n    network.mine_block().await;\n  }\n\n  if N::NETWORK != ExternalNetworkId::Ethereum {\n    match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() {\n      ScannerEvent::Block { is_retirement_block, block: block_id, outputs: these_outputs } => {\n        scanner.multisig_completed.send(false).unwrap();\n        assert!(!is_retirement_block);\n        assert_eq!(block_id, block.id());\n        assert_eq!(these_outputs, outputs);\n      }\n      ScannerEvent::Completed(_, _, _, _, _) => {\n        panic!(\"unexpectedly got eventuality completion\");\n      }\n    }\n\n    // Check the Scanner DB can reload the outputs\n    let mut txn = db.txn();\n    assert_eq!(scanner.ack_block(&mut txn, block.id()).await.1, outputs);\n    scanner.release_lock().await;\n    txn.commit();\n  }\n}\n"
  },
  {
    "path": "rust-toolchain.toml",
    "content": "[toolchain]\nchannel = \"1.89\"\ntargets = [\"wasm32v1-none\"]\nprofile = \"minimal\"\ncomponents = [\"rust-src\", \"rustfmt\", \"clippy\"]\n"
  },
  {
    "path": "spec/DKG Exclusions.md",
    "content": "Upon an issue with the DKG, the honest validators must remove the malicious\nvalidators. Ideally, a threshold signature would be used, yet that would require\na threshold key (which would require authentication by a MuSig signature). A\nMuSig signature which specifies the signing set (or rather, the excluded\nsigners) achieves the most efficiency.\n\nWhile that resolves the on-chain behavior, the Tributary also has to perform\nexclusion. This has the following forms:\n\n1) Rejecting further transactions (required)\n2) Rejecting further participation in Tendermint\n\nWith regards to rejecting further participation in Tendermint, it's *ideal* to\nremove the validator from the list of validators. Each validator removed from\nparticipation, yet not from the list of validators, increases the likelihood of\nthe network failing to form consensus.\n\nWith regards to the economic security, an honest 67% may remove a faulty\n(explicitly or simply offline) 33%, letting 67% of the remaining 67% (4/9ths)\ntake control of the associated private keys. In such a case, the malicious\nparties are defined as the 4/9ths of validators with access to the private key\nand the 33% removed (who together form >67% of the originally intended\nvalidator set and have presumably provided enough stake to cover losses).\n"
  },
  {
    "path": "spec/Getting Started.md",
    "content": "# Getting Started\n\n### Dependencies\n\n##### Ubuntu\n\n```\nsudo apt-get install -y build-essential clang-11 pkg-config cmake git curl protobuf-compiler\n```\n\n### Install rustup\n\n##### Linux\n\n```\ncurl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh\n```\n\n##### macOS\n\n```\nbrew install rustup\n```\n\n### Install Rust\n\n```\nrustup update\nrustup toolchain install stable\nrustup target add wasm32v1-none\nrustup toolchain install nightly\nrustup target add wasm32v1-none --toolchain nightly\n```\n\n### Install Solidity with `svm`\n\n```\ncargo install svm-rs\nsvm install 0.8.26\nsvm use 0.8.26\n```\n\n### Install foundry (for tests)\n\n```\ncurl -L https://foundry.paradigm.xyz | bash\nfoundryup\n```\n\n### Clone and Build Serai\n\n```\ngit clone https://github.com/serai-dex/serai\ncd serai\ncargo build --release --all-features\n```\n\n### Run Tests\n\nRunning tests requires:\n\n- [A rootless Docker setup](https://docs.docker.com/engine/security/rootless/)\n- A properly configured Bitcoin regtest node (available via Docker)\n- A properly configured Monero regtest node (available via Docker)\n- A properly configured monero-wallet-rpc instance (available via Docker)\n\nTo start the required daemons, one may run:\n\n```\ncargo run -p serai-orchestrator -- key_gen dev\ncargo run -p serai-orchestrator -- setup dev\n```\n\nand then:\n\n```\ncargo run -p serai-orchestrator -- start dev bitcoin-daemon monero-daemon monero-wallet-rpc\n```\n\nFinally, to run the tests:\n\n```\ncargo test --all-features\n```\n"
  },
  {
    "path": "spec/Serai.md",
    "content": "# Serai\n\nSerai is a decentralized execution layer whose validators form multisig wallets\nfor various connected networks, offering secure decentralized control of foreign\ncoins to applications built on it.\n\nSerai is exemplified by Serai DEX, an automated-market-maker (AMM) decentralized\nexchange, allowing swapping Bitcoin, Ether, DAI, and Monero. It is the premier\napplication of Serai.\n\n### Substrate\n\nSerai is based on [Substrate](https://docs.substrate.io), a blockchain framework\noffering a robust infrastructure.\n"
  },
  {
    "path": "spec/coordinator/Coordinator.md",
    "content": "# Coordinator\n\nThe coordinator is a service which communicates with all of the processors,\nall of the other coordinators over a secondary P2P network, and with the Serai\nnode.\n\nThis document primarily details its flow with regards to the Serai node and\nprocessor.\n\n### New Set Event\n\nOn `validator_sets::pallet::Event::NewSet`, the coordinator spawns a tributary\nfor the new set. It additionally sends the processor\n`key_gen::CoordinatorMessage::GenerateKey`.\n\n### Key Generation Event\n\nOn `validator_sets::pallet::Event::KeyGen`, the coordinator sends\n`substrate::CoordinatorMessage::ConfirmKeyPair` to the processor.\n\n### Batch\n\nOn `substrate::ProcessorMessage::Batch`, the coordinator notes what the on-chain\n`Batch` should be, for verification once published.\n\n### SignedBatch\n\nOn `substrate::ProcessorMessage::SignedBatch`, the coordinator publishes an\nunsigned transaction containing the signed batch to the Serai blockchain.\n\n### Sign Completed\n\nOn `sign::ProcessorMessage::Completed`, the coordinator makes a tributary\ntransaction containing the transaction hash the signing process was supposedly\ncompleted with.\n\nDue to rushing adversaries, the actual transaction completing the plan may be\ndistinct on-chain. These messages solely exist to coordinate the signing\nprocess, not to determine chain state.\n"
  },
  {
    "path": "spec/coordinator/Tributary.md",
    "content": "# Tributary\n\nA tributary is a side-chain, created for a specific multisig instance, used\nas a verifiable broadcast layer.\n\n## Transactions\n\n### Key Gen Commitments\n\n`DkgCommitments` is created when a processor sends the coordinator\n`key_gen::ProcessorMessage::Commitments`. When all validators participating in\na multisig publish `DkgCommitments`, the coordinator sends the processor\n`key_gen::CoordinatorMessage::Commitments`, excluding the processor's own\ncommitments.\n\n### Key Gen Shares\n\n`DkgShares` is created when a processor sends the coordinator\n`key_gen::ProcessorMessage::Shares`. The coordinator additionally includes its\nown pair of MuSig nonces, used in a signing protocol to inform Substrate of the\nkey's successful creation.\n\nWhen all validators participating in a multisig publish `DkgShares`, the\ncoordinator sends the processor `key_gen::CoordinatorMessage::Shares`, excluding\nthe processor's own shares and the MuSig nonces.\n\n### Key Gen Confirmation\n\n`DkgConfirmed` is created when a processor sends the coordinator\n`key_gen::ProcessorMessage::GeneratedKeyPair`. The coordinator takes the MuSig\nnonces they prior associated with this DKG attempt and publishes their signature\nshare.\n\nWhen all validators participating in the multisig publish `DkgConfirmed`, an\nextrinsic calling `validator_sets::pallet::set_keys` is made to confirm the\nkeys.\n\nSetting the keys on the Serai blockchain as such lets it receive `Batch`s,\nprovides a BFT consensus guarantee, and enables accessibility by users. While\nthe tributary itself could offer both the BFT consensus guarantee, and\nverifiable accessibility to users, they'd both require users access the\ntributary. 
Since Substrate must already know the resulting key, there's no value\nto usage of the tributary as-such, as all desired properties are already offered\nby Substrate.\n\nNote that the keys are confirmed when Substrate emits a `KeyGen` event,\nregardless of if the Tributary has the expected `DkgConfirmed` transactions.\n\n### Batch\n\nWhen *TODO*, a `Batch` transaction is provided. This is used to have the group\nacknowledge and synchronize around a batch, without the overhead of voting in\nits acknowledgment.\n\nWhen a `Batch` transaction is included, participants are allowed to publish\ntransactions to produce a threshold signature for the batch synchronized over.\n\n### Substrate Block\n\n`SubstrateBlock` is provided when the processor sends the coordinator\n`substrate::ProcessorMessage::SubstrateBlockAck`.\n\nWhen a `SubstrateBlock` transaction is included, participants are allowed to\npublish transactions for the signing protocols it causes.\n\n### Batch Preprocess\n\n`BatchPreprocess` is created when a processor sends the coordinator\n`coordinator::ProcessorMessage::BatchPreprocess` and a `Batch` transaction\nallowing the batch to be signed has already been included on chain.\n\nWhen `t` validators have published `BatchPreprocess` transactions, if the\ncoordinator represents one of the first `t` validators to do so, a\n`coordinator::ProcessorMessage::BatchPreprocesses` is sent to the processor,\nexcluding the processor's own preprocess.\n\n### Batch Share\n\n`BatchShare` is created when a processor sends the coordinator\n`coordinator::ProcessorMessage::BatchShare`. 
The relevant `Batch`\ntransaction having already been included on chain follows from\n`coordinator::ProcessorMessage::BatchShare` being a response to a message which\nalso has that precondition.\n\nWhen the `t` validators who first published `BatchPreprocess` transactions have\npublished `BatchShare` transactions, if the coordinator represents one of the\nfirst `t` validators to do so, a `coordinator::ProcessorMessage::BatchShares`\nwith the relevant shares (excluding the processor's own) is sent to the\nprocessor.\n\n### Sign Preprocess\n\n`SignPreprocess` is created when a processor sends the coordinator\n`sign::ProcessorMessage::Preprocess` and a `SubstrateBlock` transaction\nallowing the transaction to be signed has already been included on chain.\n\nWhen `t` validators have published `SignPreprocess` transactions, if the\ncoordinator represents one of the first `t` validators to do so, a\n`sign::ProcessorMessage::Preprocesses` is sent to the processor,\nexcluding the processor's own preprocess.\n\n### Sign Share\n\n`SignShare` is created when a processor sends the coordinator\n`sign::ProcessorMessage::Share`. The relevant `SubstrateBlock` transaction\nhaving already been included on chain follows from\n`sign::ProcessorMessage::Share` being a response to a message which\nalso has that precondition.\n\nWhen the `t` validators who first published `SignPreprocess` transactions have\npublished `SignShare` transactions, if the coordinator represents one of the\nfirst `t` validators to do so, a `sign::ProcessorMessage::Shares` with the\nrelevant shares (excluding the processor's own) is sent to the processor.\n\n### Sign Completed\n\n`SignCompleted` is created when a processor sends the coordinator\n`sign::ProcessorMessage::Completed`. As soon as 34% of validators send\n`Completed`, the signing protocol is no longer further attempted.\n\n## Re-attempts\n\nKey generation protocols may fail if a validator is malicious. 
Signing\nprotocols, whether batch or transaction, may fail if a validator goes offline or\ntakes too long to respond. Accordingly, the tributary will schedule re-attempts.\nThese are communicated with `key_gen::CoordinatorMessage::GenerateKey`,\n`coordinator::CoordinatorMessage::BatchReattempt`, and\n`sign::CoordinatorMessage::Reattempt`.\n\nTODO: Document the re-attempt scheduling logic.\n"
  },
  {
    "path": "spec/cryptography/Distributed Key Generation.md",
    "content": "# Distributed Key Generation\n\nSerai uses a modification of Pedersen's Distributed Key Generation, which is\nactually Feldman's Verifiable Secret Sharing Scheme run by every participant, as\ndescribed in the FROST paper. The modification included in FROST was to include\na Schnorr Proof of Knowledge for coefficient zero, preventing rogue key attacks.\nThis results in a two-round protocol.\n\n### Encryption\n\nIn order to protect the secret shares during communication, the `dkg` library\nestablishes a public key for encryption at the start of a given protocol.\nEvery encrypted message (such as the secret shares) then includes a per-message\nencryption key. These two keys are used in an Elliptic-curve Diffie-Hellman\nhandshake to derive a shared key. This shared key is then hashed to obtain a key\nand IV for use in a ChaCha20 stream cipher instance, which is xor'd against a\nmessage to encrypt it.\n\n### Blame\n\nSince each message has a distinct key attached, and accordingly a distinct\nshared key, it's possible to reveal the shared key for a specific message\nwithout revealing any other message's decryption keys. This is utilized when a\nparticipant misbehaves. A participant who receives an invalid encrypted message\npublishes its key, able to do so without concern for side effects. With the key\npublished, all participants can decrypt the message in order to decide blame.\n\nWhile key reuse by a participant is considered as them revealing the messages\nthemselves, and therefore out of scope, there is an attack where a malicious\nadversary claims another participant's encryption key. They'll fail to encrypt\ntheir message, and the recipient will issue a blame statement. This blame\nstatement, intended to reveal the malicious adversary, also reveals the message\nby the participant whose keys were co-opted. 
To resolve this, a\nproof-of-possession is also included with encrypted messages, ensuring only\nthose actually with per-message keys can claim to use them.\n"
  },
  {
    "path": "spec/cryptography/FROST.md",
    "content": "# FROST\n\nSerai implements [FROST](https://eprint.iacr.org/2020/852), as specified in\n[draft-irtf-cfrg-frost-11](https://datatracker.ietf.org/doc/draft-irtf-cfrg-frost/).\n\n### Modularity\n\nIn order to support other algorithms which decompose to Schnorr, our FROST\nimplementation is generic, able to run any algorithm satisfying its `Algorithm`\ntrait. With these algorithms, there's frequently a requirement for further\ntranscripting than what FROST expects. Accordingly, the transcript format is\nalso modular so formats which aren't naive like the IETF's can be used.\n\n### Extensions\n\nIn order to support algorithms which require their nonces be represented across\nmultiple generators, FROST supports providing a nonce's commitments across\nmultiple generators. In order to ensure their correctness, an extended\n[CP93's Discrete Log Equality Proof](https://chaum.com/wp-content/uploads/2021/12/Wallet_Databases.pdf)\nis used. The extension is simply to transcript `n` generators, instead of just\ntwo, enabling proving for all of them at once.\n\nSince FROST nonces are binomial, every nonce would require two DLEq proofs. To\nmake this more efficient, we hash their commitments to obtain a binding factor,\nbefore doing a single DLEq proof for `d + be`, similar to how FROST calculates\nits nonces (as well as MuSig's key aggregation).\n\nAs some algorithms require multiple nonces, effectively including multiple\nSchnorr signatures within one signature, the library also supports providing\nmultiple nonces. The second component of a FROST nonce is intended to be\nmultiplied by a per-participant binding factor to ensure the security of FROST.\nWhen additional nonces are used, this is actually a per-nonce per-participant\nbinding factor.\n\nWhen multiple nonces are used, with multiple generators, we use a single DLEq\nproof for all nonces, merging their challenges. 
This provides a proof of `1 + n`\nelements instead of `2n`.\n\nFinally, to support additive offset signing schemes (accounts, stealth\naddresses, randomization), it's possible to specify a scalar offset for keys.\nThe public key signed for is also offset by this value. During the signing\nprocess, the offset is explicitly transcripted. Then, the offset is added to the\nparticipant with the lowest ID.\n\n# Caching\n\nmodular-frost supports caching a preprocess. This is done by having all\npreprocesses use a seeded RNG. Accordingly, the entire preprocess can be derived\nfrom the RNG seed, making the cache just the seed.\n\nReusing preprocesses would enable a third-party to recover your private key\nshare. Accordingly, you MUST not reuse preprocesses. Third-party knowledge of\nyour preprocess would also enable their recovery of your private key share.\nAccordingly, you MUST treat cached preprocesses with the same security as your\nprivate key share.\n\nSince a reused seed will lead to a reused preprocess, seeded RNGs are generally\nfrowned upon when doing multisignature operations. This isn't an issue as each\nnew preprocess obtains a fresh seed from the specified RNG. Assuming the\nprovided RNG isn't generating the same seed multiple times, the only way for\nthis seeded RNG to fail is if a preprocess is loaded multiple times, which was\nalready a failure point.\n"
  },
  {
    "path": "spec/integrations/Bitcoin.md",
    "content": "# Bitcoin\n\n### Addresses\n\nBitcoin addresses are an enum, defined as follows:\n\n  - `p2pkh`:  20-byte hash.\n  - `p2sh`:   20-byte hash.\n  - `p2wpkh`: 20-byte hash.\n  - `p2wsh`:  32-byte hash.\n  - `p2tr`:   32-byte key.\n\n### In Instructions\n\nBitcoin In Instructions are present via the transaction's last output in the\nform of `OP_RETURN`, and accordingly limited to 80 bytes. `origin` is\nautomatically set to the transaction's first input's address, if recognized.\nIf it's not recognized, an address of the multisig's current Bitcoin address is\nused, causing any failure to become a donation.\n\n### Out Instructions\n\nOut Instructions ignore `data`.\n"
  },
  {
    "path": "spec/integrations/Ethereum.md",
    "content": "# Ethereum\n\n### Addresses\n\nEthereum addresses are 20-byte hashes, identical to Ethereum proper.\n\n### In Instructions\n\nIn Instructions may be created in one of two ways.\n\n1) Have an EOA call `transfer` or `transferFrom` on an ERC20, appending the\n   encoded InInstruction directly after the calldata. `origin` defaults to the\n   party transferred from.\n2) Call `inInstruction` on the Router. `origin` defaults to `msg.sender`.\n\n### Out Instructions\n\n`data` is limited to 512 bytes.\n\nIf `data` isn't provided or is malformed, ETH transfers will execute with 5,000\ngas and token transfers with 100,000 gas.\n\nIf `data` is provided and well-formed, `destination` is ignored and the Ethereum\nRouter will construct and call a new contract to proxy the contained calls. The\ntransfer executes to the constructed contract as above, before the constructed\ncontract is called with the calls inside `data`. The sandboxed execution has a\ngas limit of 350,000.\n"
  },
  {
    "path": "spec/integrations/Instructions.md",
    "content": "# Instructions\n\nInstructions are used to communicate with networks connected to Serai, and they\ncome in two forms:\n\n  - In Instructions are programmable specifications paired with incoming coins,\nencoded into transactions on connected networks. Serai will parse included\ninstructions when it receives coins, executing the included specs.\n\n  - Out Instructions detail how to transfer coins, either to a Serai address or\nan address native to the network of the coins in question.\n\nA transaction containing an In Instruction and an Out Instruction (to a native\naddress) will receive coins to Serai and send coins from Serai, without\nrequiring directly performing any transactions on Serai itself.\n\nAll instructions are encoded under [Shorthand](#shorthand). Shorthand provides\nfrequent use cases to create minimal data representations on connected networks.\n\nInstructions are interpreted according to their non-Serai network. Addresses\nhave no validation performed unless otherwise noted. If the processor is\ninstructed to act on invalid data, it will drop the entire instruction.\n\n### Serialization\n\nInstructions are [SCALE](https://docs.substrate.io/reference/scale-codec/) encoded.\n\n### In Instruction\n\nInInstruction is an enum of:\n\n  - `Transfer`\n  - `Dex(Data)`\n\nThe specified target will be minted an appropriate amount of the respective\nSerai token. If `Dex`, the encoded call will be executed.\n\n### Refundable In Instruction\n\n  - `origin`      (Option\\<ExternalAddress>): Address, from the network of\norigin, which sent coins in.\n  - `instruction` (InInstruction):            The action to perform with the\nincoming coins.\n\nNetworks may automatically provide `origin`. 
If they do, the instruction may\nstill provide `origin`, overriding the automatically provided value.\n\nIf the instruction fails, coins are scheduled to be returned to `origin`,\nif provided.\n\n### Out Instruction\n\n  - `address` (ExternalAddress): Address to transfer the coins included with\nthis instruction to.\n  - `data`    (Option<Data>):    Data to include when transferring coins.\n\nNo validation of external addresses/data is performed on-chain. If data is\nspecified for a chain not supporting data, it is silently dropped.\n\n### Destination\n\nDestination is an enum of SeraiAddress and OutInstruction.\n\n### Shorthand\n\nShorthand is an enum which expands to an Refundable In Instruction.\n\n##### Raw\n\nRaw Shorthand contains a Refundable In Instruction directly. This is a verbose\nfallback option for infrequent use cases not covered by Shorthand.\n\n##### Swap\n\n  - `origin`  (Option\\<ExternalAddress>): Refundable In Instruction's `origin`.\n  - `coin`    (Coin):                     Coin to swap funds for.\n  - `minimum` (Amount):                   Minimum amount of `coin` to receive.\n  - `out`     (Destination):              Final destination for funds.\n\nwhich expands to:\n\n```\nRefundableInInstruction {\n  origin,\n  instruction: InInstruction::Dex(swap(Incoming Asset, coin, minimum, out)),\n}\n```\n\nwhere `swap` is a function which:\n\n  1) Swaps the incoming funds for SRI.\n  2) Swaps the SRI for `coin`.\n  3) Checks the amount of `coin` received is greater than `minimum`.\n  4) Executes `out` with the amount of `coin` received.\n\n##### Add Liquidity\n\n  - `origin`  (Option\\<ExternalAddress>): Refundable In Instruction's `origin`.\n  - `minimum` (Amount):                   Minimum amount of SRI tokens to swap\nhalf for.\n  - `gas`     (Amount):                   Amount of SRI to send to `address` to\ncover gas in the future.\n  - `address` (Address):                  Account to send the created liquidity\ntokens.\n\nwhich expands 
to:\n\n```\nRefundableInInstruction {\n  origin,\n  instruction: InInstruction::Dex(\n    swap_and_add_liquidity(Incoming Asset, minimum, gas, address)\n  ),\n}\n```\n\nwhere `swap_and_add_liquidity` is a function which:\n\n  1) Swaps half of the incoming funds for SRI.\n  2) Checks the amount of SRI received is greater than `minimum`.\n  3) Calls `swap_and_add_liquidity` with the amount of SRI received - `gas`, and\na matching amount of the incoming coin.\n  4) Transfers any leftover funds to `address`.\n"
  },
  {
    "path": "spec/integrations/Monero.md",
    "content": "# Monero\n\n### Addresses\n\nMonero addresses are structs, defined as follows:\n\n  - `kind`:  Enum {\n               Standard,\n               Subaddress,\n               Featured { flags: u8 }\n             }\n  - `spend`: [u8; 32]\n  - `view`:  [u8; 32]\n\nIntegrated addresses are not supported due to only being able to send to one\nper Monero transaction. Supporting them would add a level of complexity\nto Serai which isn't worth it.\n\nThis definition of Featured Addresses is non-standard since the flags are\nrepresented by a u8, not a VarInt. Currently, only half of the bits are used,\nwith no further planned features. Accordingly, it should be fine to fix its\nsize. If needed, another enum entry for a 2-byte flags Featured Address could be\nadded.\n\nThis definition is also non-standard by not having a Payment ID field. This is\nper not supporting integrated addresses.\n\n### In Instructions\n\nMonero In Instructions are present via `tx.extra`, specifically via inclusion\nin a `TX_EXTRA_NONCE` tag. The tag is followed by the VarInt length of its\ncontents, and then additionally marked by a byte `127`. The following data is\nlimited to 254 bytes.\n\n### Out Instructions\n\nOut Instructions ignore `data`.\n"
  },
  {
    "path": "spec/policy/Canonical Chain.md",
    "content": "# Canonical Chain\n\nAs Serai is a network connected to many external networks, at some point we will\nlikely have to ask ourselves what the canonical chain for a network is. This\ndocument intends to establish soft, non-binding policy, in the hopes it'll guide\nmost discussions on the matter.\n\nThe canonical chain is the chain Serai follows and honors transactions on. Serai\ndoes not guarantee operations availability nor integrity on any chains other\nthan the canonical chain. Which chain is considered canonical is dependent on\nseveral factors.\n\n### Finalization\n\nSerai finalizes blocks from external networks onto itself. Once a block is\nfinalized, it is considered irreversible. Accordingly, the primary tenet\nregarding what chain Serai will honor is the chain Serai has finalized. We can\nonly assume the integrity of our coins on that chain.\n\n### Node Software\n\nOnly node software which passes a quality threshold and actively identifies as\nbelonging to an external network's protocol should be run. Never should a\ntransformative node (a node trying to create a new network from an existing one)\nbe run in place of a node actually for the external network. Beyond active\nidentification, it must have community recognition as belonging.\n\nIf the majority of a community actively identifying as the network stands behind\na hard fork, it should not be considered as a new network yet the next step of\nthe existing one. If a hard fork breaks Serai's integrity, it should not be\nsupported.\n\nMultiple independent nodes should be run in order to reduce the likelihood of\nvulnerabilities to any specific node's faults.\n\n### Rollbacks\n\nOver time, various networks have rolled back in response to exploits. A rollback\nshould undergo the same scrutiny as a hard fork. 
If the rollback breaks Serai's\nintegrity, yet someone identifying as from the project offers to restore\nintegrity out-of-band, integrity is considered kept so long as the offer is\nfollowed through on.\n\nSince a rollback would break Serai's finalization policy, a technical note on\nhow it could be implemented is provided.\n\nAssume a blockchain from `0 .. 100` exists, with `100a ..= 500a` being rolled\nback blocks. The new chain extends from `99` with `100b ..= 200b`. Serai would\ndefine the canonical chain as `0 .. 100`, `100a ..= 500a`, `100b ..= 200b`, with\n`100b` building off `500a`. Serai would have to perform data-availability for\n`100a ..= 500a` (such as via a JSON file in-tree), and would have to modify the\nprocessor to edit its `Eventuality`s/UTXOs at `500a` back to the state at `99`.\nAny `Burn`s handled after `99` should be handled once again, if the transactions\nfrom `100a ..= 500a` cannot simply be carried over.\n\n### On Fault\n\nIf the canonical chain does put Serai's coins into an invalid state,\nirreversibly and without amends, then the discrepancy should be amortized to all\nusers as feasible, yet affected operations should otherwise halt if under\npermanent duress.\n\nFor example, if Serai lists a token which has a by-governance blacklist\nfunction, and is blacklisted without appeal, Serai should destroy all associated\nsriXYZ and cease operations.\n\nIf a bug, either in the chain or in Serai's own code, causes a loss of 10% of\ncoins (without amends), operations should halt until all outputs in system can\nhave their virtual amount reduced by a total amount of the loss,\nproportionalized to each output. Alternatively, Serai could decrease all token\nbalances by 10%. All liquidity/swap operations should be halted until users are\ngiven proper time to withdraw, if they so choose, before operations resume.\n"
  },
  {
    "path": "spec/processor/Multisig Rotation.md",
    "content": "# Multisig Rotation\n\nSubstrate is expected to determine when a new validator set instance will be\ncreated, and with it, a new multisig. Upon the successful creation of a new\nmultisig, as determined by the new multisig setting their key pair on Substrate,\nrotation begins.\n\n### Timeline\n\nThe following timeline is established:\n\n1) The new multisig is created, and has its keys set on Serai. Once the next\n   `Batch` with a new external network block is published, its block becomes the\n   \"queue block\". The new multisig is set to activate at the \"queue block\", plus\n   `CONFIRMATIONS` blocks (the \"activation block\").\n\n   We don't use the last `Batch`'s external network block, as that `Batch` may\n   be older than `CONFIRMATIONS` blocks. Any yet-to-be-included-and-finalized\n   `Batch` will be within `CONFIRMATIONS` blocks of what any processor has\n   scanned however, as it'll wait for inclusion and finalization before\n   continuing scanning.\n\n2) Once the \"activation block\" itself has been finalized on Serai, UIs should\n   start exclusively using the new multisig. If the \"activation block\" isn't\n   finalized within `2 * CONFIRMATIONS` blocks, UIs should stop making\n   transactions to any multisig on that network.\n\n   Waiting for Serai's finalization prevents a UI from using an unfinalized\n   \"activation block\" before a re-organization to a shorter chain. If a\n   transaction to Serai was carried from the unfinalized \"activation block\"\n   to the shorter chain, it'd no longer be after the \"activation block\" and\n   accordingly would be ignored.\n\n   We could not wait for Serai to finalize the block, yet instead wait for the\n   block to have `CONFIRMATIONS` confirmations. This would prevent needing to\n   wait for an indeterminate amount of time for Serai to finalize the\n   \"activation block\", with the knowledge it should be finalized. 
Doing so would\n   open UIs to eclipse attacks, where they live on an alternate chain where a\n   possible \"activation block\" is finalized, yet Serai finalizes a distinct\n   \"activation block\". If the alternate chain was longer than the finalized\n   chain, the above issue would be reopened.\n\n   The reason for UIs stopping under abnormal behavior is as follows. Given a\n   sufficiently delayed `Batch` for the \"activation block\", UIs will use the old\n   multisig past the point it will be deprecated. Accordingly, UIs must realize\n   when `Batch`s are so delayed and continued transactions are a risk. While\n   `2 * CONFIRMATIONS` is presumably well within the 6 hour period (defined\n   below), that period exists for low-fee transactions at time of congestion. It\n   does not exist for UIs with old state, though it can be used to compensate\n   for them (reducing the tolerance for inclusion delays). `2 * CONFIRMATIONS`\n   is before the 6 hour period is enacted, preserving the tolerance for\n   inclusion delays, yet still should only happen under highly abnormal\n   circumstances.\n\n   In order to minimize the time it takes for \"activation block\" to be\n   finalized, a `Batch` will always be created for it, regardless of if it would\n   otherwise have a `Batch` created.\n\n3) The prior multisig continues handling `Batch`s and `Burn`s for\n   `CONFIRMATIONS` blocks, plus 10 minutes, after the \"activation block\".\n\n   The first `CONFIRMATIONS` blocks is due to the fact the new multisig\n   shouldn't actually be sent coins during this period, making it irrelevant.\n   If coins are prematurely sent to the new multisig, they're artificially\n   delayed until the end of the `CONFIRMATIONS` blocks plus 10 minutes period.\n   This prevents an adversary from minting Serai tokens using coins in the new\n   multisig, yet then burning them to drain the prior multisig, creating a lack\n   of liquidity for several blocks.\n\n   The reason for the 10 minutes is to 
provide grace to honest UIs. Since UIs\n   will wait until Serai confirms the \"activation block\" for keys before sending\n   to them, which will take `CONFIRMATIONS` blocks plus some latency, UIs would\n   make transactions to the prior multisig past the end of this period if it was\n   `CONFIRMATIONS` alone. Since the next period is `CONFIRMATIONS` blocks, which\n   is how long transactions take to confirm, transactions made past the end of\n   this period would only be received after the next period. After the next period,\n   the prior multisig adds fees and a delay to all received funds (as it\n   forwards the funds from itself to the new multisig). The 10 minutes provides\n   grace for latency.\n\n   The 10 minutes is a delay on anyone who immediately transitions to the new\n   multisig, in a no latency environment, yet the delay is preferable to fees\n   from forwarding. It also should be less than 10 minutes thanks to various\n   latencies.\n\n4) The prior multisig continues handling `Batch`s and `Burn`s for another\n   `CONFIRMATIONS` blocks.\n\n   This is for two reasons:\n\n   1) Coins sent to the new multisig still need time to gain sufficient\n      confirmations.\n   2) All outputs belonging to the prior multisig should become available within\n      `CONFIRMATIONS` blocks.\n\n   All `Burn`s handled during this period should use the new multisig for the\n   change address. This should effect a transfer of most outputs.\n\n   With the expected transfer of most outputs, and the new multisig receiving\n   new external transactions, the new multisig takes the responsibility of\n   signing all unhandled and newly emitted `Burn`s.\n\n5) For the next 6 hours, all non-`Branch` outputs received are immediately\n   forwarded to the new multisig. 
Only external transactions to the new multisig\n   are included in `Batch`s.\n\n   The new multisig infers the `InInstruction`, and refund address, for\n   forwarded `External` outputs via reading what they were for the original\n   `External` output.\n\n   Alternatively, the `InInstruction`, with refund address explicitly included,\n   could be included in the forwarding transaction. This may fail if the\n   `InInstruction` omitted the refund address and is too large to fit in a\n   transaction with one explicitly included. On such failure, the refund would\n   be immediately issued instead.\n\n6) Once the 6 hour period has expired, the prior multisig stops handling outputs\n   it didn't itself create. Any remaining `Eventuality`s are completed, and any\n   available/freshly available outputs are forwarded (creating new\n   `Eventuality`s which also need to successfully resolve).\n\n   Once all the 6 hour period has expired, no `Eventuality`s remain, and all\n   outputs are forwarded, the multisig publishes a final `Batch` of the first\n   block, plus `CONFIRMATIONS`, which met these conditions, regardless of if it\n   would've otherwise had a `Batch`. No further actions by it, nor its\n   validators, are expected (unless, of course, those validators remain present\n   in the new multisig).\n\n7) The new multisig confirms all transactions from all prior multisigs were made\n   as expected, including the reported `Batch`s.\n\n   Unfortunately, we cannot solely check the immediately prior multisig due to\n   the ability for two sequential malicious multisigs to steal. If multisig\n   `n - 2` only transfers a fraction of its coins to multisig `n - 1`, multisig\n   `n - 1` can 'honestly' operate on the dishonest state it was given,\n   laundering it. 
This would let multisig `n - 1` forward the results of its\n   as-expected operations from a dishonest starting point to the new multisig,\n   and multisig `n` would attest to multisig `n - 1`'s expected (and therefore\n   presumed honest) operations, assuming liability. This would cause an honest\n   multisig to face full liability for the invalid state, causing it to be fully\n   slashed (as needed to reacquire any lost coins).\n\n   This would appear short-circuitable if multisig `n - 1` transfers coins\n   exceeding the relevant Serai tokens' supply. Serai never expects to operate\n   in an over-solvent state, yet balance should trend upwards due to a flat fee\n   applied to each received output (preventing a griefing attack). Any balance\n   greater than the tokens' supply may have had funds skimmed off the top, yet\n   they'd still guarantee the solvency of Serai without any additional fees\n   passed to users. Unfortunately, due to the requirement to verify the `Batch`s\n   published (as else the Serai tokens' supply may be manipulated), this cannot\n   actually be achieved (at least, not without a ZK proof the published `Batch`s\n   were correct).\n\n8) The new multisig publishes the next `Batch`, signifying the accepting of full\n   responsibilities and a successful close of the prior multisig.\n\n### Latency and Fees\n\nSlightly before the end of step 3, the new multisig should start receiving new\nexternal outputs. These won't be confirmed for another `CONFIRMATIONS` blocks,\nand the new multisig won't start handling `Burn`s for another `CONFIRMATIONS`\nblocks plus 10 minutes. Accordingly, the new multisig should only become\nresponsible for `Burn`s shortly after it has taken ownership of the stream of\nnewly received coins.\n\nBefore it takes responsibility, it also should've been transferred all internal\noutputs under the standard scheduling flow. 
Any delayed outputs will be\nimmediately forwarded, and external stragglers are only reported to Serai once\nsufficiently confirmed in the new multisig. Accordingly, liquidity should avoid\nfragmentation during rotation. The only latency should be on the 10 minutes\npresent, and on delayed outputs, which should've been immediately usable, having\nto wait another `CONFIRMATIONS` blocks to be confirmed once forwarded.\n\nImmediate forwarding does unfortunately prevent batching inputs to reduce fees.\nGiven immediate forwarding only applies to latent outputs, considered\nexceptional, and the protocol's fee handling ensures solvency, this is accepted.\n"
  },
  {
    "path": "spec/processor/Processor.md",
    "content": "# Processor\n\nThe processor is a service which has an instance spawned per network. It is\nresponsible for several tasks, from scanning an external network to signing\ntransactions with payments.\n\nThis document primarily discusses its flow with regards to the coordinator.\n\n### Generate Key\n\nOn `key_gen::CoordinatorMessage::GenerateKey`, the processor begins a pair of\ninstances of the distributed key generation protocol specified in the FROST\npaper.\n\nThe first instance is for a key to use on the external network. The second\ninstance is for a Ristretto public key used to publish data to the Serai\nblockchain. This pair of FROST DKG instances is considered a single instance of\nSerai's overall key generation protocol.\n\nThe commitments for both protocols are sent to the coordinator in a single\n`key_gen::ProcessorMessage::Commitments`.\n\n### Key Gen Commitments\n\nOn `key_gen::CoordinatorMessage::Commitments`, the processor continues the\nspecified key generation instance. The secret shares for each fellow\nparticipant are sent to the coordinator in a\n`key_gen::ProcessorMessage::Shares`.\n\n#### Key Gen Shares\n\nOn `key_gen::CoordinatorMessage::Shares`, the processor completes the specified\nkey generation instance. The generated key pair is sent to the coordinator in a\n`key_gen::ProcessorMessage::GeneratedKeyPair`.\n\n### Confirm Key Pair\n\nOn `substrate::CoordinatorMessage::ConfirmKeyPair`, the processor starts using\nthe newly confirmed key, scanning blocks on the external network for\ntransfers to it.\n\n### External Network Block\n\nWhen the external network has a new block, which is considered finalized\n(either due to being literally finalized or due to having a sufficient amount\nof confirmations), it's scanned.\n\nOutputs to the key of Serai's multisig are saved to the database. Outputs which\nnewly transfer into Serai are used to build `Batch`s for the block. 
The\nprocessor then begins a threshold signature protocol with its key pair's\nRistretto key to sign the `Batch`s.\n\nThe `Batch`s are each sent to the coordinator in a\n`substrate::ProcessorMessage::Batch`, enabling the coordinator to know what\n`Batch`s *should* be published to Serai. After each\n`substrate::ProcessorMessage::Batch`, the preprocess for the first instance of\nits signing protocol is sent to the coordinator in a\n`coordinator::ProcessorMessage::BatchPreprocess`.\n\nAs a design comment, we *may* be able to sign now possible, already scheduled,\nbranch/leaf transactions at this point. Doing so would be giving a mutable\nborrow over the scheduler to both the external network and the Serai network,\nand would accordingly be unsafe. We may want to look at splitting the scheduler\nin two, in order to reduce latency (TODO).\n\n### Batch Preprocesses\n\nOn `coordinator::CoordinatorMessage::BatchPreprocesses`, the processor\ncontinues the specified batch signing protocol, sending\n`coordinator::ProcessorMessage::BatchShare` to the coordinator.\n\n### Batch Shares\n\nOn `coordinator::CoordinatorMessage::BatchShares`, the processor\ncompletes the specified batch signing protocol. If successful, the processor\nstops signing for this batch and sends\n`substrate::ProcessorMessage::SignedBatch` to the coordinator.\n\n### Batch Re-attempt\n\nOn `coordinator::CoordinatorMessage::BatchReattempt`, the processor will create\na new instance of the batch signing protocol. 
The new protocol's preprocess is\nsent to the coordinator in a `coordinator::ProcessorMessage::BatchPreprocess`.\n\n### Substrate Block\n\nOn `substrate::CoordinatorMessage::SubstrateBlock`, the processor:\n\n1) Marks all blocks, up to the external block now considered finalized by\n   Serai, as having had their batches signed.\n2) Adds the new outputs from newly finalized blocks to the scheduler, along\n   with the necessary payments from `Burn` events on Serai.\n3) Sends a `substrate::ProcessorMessage::SubstrateBlockAck`, containing the IDs\n   of all plans now being signed for, to the coordinator.\n4) Sends `sign::ProcessorMessage::Preprocess` for each plan now being signed\n   for.\n\n### Sign Preprocesses\n\nOn `sign::CoordinatorMessage::Preprocesses`, the processor continues the\nspecified transaction signing protocol, sending `sign::ProcessorMessage::Share`\nto the coordinator.\n\n### Sign Shares\n\nOn `sign::CoordinatorMessage::Shares`, the processor completes the specified\ntransaction signing protocol. If successful, the processor stops signing for\nthis transaction and publishes the signed transaction. Then,\n`sign::ProcessorMessage::Completed` is sent to the coordinator, to be\nbroadcasted to all validators so everyone can observe the attempt completed,\nproducing a signed and published transaction.\n\n### Sign Re-attempt\n\nOn `sign::CoordinatorMessage::Reattempt`, the processor will create a new\ninstance of the transaction signing protocol if it hasn't already\ncompleted/observed completion of an instance of the signing protocol. The new\nprotocol's preprocess is sent to the coordinator in a\n`sign::ProcessorMessage::Preprocess`.\n\n### Sign Completed\n\nOn `sign::CoordinatorMessage::Completed`, the processor verifies the included\ntransaction hash actually refers to an accepted transaction which completes the\nplan it was supposed to. 
If so, the processor stops locally signing for the\ntransaction, and emits `sign::ProcessorMessage::Completed` if it hasn't prior.\n"
  },
  {
    "path": "spec/processor/Scanning.md",
    "content": "# Scanning\n\nOnly blocks with finality, either actual or sufficiently probabilistic, are\noperated upon. This is referred to as a block with `CONFIRMATIONS`\nconfirmations, the block itself being the first confirmation.\n\nFor chains which promise finality on a known schedule, `CONFIRMATIONS` is set to\n`1` and each group of finalized blocks is treated as a single block, with the\ntail block's hash representing the entire group.\n\nFor chains which offer finality, on an unknown schedule, `CONFIRMATIONS` is\nstill set to `1` yet blocks aren't aggregated into a group. They're handled\nindividually, yet only once finalized. This allows networks which form\nfinalization erratically to not have to agree on when finalizations were formed,\nsolely that the blocks contained have a finalized descendant.\n\n### Notability, causing a `Batch`\n\n`Batch`s are only created for blocks which it benefits to achieve ordering on.\nThese are:\n\n- Blocks which contain transactions relevant to Serai\n- Blocks which in which a new multisig activates\n- Blocks in which a prior multisig retires\n\n### Waiting for `Batch` inclusion\n\nOnce a `Batch` is created, it is expected to eventually be included on Serai.\nIf the `Batch` isn't included within `CONFIRMATIONS` blocks of its creation, the\nscanner will wait until its inclusion before scanning\n`batch_block + CONFIRMATIONS`.\n"
  },
  {
    "path": "spec/processor/UTXO Management.md",
    "content": "# UTXO Management\n\nUTXO-based chains have practical requirements for efficient operation which can\neffectively be guaranteed to terminate with a safe end state. This document\nattempts to detail such requirements, and the implementations in Serai resolving\nthem.\n\n## Fees From Effecting Transactions Out\n\nWhen `sriXYZ` is burnt, Serai is expected to create an output for `XYZ` as\ninstructed. The transaction containing this output will presumably have some fee\nnecessitating payment. Serai linearly amortizes this fee over all outputs this\ntransaction intends to create in response to burns.\n\nWhile Serai could charge a fee in advance, either static or dynamic to views of\nthe fee market, it'd risk the fee being inaccurate. If it's too high, users have\npaid fees they shouldn't have. If it's too low, Serai is insolvent. This is why\nthe actual fee is amortized, rather than an estimation being prepaid.\n\nSerai could report a view, and when burning occurred, that view could be locked\nin as the basis for transaction fees as used to fulfill the output in question.\nThis would require burns specify the most recent fee market view they're aware\nof, signifying their agreeance, with Serai erroring is a new view is published\nbefore the burn is included on-chain. Not only would this require more data be\npublished to Serai (widening data pipeline requirements), it'd prevent any\nRBF-based solutions to dynamic fee markets causing transactions to get stuck.\n\n## Output Frequency\n\nOutputs can be created on an external network at rate\n`max_outputs_per_tx / external_tick_rate`, where `external_tick_rate` is the\nexternal's network limitations on spending outputs. 
While `external_tick_rate`\nis generally writable as zero, due to mempool chaining, some external networks\nmay not allow spending outputs from transactions which have yet to be ordered.\nMonero only allows spending outputs from transactions who have 10 confirmations,\nfor its own security.\n\nSerai defines its own tick rate per external network, such that\n`serai_tick_rate >= external_tick_rate`. This ensures that Serai never assumes\navailability before actual availability. `serai_tick_rate` is also `> 0`. This\nis since a zero `external_tick_rate` generally does not truly allow an infinite\noutput creation rate due to limitations on the amount of transactions allowed\nin the mempool.\n\nDefine `output_creation_rate` as `max_outputs_per_tx / serai_tick_rate`. Under a\nnaive system which greedily accumulates inputs and linearly processes outputs,\nthis is the highest speed at which outputs which may be processed.\n\nIf the Serai blockchain enables burning sriXYZ at a rate exceeding\n`output_creation_rate`, a backlog would form. This backlog could linearly grow\nat a rate larger than the outputs could linearly shrink, creating an\never-growing backlog, performing a DoS against Serai.\n\nOne solution would be to increase the fee associated with burning sriXYZ when\napproaching `output_creation_rate`, making such a DoS unsustainable. This would\nrequire the Serai blockchain be aware of each external network's\n`output_creation_rate` and implement such a sliding fee. This 'solution' isn't\npreferred as it still temporarily has a growing queue, and normal users would\nalso be affected by the increased fees.\n\nThe solution implemented into Serai is to consume all burns from the start of a\nglobal queue which can be satisfied under currently available inputs. 
While the\nconsumed queue may have 256 items, which can't be processed within a single tick\nby an external network whose `output_creation_rate` is 16, Serai can immediately\nset a finite bound on execution duration.\n\nFor the above example parameters, Serai would create 16 outputs within its tick,\nignoring the necessity of a change output. These 16 outputs would _not_ create\nany outputs Serai is expected to create in response to burns, yet instead create\n16 \"branch\" outputs. One tick later, when the branch outputs are available to\nspend, each would fund creating of 16 expected outputs.\n\nFor `e` expected outputs, the execution duration is just `log e` ticks _with the\nbase of the logarithm being `output_creation_rate`_. Since these `e` expected\noutputs are consumed from the linearly-implemented global queue into their own\ntree structure, execution duration cannot be extended. We can also re-consume\nthe entire global queue (barring input availability, see next section) after\njust one tick, when the change output becomes available again.\n\nDue to the logarithmic complexity of fulfilling burns, attacks require\nexponential growth (which is infeasible to scale). This solution does not\nrequire a sliding fee on Serai's side due to not needing to limit the on-chain\nrate of burns, which means it doesn't so adversely affect normal users. While\nan increased tree depth will increase the amount of transactions needed to\nfulfill an output, increasing the fee amortized over the output and its\nsiblings, this fee scales linearly with the logarithmically scaling tree depth.\nThis is considered acceptable.\n\n## Input Availability\n\nThe following section refers to spending an output, and then spending it again.\nSpending it again, which is impossible under the UTXO model, refers to spending\nthe change output of the transaction it was spent in. 
The following section\nalso assumes any published transaction is immediately ordered on-chain, ignoring\nthe potential for latency from mempool to blockchain (as it is assumed to have a\nnegligible effect in practice).\n\nWhen a burn for amount `a` is issued, the sum amount of immediately available\ninputs may be `< a`. This is because despite each output being considered usable\non a tick basis, there is no global tick. Each output may or may not be\nspendable at some moment, and spending it will prevent its availability for one\ntick of a clock newly started.\n\nThis means all outputs will become available by simply waiting a single tick,\nwithout spending any outputs during the waited tick. Any outputs unlocked at the\nstart of the tick will carry, and within the tick the rest of the outputs will\nbecome unlocked.\n\nThis means that within a tick of operations, the full balance of Serai can be\nconsidered unlocked and used to consume the entire global queue. While Serai\ncould wait for all its outputs to be available before popping from the front of\nthe global queue, eager execution as enough inputs become available provides\nlower latency. Considering the tick may be an hour (as in the case of Bitcoin),\nthis is very appreciated.\n\nIf a full tick is waited for, due to the front of the global queue having a\nnotably large burn, then the entire global queue will be consumed as full input\navailability means the ability to satisfy all potential burns in a solvent\nsystem.\n\n## Fees Incurred During Operations\n\nWhile fees incurred when satisfying burn were covered above, with documentation\non how solvency is maintained, two other operating costs exists.\n\n1) Input accumulation\n2) Multisig rotations\n\nInput accumulation refers to transactions which exist to merge inputs. Just as\nthere is a `max_outputs_per_tx`, there is a `max_inputs_per_tx`. When the amount\nof inputs belonging to Serai exceeds `max_inputs_per_tx`, a TX merging them is\ncreated. 
This TX incurs fees yet has no outputs mapping to burns to amortize\nthem over, accumulating operating costs.\n\nPlease note that this merging occurs in parallel to create a logarithmic\nexecution, similar to how outputs are also processed in parallel.\n\nAs for multisig rotation, multisig rotation occurs when a new multisig for an\nexternal network is created and the old multisig must transfer its inputs in\norder for Serai to continue its operations. This operation also incurs fees\nwithout having outputs immediately available to amortize over.\n\nSerai could charge fees on received outputs, deducting from the amount of\n`sriXYZ` minted in order to cover these operating fees. An overt amount would be\ndeducted to practically ensure solvency, forming a buffer. Once the buffer is\nfilled, fees would be reduced. As the buffer drains, fees would go back up.\n\nThis would keep charged fees in line with actual fees, once the buffer is\ninitially filled, yet requires:\n\n1) Creating and tracking a buffer\n2) Overcharging some users on fees\n\nwhile still risking insolvency, if the actual fees keep increasing in a way\npreventing successful estimation.\n\nThe solution Serai implements is to accrue operating costs, tracking with each\ncreated transaction the running operating costs. When a created transaction has\npayments out, all of the operating costs incurred so far, which have yet to be\namortized, are immediately and fully amortized.\n\n## Attacks by a Malicious Miner\n\nThere is the concern that a significant amount of outputs could be created,\nwhich when merged as inputs, create a significant amount of operating costs.\nThis would then be forced onto random users who burn `sriXYZ` soon after, while\nthe party who caused the operating costs would then be able to burn their own\n`sriXYZ` without notable fees.\n\nTo describe this attack in its optimal form, assume a sole malicious block\nproducer for an external network. 
The malicious miner adds an output to Serai,\nnot paying any fees as the block producer. This single output alone may trigger\nan aggregation transaction. Serai would pay for the transaction fee, the fee\ngoing to the malicious miner.\n\nWhen Serai users burn `sriXYZ`, they are hit with the aggregation transaction's\nfee plus the normally amortized fee. Then, the malicious miner burns their\n`sriXYZ`, having the fee they capture be amortized over their output. In this\nprocess, they remain net except for the increased transaction fees they gain\nfrom other users, which they profit.\n\nTo limit this attack vector, a flat fee of\n`2 * (the estimation of a 2-input-merging transaction fee)` is applied to each\ninput. This means, assuming an inability to manipulate Serai's fee estimations,\ncreating an output to force a merge transaction (and the associated fee) costs\nthe attacker twice as much as the associated fee.\n\nA 2-input TX's fee is used as aggregating multiple inputs at once actually\nyields in Serai's favor so long as the per-input fee exceeds the cost of the\nper-input addition to the TX. Since the per-input fee is the cost of an entire\nTX, this property is true.\n\n### Profitability Without the Flat Fee With a Minority of Hash Power\n\nIgnoring the above flat fee, a malicious miner could use aggregating multiple\ninputs to achieve profit with a minority of hash power. The following is how a\nminer with 7% of the external network's hash power could execute this attack\nprofitably over a network with a `max_inputs_per_tx` value of 16:\n\n1) Mint `sriXYZ` with 256 outputs during their own blocks. 
This incurs no fees\nand would force 16 aggregation transactions to be created.\n\n2) _A miner_, which has a 7% chance of being the malicious miner, collects the\n16 transaction fees.\n\n3) The malicious miner burns their sriXYZ, with a 7% chance of collecting their\nown fee or a 93% chance of losing a single transaction fee.\n\n16 attempts would cost 16 transaction fees if they always lose their single\ntransaction fee. Gaining the 16 transaction fees once, offsetting costs, is\nexpected to happen with just 6.25% of the hash power. Since the malicious miner\nhas 7%, they're statistically likely to recoup their costs and eventually turn\na profit.\n\nWith a flat fee of at least the cost to aggregate a single input in a full\naggregation transaction, this attack falls apart. Serai's flat fee is the higher\ncost of the fee to aggregate two inputs in an aggregation transaction.\n\n### Solvency Without the Flat Fee\n\nEven without the above flat fee, Serai remains solvent. With the above flat fee,\nmalicious miners on external networks can only steal from other users if they\ncan manipulate Serai's fee estimations so that the merge transaction fee used is\ntwice as high as the fees charged for causing a merge transaction. This is\nassumed infeasible to perform at scale, yet even if demonstrated feasible, it\nwould not be a critical vulnerability against Serai. Solely a low/medium/high\nvulnerability against the users (though one it would still be our responsibility\nto rectify).\n"
  },
  {
    "path": "spec/protocol/Constants.md",
    "content": "# Constants\n\n### Types\n\nThese are the list of types used to represent various properties within the\nprotocol.\n\n| Alias           | Type                                         |\n|-----------------|----------------------------------------------|\n| SeraiAddress    | sr25519::Public (unchecked [u8; 32] wrapper) |\n| Amount          | u64                                          |\n| NetworkId       | NetworkId (Rust enum, SCALE-encoded)         |\n| Coin            | Coin (Rust enum, SCALE-encoded)              |\n| Session         | u32                                          |\n| Validator Set   | (NetworkId, Session)                         |\n| Key             | BoundedVec\\<u8, 96>                          |\n| KeyPair         | (SeraiAddress, Key)                          |\n| ExternalAddress | BoundedVec\\<u8, 196>                         |\n| Data            | BoundedVec\\<u8, 512>                         |\n\n### Networks\n\nEvery network connected to Serai operates over a specific curve. The processor\ngenerates a distinct set of keys per network. Beyond the key-generation itself\nbeing isolated, the generated keys are further bound to their respective\nnetworks via an additive offset created by hashing the network's name (among\nother properties). The network's key is used for all coins on that network.\n\n| Network  | Curve     | ID |\n|----------|-----------|----|\n| Serai    | Ristretto | 0  |\n| Bitcoin  | Secp256k1 | 1  |\n| Ethereum | Secp256k1 | 2  |\n| Monero   | Ed25519   | 3  |\n\n### Coins\n\nCoins exist over a network and have a distinct integer ID.\n\n| Coin     | Network  | ID |\n|----------|----------|----|\n| Serai    | Serai    | 0  |\n| Bitcoin  | Bitcoin  | 1  |\n| Ether    | Ethereum | 2  |\n| DAI      | Ethereum | 3  |\n| Monero   | Monero   | 4  |\n"
  },
  {
    "path": "spec/protocol/In Instructions.md",
    "content": "# In Instructions\n\nIn Instructions are included onto the Serai blockchain via unsigned\ntransactions. In order to ensure the integrity of the included instructions, the\nvalidator set responsible for the network in question produces a threshold\nsignature of their authenticity.\n\nThis lets all other validators verify the instructions with an O(1) operation.\n"
  },
  {
    "path": "spec/protocol/Validator Sets.md",
    "content": "# Validator Sets\n\nValidator Sets are defined at the protocol level, with the following parameters:\n\n  - `network`                  (NetworkId): The network this validator set\n                                            operates over.\n  - `allocation_per_key_share` (Amount):    Amount of stake needing allocation\n                                            in order to receive a key share.\n\n### Participation in Consensus\n\nThe validator set for `NetworkId::Serai` participates in Serai's own consensus,\nproducing and finalizing blocks.\n\n### Multisig\n\nEvery Validator Set is expected to form a `t`-of-`n` multisig, where `n` is the\namount of key shares in the Validator Set and `t` is `n * 2 / 3 + 1`, for each\nof its networks. This multisig is secure to hold coins valued at up to 33% of\nthe Validator Set's allocated stake. If the coins exceed that threshold, there's\nmore value in the multisig and associated liquidity pool than in the\nsupermajority of allocated stake securing them both. Accordingly, it'd be no\nlonger financially secure, and it MUST reject newly added coins.\n\n### Multisig Creation\n\nMultisigs are created by Processors, communicating via their Coordinators.\nThey're then confirmed on chain via the `validator-sets` pallet. This is done by\nhaving 100% of participants agree on the resulting group key. While this isn't\nfault tolerant regarding liveliness, a malicious actor who forces a `t`-of-`n`\nmultisig to be `t`-of-`n-1` reduces the fault tolerance of the created multisig\nwhich is a greater issue. If a node does prevent multisig creation, other\nvalidators should issue slashes for it/remove it from the Validator Set\nentirely.\n\nPlacing the creation on chain also solves the question of if the multisig was\nsuccessfully created or not. 
Processors cannot simply ask each other if they\nsucceeded without creating an instance of the Byzantine Generals Problem.\nPlacing results within a Byzantine Fault Tolerant system resolves this.\n\n### Multisig Rotation\n\nPlease see `processor/Multisig Rotation.md` for details on the timing.\n\nOnce the new multisig publishes its first `Batch`, the old multisig's keys are\ncleared and the set is considered retired. After a one-session cooldown period,\nthey may deallocate their stake.\n\n### Set Keys (message)\n\n  - `network`   (Network):   Network whose key is being set.\n  - `key_pair`  (KeyPair):   Key pair being set for this `Session`.\n  - `signature` (Signature): A MuSig-style signature of all validators,\n                             confirming this key.\n"
  },
  {
    "path": "substrate/abi/Cargo.toml",
    "content": "[package]\nname = \"serai-abi\"\nversion = \"0.1.0\"\ndescription = \"ABI for the Serai runtime\"\nlicense = \"MIT\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/substrate/abi\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nedition = \"2021\"\nrust-version = \"1.74\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nscale = { package = \"parity-scale-codec\", version = \"3\", default-features = false, features = [\"derive\"] }\n\nborsh = { version = \"1\", default-features = false, features = [\"derive\", \"de_strict_order\"], optional = true }\nserde = { version = \"1\", default-features = false, features = [\"derive\", \"alloc\"], optional = true }\n\nsp-core = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nsp-runtime = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\n\nsp-consensus-babe = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nsp-consensus-grandpa = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\n\nframe-system = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nframe-support = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\n\nserai-primitives = { path = \"../primitives\", version = \"0.1\", default-features = false }\nserai-coins-primitives = { path = \"../coins/primitives\", version = \"0.1\", default-features = false }\nserai-validator-sets-primitives = { path = 
\"../validator-sets/primitives\", version = \"0.1\", default-features = false }\nserai-genesis-liquidity-primitives = { path = \"../genesis-liquidity/primitives\", version = \"0.1\", default-features = false }\nserai-emissions-primitives = { path = \"../emissions/primitives\", version = \"0.1\", default-features = false }\nserai-in-instructions-primitives = { path = \"../in-instructions/primitives\", version = \"0.1\", default-features = false }\nserai-signals-primitives = { path = \"../signals/primitives\", version = \"0.1\", default-features = false }\n\n[features]\nstd = [\n  \"scale/std\",\n\n  \"borsh?/std\",\n  \"serde?/std\",\n\n  \"sp-core/std\",\n  \"sp-runtime/std\",\n\n  \"sp-consensus-babe/std\",\n  \"sp-consensus-grandpa/std\",\n\n  \"frame-system/std\",\n  \"frame-support/std\",\n\n  \"serai-primitives/std\",\n  \"serai-coins-primitives/std\",\n  \"serai-validator-sets-primitives/std\",\n  \"serai-genesis-liquidity-primitives/std\",\n  \"serai-emissions-primitives/std\",\n  \"serai-in-instructions-primitives/std\",\n  \"serai-signals-primitives/std\",\n]\nborsh = [\n  \"dep:borsh\",\n  \"serai-primitives/borsh\",\n  \"serai-coins-primitives/borsh\",\n  \"serai-validator-sets-primitives/borsh\",\n  \"serai-genesis-liquidity-primitives/borsh\",\n  \"serai-in-instructions-primitives/borsh\",\n  \"serai-signals-primitives/borsh\",\n]\nserde = [\n  \"dep:serde\",\n  \"serai-primitives/serde\",\n  \"serai-coins-primitives/serde\",\n  \"serai-validator-sets-primitives/serde\",\n  \"serai-genesis-liquidity-primitives/serde\",\n  \"serai-in-instructions-primitives/serde\",\n  \"serai-signals-primitives/serde\",\n]\ndefault = [\"std\"]\n"
  },
  {
    "path": "substrate/abi/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2023 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "substrate/abi/src/babe.rs",
    "content": "use sp_consensus_babe::EquivocationProof;\n\nuse serai_primitives::{Header, SeraiAddress};\n\n#[derive(\n  Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking,\n)]\npub struct ReportEquivocation {\n  pub equivocation_proof: alloc::boxed::Box<EquivocationProof<Header>>,\n  pub key_owner_proof: SeraiAddress,\n}\n\n// We could define a Babe Config here and use the literal pallet_babe::Call\n// The disadvantage to this would be the complexity and presence of junk fields such as `__Ignore`\n#[derive(\n  Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking,\n)]\npub enum Call {\n  report_equivocation(ReportEquivocation),\n  report_equivocation_unsigned(ReportEquivocation),\n}\n"
  },
  {
    "path": "substrate/abi/src/coins.rs",
    "content": "use serai_primitives::{Balance, SeraiAddress};\n\npub use serai_coins_primitives as primitives;\nuse primitives::OutInstructionWithBalance;\n\n#[derive(\n  Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking,\n)]\n#[cfg_attr(feature = \"borsh\", derive(borsh::BorshSerialize, borsh::BorshDeserialize))]\n#[cfg_attr(feature = \"serde\", derive(serde::Serialize))]\n#[cfg_attr(all(feature = \"std\", feature = \"serde\"), derive(serde::Deserialize))]\npub enum Call {\n  transfer { to: SeraiAddress, balance: Balance },\n  burn { balance: Balance },\n  burn_with_instruction { instruction: OutInstructionWithBalance },\n}\n\n#[derive(\n  Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking,\n)]\n#[cfg_attr(feature = \"borsh\", derive(borsh::BorshSerialize, borsh::BorshDeserialize))]\n#[cfg_attr(feature = \"serde\", derive(serde::Serialize))]\n#[cfg_attr(all(feature = \"std\", feature = \"serde\"), derive(serde::Deserialize))]\npub enum Event {\n  Mint { to: SeraiAddress, balance: Balance },\n  Burn { from: SeraiAddress, balance: Balance },\n  BurnWithInstruction { from: SeraiAddress, instruction: OutInstructionWithBalance },\n  Transfer { from: SeraiAddress, to: SeraiAddress, balance: Balance },\n}\n"
  },
  {
    "path": "substrate/abi/src/dex.rs",
    "content": "use sp_runtime::BoundedVec;\n\nuse serai_primitives::*;\n\ntype PoolId = ExternalCoin;\ntype MaxSwapPathLength = sp_core::ConstU32<3>;\n\n#[derive(\n  Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking,\n)]\n#[cfg_attr(feature = \"serde\", derive(serde::Serialize))]\n#[cfg_attr(all(feature = \"std\", feature = \"serde\"), derive(serde::Deserialize))]\npub enum Call {\n  add_liquidity {\n    coin: ExternalCoin,\n    coin_desired: SubstrateAmount,\n    sri_desired: SubstrateAmount,\n    coin_min: SubstrateAmount,\n    sri_min: SubstrateAmount,\n    mint_to: SeraiAddress,\n  },\n  remove_liquidity {\n    coin: ExternalCoin,\n    lp_token_burn: SubstrateAmount,\n    coin_min_receive: SubstrateAmount,\n    sri_min_receive: SubstrateAmount,\n    withdraw_to: SeraiAddress,\n  },\n  swap_exact_tokens_for_tokens {\n    path: BoundedVec<Coin, MaxSwapPathLength>,\n    amount_in: SubstrateAmount,\n    amount_out_min: SubstrateAmount,\n    send_to: SeraiAddress,\n  },\n  swap_tokens_for_exact_tokens {\n    path: BoundedVec<Coin, MaxSwapPathLength>,\n    amount_out: SubstrateAmount,\n    amount_in_max: SubstrateAmount,\n    send_to: SeraiAddress,\n  },\n}\n\n#[derive(\n  Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking,\n)]\n#[cfg_attr(feature = \"serde\", derive(serde::Serialize))]\n#[cfg_attr(all(feature = \"std\", feature = \"serde\"), derive(serde::Deserialize))]\npub enum Event {\n  PoolCreated {\n    pool_id: PoolId,\n    pool_account: SeraiAddress,\n  },\n\n  LiquidityAdded {\n    who: SeraiAddress,\n    mint_to: SeraiAddress,\n    pool_id: PoolId,\n    coin_amount: SubstrateAmount,\n    sri_amount: SubstrateAmount,\n    lp_token_minted: SubstrateAmount,\n  },\n\n  LiquidityRemoved {\n    who: SeraiAddress,\n    withdraw_to: SeraiAddress,\n    pool_id: PoolId,\n    coin_amount: SubstrateAmount,\n    sri_amount: SubstrateAmount,\n    lp_token_burned: SubstrateAmount,\n  },\n\n  
SwapExecuted {\n    who: SeraiAddress,\n    send_to: SeraiAddress,\n    path: BoundedVec<Coin, MaxSwapPathLength>,\n    amount_in: SubstrateAmount,\n    amount_out: SubstrateAmount,\n  },\n}\n"
  },
  {
    "path": "substrate/abi/src/economic_security.rs",
    "content": "use serai_primitives::ExternalNetworkId;\n\n#[derive(\n  Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking,\n)]\n#[cfg_attr(feature = \"borsh\", derive(borsh::BorshSerialize, borsh::BorshDeserialize))]\n#[cfg_attr(feature = \"serde\", derive(serde::Serialize, serde::Deserialize))]\npub enum Event {\n  EconomicSecurityReached { network: ExternalNetworkId },\n}\n"
  },
  {
    "path": "substrate/abi/src/emissions.rs",
    "content": "pub use serai_emissions_primitives as primitives;\n"
  },
  {
    "path": "substrate/abi/src/genesis_liquidity.rs",
    "content": "pub use serai_genesis_liquidity_primitives as primitives;\n\nuse serai_primitives::*;\nuse primitives::*;\n\n#[derive(\n  Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking,\n)]\n#[cfg_attr(feature = \"serde\", derive(serde::Serialize, serde::Deserialize))]\npub enum Call {\n  remove_coin_liquidity { balance: ExternalBalance },\n  oraclize_values { values: Values, signature: Signature },\n}\n\n#[derive(\n  Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking,\n)]\n#[cfg_attr(feature = \"borsh\", derive(borsh::BorshSerialize, borsh::BorshDeserialize))]\n#[cfg_attr(feature = \"serde\", derive(serde::Serialize, serde::Deserialize))]\npub enum Event {\n  GenesisLiquidityAdded { by: SeraiAddress, balance: ExternalBalance },\n  GenesisLiquidityRemoved { by: SeraiAddress, balance: ExternalBalance },\n  GenesisLiquidityAddedToPool { coin: ExternalBalance, sri: Amount },\n}\n"
  },
  {
    "path": "substrate/abi/src/grandpa.rs",
    "content": "use sp_consensus_grandpa::EquivocationProof;\n\nuse serai_primitives::{BlockNumber, SeraiAddress};\n\n#[derive(\n  Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking,\n)]\npub struct ReportEquivocation {\n  pub equivocation_proof: alloc::boxed::Box<EquivocationProof<[u8; 32], BlockNumber>>,\n  pub key_owner_proof: SeraiAddress,\n}\n\n#[derive(\n  Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking,\n)]\npub enum Call {\n  report_equivocation(ReportEquivocation),\n  report_equivocation_unsigned(ReportEquivocation),\n}\n\n#[derive(\n  Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking,\n)]\n#[cfg_attr(feature = \"serde\", derive(serde::Serialize))]\n#[cfg_attr(all(feature = \"std\", feature = \"serde\"), derive(serde::Deserialize))]\npub enum Event {\n  NewAuthorities { authority_set: alloc::vec::Vec<(SeraiAddress, u64)> },\n  // TODO: Remove these\n  Paused,\n  Resumed,\n}\n"
  },
  {
    "path": "substrate/abi/src/in_instructions.rs",
    "content": "use serai_primitives::*;\n\npub use serai_in_instructions_primitives as primitives;\nuse primitives::SignedBatch;\n\n#[derive(\n  Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking,\n)]\n#[cfg_attr(feature = \"borsh\", derive(borsh::BorshSerialize, borsh::BorshDeserialize))]\n#[cfg_attr(feature = \"serde\", derive(serde::Serialize))]\n#[cfg_attr(all(feature = \"std\", feature = \"serde\"), derive(serde::Deserialize))]\npub enum Call {\n  execute_batch { batch: SignedBatch },\n}\n\n#[derive(\n  Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking,\n)]\n#[cfg_attr(feature = \"borsh\", derive(borsh::BorshSerialize, borsh::BorshDeserialize))]\n#[cfg_attr(feature = \"serde\", derive(serde::Serialize))]\n#[cfg_attr(all(feature = \"std\", feature = \"serde\"), derive(serde::Deserialize))]\npub enum Event {\n  Batch { network: ExternalNetworkId, id: u32, block: BlockHash, instructions_hash: [u8; 32] },\n  InstructionFailure { network: ExternalNetworkId, id: u32, index: u32 },\n  Halt { network: ExternalNetworkId },\n}\n"
  },
  {
    "path": "substrate/abi/src/lib.rs",
    "content": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![cfg_attr(docsrs, feature(doc_cfg))]\n#![cfg_attr(not(feature = \"std\"), no_std)]\n#![allow(non_camel_case_types)]\n#![expect(clippy::cast_possible_truncation)]\n\nextern crate alloc;\n\npub use serai_primitives as primitives;\n\npub mod system;\n\npub mod timestamp;\n\npub mod coins;\npub mod liquidity_tokens;\npub mod dex;\n\npub mod validator_sets;\n\npub mod genesis_liquidity;\npub mod emissions;\n\npub mod economic_security;\n\npub mod in_instructions;\n\npub mod signals;\n\npub mod babe;\npub mod grandpa;\n\npub mod tx;\n\n#[derive(\n  Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking,\n)]\npub enum Call {\n  #[codec(index = 1)]\n  Timestamp(timestamp::Call),\n  #[codec(index = 3)]\n  Coins(coins::Call),\n  #[codec(index = 4)]\n  LiquidityTokens(liquidity_tokens::Call),\n  #[codec(index = 5)]\n  Dex(dex::Call),\n  #[codec(index = 6)]\n  ValidatorSets(validator_sets::Call),\n  #[codec(index = 7)]\n  GenesisLiquidity(genesis_liquidity::Call),\n  #[codec(index = 10)]\n  InInstructions(in_instructions::Call),\n  #[codec(index = 11)]\n  Signals(signals::Call),\n  #[codec(index = 12)]\n  Babe(babe::Call),\n  #[codec(index = 13)]\n  Grandpa(grandpa::Call),\n}\n\n// TODO: Remove this\n#[derive(\n  Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking,\n)]\npub enum TransactionPaymentEvent {\n  TransactionFeePaid { who: serai_primitives::SeraiAddress, actual_fee: u64, tip: u64 },\n}\n\n#[derive(\n  Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking,\n)]\npub enum Event {\n  #[codec(index = 0)]\n  System(system::Event),\n  #[codec(index = 2)]\n  TransactionPayment(TransactionPaymentEvent),\n  #[codec(index = 3)]\n  Coins(coins::Event),\n  #[codec(index = 4)]\n  LiquidityTokens(liquidity_tokens::Event),\n  #[codec(index = 5)]\n  Dex(dex::Event),\n  #[codec(index = 6)]\n  
ValidatorSets(validator_sets::Event),\n  #[codec(index = 7)]\n  GenesisLiquidity(genesis_liquidity::Event),\n  #[codec(index = 9)]\n  EconomicSecurity(economic_security::Event),\n  #[codec(index = 10)]\n  InInstructions(in_instructions::Event),\n  #[codec(index = 11)]\n  Signals(signals::Event),\n  #[codec(index = 13)]\n  Grandpa(grandpa::Event),\n}\n\n#[derive(\n  Clone, Copy, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking,\n)]\n#[cfg_attr(feature = \"serde\", derive(serde::Serialize))]\n#[cfg_attr(all(feature = \"std\", feature = \"serde\"), derive(serde::Deserialize))]\npub struct Extra {\n  pub era: sp_runtime::generic::Era,\n  #[codec(compact)]\n  pub nonce: u32,\n  #[codec(compact)]\n  pub tip: u64,\n}\n\n#[derive(\n  Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking,\n)]\n#[cfg_attr(feature = \"borsh\", derive(borsh::BorshSerialize, borsh::BorshDeserialize))]\n#[cfg_attr(feature = \"serde\", derive(serde::Serialize))]\n#[cfg_attr(all(feature = \"std\", feature = \"serde\"), derive(serde::Deserialize))]\npub struct SignedPayloadExtra {\n  pub spec_version: u32,\n  pub tx_version: u32,\n  pub genesis: [u8; 32],\n  pub mortality_checkpoint: [u8; 32],\n}\n\npub type Transaction = tx::Transaction<Call, Extra>;\n"
  },
  {
    "path": "substrate/abi/src/liquidity_tokens.rs",
    "content": "use serai_primitives::{Balance, SeraiAddress};\n\n#[derive(\n  Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking,\n)]\n#[cfg_attr(feature = \"borsh\", derive(borsh::BorshSerialize, borsh::BorshDeserialize))]\n#[cfg_attr(feature = \"serde\", derive(serde::Serialize, serde::Deserialize))]\npub enum Call {\n  burn { balance: Balance },\n  transfer { to: SeraiAddress, balance: Balance },\n}\n\n#[derive(\n  Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking,\n)]\n#[cfg_attr(feature = \"borsh\", derive(borsh::BorshSerialize, borsh::BorshDeserialize))]\n#[cfg_attr(feature = \"serde\", derive(serde::Serialize, serde::Deserialize))]\npub enum Event {\n  Mint { to: SeraiAddress, balance: Balance },\n  Burn { from: SeraiAddress, balance: Balance },\n  Transfer { from: SeraiAddress, to: SeraiAddress, balance: Balance },\n}\n"
  },
  {
    "path": "substrate/abi/src/signals.rs",
    "content": "use serai_primitives::{NetworkId, SeraiAddress};\n\nuse serai_validator_sets_primitives::ValidatorSet;\n\npub use serai_signals_primitives as primitives;\nuse primitives::SignalId;\n\n#[derive(\n  Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking,\n)]\n#[cfg_attr(feature = \"borsh\", derive(borsh::BorshSerialize, borsh::BorshDeserialize))]\n#[cfg_attr(feature = \"serde\", derive(serde::Serialize))]\n#[cfg_attr(all(feature = \"std\", feature = \"serde\"), derive(serde::Deserialize))]\npub enum Call {\n  register_retirement_signal { in_favor_of: [u8; 32] },\n  revoke_retirement_signal { retirement_signal_id: [u8; 32] },\n  favor { signal_id: SignalId, for_network: NetworkId },\n  revoke_favor { signal_id: SignalId, for_network: NetworkId },\n  stand_against { signal_id: SignalId, for_network: NetworkId },\n}\n\n#[derive(\n  Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking,\n)]\n#[cfg_attr(feature = \"borsh\", derive(borsh::BorshSerialize, borsh::BorshDeserialize))]\n#[cfg_attr(feature = \"serde\", derive(serde::Serialize))]\n#[cfg_attr(all(feature = \"std\", feature = \"serde\"), derive(serde::Deserialize))]\npub enum Event {\n  RetirementSignalRegistered {\n    signal_id: [u8; 32],\n    in_favor_of: [u8; 32],\n    registrant: SeraiAddress,\n  },\n  RetirementSignalRevoked {\n    signal_id: [u8; 32],\n  },\n  SignalFavored {\n    signal_id: SignalId,\n    by: SeraiAddress,\n    for_network: NetworkId,\n  },\n  SetInFavor {\n    signal_id: SignalId,\n    set: ValidatorSet,\n  },\n  RetirementSignalLockedIn {\n    signal_id: [u8; 32],\n  },\n  SetNoLongerInFavor {\n    signal_id: SignalId,\n    set: ValidatorSet,\n  },\n  FavorRevoked {\n    signal_id: SignalId,\n    by: SeraiAddress,\n    for_network: NetworkId,\n  },\n  AgainstSignal {\n    signal_id: SignalId,\n    who: SeraiAddress,\n    for_network: NetworkId,\n  },\n}\n"
  },
  {
    "path": "substrate/abi/src/system.rs",
    "content": "use frame_system::DispatchEventInfo;\nuse frame_support::sp_runtime::DispatchError;\n\nuse serai_primitives::SeraiAddress;\n\n#[derive(\n  Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking,\n)]\npub enum Event {\n  ExtrinsicSuccess { dispatch_info: DispatchEventInfo },\n  ExtrinsicFailed { dispatch_error: DispatchError, dispatch_info: DispatchEventInfo },\n  CodeUpdated,\n  NewAccount { account: SeraiAddress },\n  KilledAccount { account: SeraiAddress },\n}\n"
  },
  {
    "path": "substrate/abi/src/timestamp.rs",
    "content": "#[derive(\n  Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking,\n)]\n#[cfg_attr(feature = \"serde\", derive(serde::Serialize))]\n#[cfg_attr(all(feature = \"std\", feature = \"serde\"), derive(serde::Deserialize))]\npub enum Call {\n  set {\n    #[codec(compact)]\n    now: u64,\n  },\n}\n"
  },
  {
    "path": "substrate/abi/src/tx.rs",
    "content": "#![allow(deprecated)]\n\nuse scale::Encode;\n\nuse sp_core::sr25519::{Public, Signature};\nuse sp_runtime::traits::Verify;\n\nuse serai_primitives::SeraiAddress;\n\nuse frame_support::dispatch::GetDispatchInfo;\n\npub trait TransactionMember:\n  Clone + PartialEq + Eq + core::fmt::Debug + scale::Encode + scale::Decode\n{\n}\nimpl<T: Clone + PartialEq + Eq + core::fmt::Debug + scale::Encode + scale::Decode> TransactionMember\n  for T\n{\n}\n\ntype TransactionEncodeAs<'a, Extra> =\n  (&'a crate::Call, &'a Option<(SeraiAddress, Signature, Extra)>);\ntype TransactionDecodeAs<Extra> = (crate::Call, Option<(SeraiAddress, Signature, Extra)>);\n\n// We use our own Transaction struct, over UncheckedExtrinsic, for more control, a bit more\n// simplicity, and in order to be immune to https://github.com/paritytech/polkadot-sdk/issues/2947\n#[allow(clippy::multiple_bound_locations)]\n#[derive(Clone, PartialEq, Eq, Debug, scale::DecodeWithMemTracking)]\npub struct Transaction<\n  Call: 'static + TransactionMember + From<crate::Call>,\n  Extra: 'static + TransactionMember,\n> {\n  call: crate::Call,\n  mapped_call: Call,\n  signature: Option<(SeraiAddress, Signature, Extra)>,\n}\n\nimpl<Call: 'static + TransactionMember + From<crate::Call>, Extra: 'static + TransactionMember>\n  Transaction<Call, Extra>\n{\n  pub fn new(call: crate::Call, signature: Option<(SeraiAddress, Signature, Extra)>) -> Self {\n    Self { call: call.clone(), mapped_call: call.into(), signature }\n  }\n\n  pub fn call(&self) -> &crate::Call {\n    &self.call\n  }\n\n  pub fn signer(&self) -> Option<SeraiAddress> {\n    self.signature.as_ref().map(|(address, _sig, _extra)| *address)\n  }\n}\n\nimpl<Call: 'static + TransactionMember + From<crate::Call>, Extra: 'static + TransactionMember>\n  scale::Encode for Transaction<Call, Extra>\n{\n  fn using_encoded<R, F: FnOnce(&[u8]) -> R>(&self, f: F) -> R {\n    let tx: TransactionEncodeAs<Extra> = (&self.call, &self.signature);\n    
tx.using_encoded(f)\n  }\n}\nimpl<Call: 'static + TransactionMember + From<crate::Call>, Extra: 'static + TransactionMember>\n  scale::Decode for Transaction<Call, Extra>\n{\n  fn decode<I: scale::Input>(input: &mut I) -> Result<Self, scale::Error> {\n    let (call, signature) = TransactionDecodeAs::decode(input)?;\n    let mapped_call = Call::from(call.clone());\n    Ok(Self { call, mapped_call, signature })\n  }\n}\n\n#[cfg(feature = \"serde\")]\nmod _serde {\n  use scale::Encode;\n  use serde::ser::*;\n  use super::*;\n  impl<Call: 'static + TransactionMember + From<crate::Call>, Extra: 'static + TransactionMember>\n    Serialize for Transaction<Call, Extra>\n  {\n    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {\n      let encoded = self.encode();\n      serializer.serialize_bytes(&encoded)\n    }\n  }\n\n  #[cfg(feature = \"std\")]\n  use serde::de::*;\n  #[cfg(feature = \"std\")]\n  impl<\n      'a,\n      Call: 'static + TransactionMember + From<crate::Call>,\n      Extra: 'static + TransactionMember,\n    > Deserialize<'a> for Transaction<Call, Extra>\n  {\n    fn deserialize<D: Deserializer<'a>>(de: D) -> Result<Self, D::Error> {\n      let bytes = sp_core::bytes::deserialize(de)?;\n      <Self as scale::Decode>::decode(&mut &bytes[..])\n        .map_err(|e| serde::de::Error::custom(format!(\"invalid transaction: {e}\")))\n    }\n  }\n}\n\nimpl<\n    Call: 'static + TransactionMember + From<crate::Call> + TryInto<crate::Call>,\n    Extra: 'static + TransactionMember,\n  > sp_runtime::traits::Extrinsic for Transaction<Call, Extra>\n{\n  type Call = Call;\n  type SignaturePayload = (SeraiAddress, Signature, Extra);\n  fn is_signed(&self) -> Option<bool> {\n    Some(self.signature.is_some())\n  }\n  fn new(call: Call, signature: Option<Self::SignaturePayload>) -> Option<Self> {\n    Some(Self { call: call.clone().try_into().ok()?, mapped_call: call, signature })\n  }\n}\n\nimpl<\n    Call: 'static + TransactionMember + 
From<crate::Call> + TryInto<crate::Call>,\n    Extra: 'static + TransactionMember,\n  > frame_support::sp_runtime::traits::ExtrinsicCall for Transaction<Call, Extra>\n{\n  type Call = Call;\n  fn call(&self) -> &Call {\n    &self.mapped_call\n  }\n}\n\nimpl<\n    Call: 'static + TransactionMember + From<crate::Call> + GetDispatchInfo,\n    Extra: 'static + TransactionMember,\n  > GetDispatchInfo for Transaction<Call, Extra>\n{\n  fn get_dispatch_info(&self) -> frame_support::dispatch::DispatchInfo {\n    self.mapped_call.get_dispatch_info()\n  }\n}\n\nuse sp_runtime::generic::ExtrinsicFormat;\nimpl<\n    Call: 'static + TransactionMember + From<crate::Call> + sp_runtime::traits::Dispatchable,\n    Extra: 'static + TransactionMember + sp_runtime::traits::TransactionExtension<Call>,\n  > sp_runtime::traits::BlindCheckable for Transaction<Call, Extra>\n{\n  type Checked = sp_runtime::generic::CheckedExtrinsic<Public, Call, Extra>;\n\n  fn check(\n    self,\n  ) -> Result<Self::Checked, sp_runtime::transaction_validity::TransactionValidityError> {\n    Ok(match self.signature {\n      Some((signer, signature, extra)) => {\n        if !signature\n          .verify((&self.call, &extra, extra.implicit()?).encode().as_slice(), &signer.into())\n        {\n          Err(sp_runtime::transaction_validity::InvalidTransaction::BadProof)?\n        }\n\n        sp_runtime::generic::CheckedExtrinsic {\n          format: ExtrinsicFormat::Signed(signer.into(), extra),\n          function: self.mapped_call,\n        }\n      }\n      None => sp_runtime::generic::CheckedExtrinsic {\n        format: ExtrinsicFormat::Bare,\n        function: self.mapped_call,\n      },\n    })\n  }\n}\n\nimpl<\n    Call: 'static + TransactionMember + From<crate::Call> + TryInto<crate::Call>,\n    Extra: 'static + TransactionMember,\n  > frame_support::traits::InherentBuilder for Transaction<Call, Extra>\n{\n  /// Panics if the inherent isn't supported.\n  // TODO: Don't panic here\n  fn 
new_inherent(call: Self::Call) -> Self {\n    sp_runtime::traits::Extrinsic::new(call, None).expect(\"trying to build an unsupported inherent\")\n  }\n}\n"
  },
  {
    "path": "substrate/abi/src/validator_sets.rs",
    "content": "use sp_core::{ConstU32, bounded::BoundedVec};\n\npub use serai_validator_sets_primitives as primitives;\n\nuse serai_primitives::*;\nuse serai_validator_sets_primitives::*;\n\n#[derive(\n  Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking,\n)]\n#[cfg_attr(feature = \"serde\", derive(serde::Serialize))]\n#[cfg_attr(all(feature = \"std\", feature = \"serde\"), derive(serde::Deserialize))]\npub enum Call {\n  set_keys {\n    network: ExternalNetworkId,\n    removed_participants: BoundedVec<SeraiAddress, ConstU32<{ MAX_KEY_SHARES_PER_SET / 3 }>>,\n    key_pair: KeyPair,\n    signature: Signature,\n  },\n  report_slashes {\n    network: ExternalNetworkId,\n    slashes: BoundedVec<(SeraiAddress, u32), ConstU32<{ MAX_KEY_SHARES_PER_SET / 3 }>>,\n    signature: Signature,\n  },\n  allocate {\n    network: NetworkId,\n    amount: Amount,\n  },\n  deallocate {\n    network: NetworkId,\n    amount: Amount,\n  },\n  claim_deallocation {\n    network: NetworkId,\n    session: Session,\n  },\n}\n\n#[derive(\n  Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking,\n)]\n#[cfg_attr(feature = \"borsh\", derive(borsh::BorshSerialize, borsh::BorshDeserialize))]\n#[cfg_attr(feature = \"serde\", derive(serde::Serialize))]\n#[cfg_attr(all(feature = \"std\", feature = \"serde\"), derive(serde::Deserialize))]\npub enum Event {\n  NewSet {\n    set: ValidatorSet,\n  },\n  ParticipantRemoved {\n    set: ValidatorSet,\n    removed: SeraiAddress,\n  },\n  KeyGen {\n    set: ExternalValidatorSet,\n    key_pair: KeyPair,\n  },\n  AcceptedHandover {\n    set: ValidatorSet,\n  },\n  SetRetired {\n    set: ValidatorSet,\n  },\n  AllocationIncreased {\n    validator: SeraiAddress,\n    network: NetworkId,\n    amount: Amount,\n  },\n  AllocationDecreased {\n    validator: SeraiAddress,\n    network: NetworkId,\n    amount: Amount,\n    delayed_until: Option<Session>,\n  },\n  DeallocationClaimed {\n    
validator: SeraiAddress,\n    network: NetworkId,\n    session: Session,\n  },\n}\n"
  },
  {
    "path": "substrate/client/Cargo.toml",
    "content": "[package]\nname = \"serai-client\"\nversion = \"0.1.0\"\ndescription = \"Client library for the Serai network\"\nlicense = \"MIT\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/substrate/client\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nkeywords = [\"serai\"]\nedition = \"2021\"\nrust-version = \"1.82\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nzeroize = \"^1.5\"\nthiserror = { version = \"1\", optional = true }\n\nhex = \"0.4\"\nscale = { package = \"parity-scale-codec\", version = \"3\" }\nserde = { version = \"1\", features = [\"derive\"], optional = true }\nserde_json = { version = \"1\", optional = true }\n\nserai-abi = { path = \"../abi\", version = \"0.1\" }\n\nmultiaddr = { version = \"0.18\", optional = true }\nsp-core = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", optional = true }\nsp-runtime = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", optional = true }\nframe-system = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", optional = true }\n\nasync-lock = \"3\"\n\nsimple-request = { path = \"../../common/request\", version = \"0.1\", optional = true }\n\nbitcoin = { version = \"0.32\", optional = true }\n\ndalek-ff-group = { path = \"../../crypto/dalek-ff-group\", optional = true }\nciphersuite = { path = \"../../crypto/ciphersuite\", version = \"0.4\", optional = true }\nmonero-wallet = { git = \"https://github.com/monero-oxide/monero-oxide\", rev = \"32e6b5fe5ba9e1ea3e68da882550005122a11d22\", version = \"0.1.0\", default-features = false, features = [\"std\"], optional = true }\n\n[dev-dependencies]\nrand_core = \"0.6\"\nhex = \"0.4\"\n\nblake2 = \"0.10\"\n\ndalek-ff-group = { path = 
\"../../crypto/dalek-ff-group\" }\nciphersuite = { path = \"../../crypto/ciphersuite\" }\ndkg-musig = { path = \"../../crypto/dkg/musig\" }\nfrost = { package = \"modular-frost\", path = \"../../crypto/frost\", features = [\"tests\"] }\nschnorrkel = { path = \"../../crypto/schnorrkel\", package = \"frost-schnorrkel\" }\n\ntokio = \"1\"\n\ndockertest = \"0.5\"\nserai-docker-tests = { path = \"../../tests/docker\" }\n\n[features]\nserai = [\"thiserror\", \"serde\", \"serde_json\", \"serai-abi/serde\", \"multiaddr\", \"sp-core\", \"sp-runtime\", \"frame-system\", \"simple-request\"]\nborsh = [\"serai-abi/borsh\"]\n\nnetworks = []\nbitcoin = [\"networks\", \"dep:bitcoin\"]\nmonero = [\"networks\", \"dalek-ff-group\", \"ciphersuite\", \"monero-wallet\"]\n\n# Assumes the default usage is to use Serai as a DEX, which doesn't actually\n# require connecting to a Serai node\ndefault = [\"bitcoin\", \"monero\"]\n"
  },
  {
    "path": "substrate/client/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2022-2023 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "substrate/client/src/lib.rs",
    "content": "#![expect(clippy::cast_possible_truncation)]\n\n#[cfg(feature = \"networks\")]\npub mod networks;\n\n#[cfg(feature = \"serai\")]\nmod serai;\n#[cfg(feature = \"serai\")]\npub use serai::*;\n\n#[cfg(not(feature = \"serai\"))]\npub use serai_abi::primitives;\n#[cfg(not(feature = \"serai\"))]\nmod other_primitives {\n  pub mod coins {\n    pub use serai_abi::coins::primitives;\n  }\n  pub mod validator_sets {\n    pub use serai_abi::validator_sets::primitives;\n  }\n  pub mod in_instructions {\n    pub use serai_abi::in_instructions::primitives;\n  }\n}\n#[cfg(not(feature = \"serai\"))]\npub use other_primitives::*;\n\n#[cfg(test)]\nmod tests;\n"
  },
  {
    "path": "substrate/client/src/networks/bitcoin.rs",
    "content": "use core::{str::FromStr, fmt};\n\nuse scale::{Encode, Decode};\n\nuse bitcoin::{\n  hashes::{Hash as HashTrait, hash160::Hash},\n  PubkeyHash, ScriptHash,\n  network::Network,\n  WitnessVersion, WitnessProgram, ScriptBuf,\n  address::{AddressType, NetworkChecked, Address as BAddress},\n};\n\n#[derive(Clone, Eq, Debug)]\npub struct Address(ScriptBuf);\n\nimpl PartialEq for Address {\n  fn eq(&self, other: &Self) -> bool {\n    // Since Serai defines the Bitcoin-address specification as a variant of the script alone,\n    // define equivalency as the script alone\n    self.0 == other.0\n  }\n}\n\nimpl From<Address> for ScriptBuf {\n  fn from(addr: Address) -> ScriptBuf {\n    addr.0\n  }\n}\n\nimpl FromStr for Address {\n  type Err = ();\n  fn from_str(str: &str) -> Result<Address, ()> {\n    Address::new(\n      BAddress::from_str(str)\n        .map_err(|_| ())?\n        .require_network(Network::Bitcoin)\n        .map_err(|_| ())?\n        .script_pubkey(),\n    )\n    .ok_or(())\n  }\n}\n\nimpl fmt::Display for Address {\n  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n    BAddress::<NetworkChecked>::from_script(&self.0, Network::Bitcoin)\n      .map_err(|_| fmt::Error)?\n      .fmt(f)\n  }\n}\n\n// SCALE-encoded variant of Bitcoin addresses.\n#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)]\nenum EncodedAddress {\n  P2PKH([u8; 20]),\n  P2SH([u8; 20]),\n  P2WPKH([u8; 20]),\n  P2WSH([u8; 32]),\n  P2TR([u8; 32]),\n}\n\nimpl TryFrom<Vec<u8>> for Address {\n  type Error = ();\n  fn try_from(data: Vec<u8>) -> Result<Address, ()> {\n    Ok(Address(match EncodedAddress::decode(&mut data.as_ref()).map_err(|_| ())? 
{\n      EncodedAddress::P2PKH(hash) => {\n        ScriptBuf::new_p2pkh(&PubkeyHash::from_raw_hash(Hash::from_byte_array(hash)))\n      }\n      EncodedAddress::P2SH(hash) => {\n        ScriptBuf::new_p2sh(&ScriptHash::from_raw_hash(Hash::from_byte_array(hash)))\n      }\n      EncodedAddress::P2WPKH(hash) => {\n        ScriptBuf::new_witness_program(&WitnessProgram::new(WitnessVersion::V0, &hash).unwrap())\n      }\n      EncodedAddress::P2WSH(hash) => {\n        ScriptBuf::new_witness_program(&WitnessProgram::new(WitnessVersion::V0, &hash).unwrap())\n      }\n      EncodedAddress::P2TR(key) => {\n        ScriptBuf::new_witness_program(&WitnessProgram::new(WitnessVersion::V1, &key).unwrap())\n      }\n    }))\n  }\n}\n\nfn try_to_vec(addr: &Address) -> Result<Vec<u8>, ()> {\n  let parsed_addr =\n    BAddress::<NetworkChecked>::from_script(&addr.0, Network::Bitcoin).map_err(|_| ())?;\n  Ok(\n    (match parsed_addr.address_type() {\n      Some(AddressType::P2pkh) => {\n        EncodedAddress::P2PKH(*parsed_addr.pubkey_hash().unwrap().as_raw_hash().as_byte_array())\n      }\n      Some(AddressType::P2sh) => {\n        EncodedAddress::P2SH(*parsed_addr.script_hash().unwrap().as_raw_hash().as_byte_array())\n      }\n      Some(AddressType::P2wpkh) => {\n        let program = parsed_addr.witness_program().ok_or(())?;\n        let program = program.program().as_bytes();\n        EncodedAddress::P2WPKH(program.try_into().map_err(|_| ())?)\n      }\n      Some(AddressType::P2wsh) => {\n        let program = parsed_addr.witness_program().ok_or(())?;\n        let program = program.program().as_bytes();\n        EncodedAddress::P2WSH(program.try_into().map_err(|_| ())?)\n      }\n      Some(AddressType::P2tr) => {\n        let program = parsed_addr.witness_program().ok_or(())?;\n        let program = program.program().as_bytes();\n        EncodedAddress::P2TR(program.try_into().map_err(|_| ())?)\n      }\n      _ => Err(())?,\n    })\n    .encode(),\n  )\n}\n\nimpl 
From<Address> for Vec<u8> {\n  fn from(addr: Address) -> Vec<u8> {\n    // Safe since only encodable addresses can be created\n    try_to_vec(&addr).unwrap()\n  }\n}\n\nimpl Address {\n  pub fn new(address: ScriptBuf) -> Option<Self> {\n    let res = Self(address);\n    if try_to_vec(&res).is_ok() {\n      return Some(res);\n    }\n    None\n  }\n}\n"
  },
  {
    "path": "substrate/client/src/networks/mod.rs",
    "content": "#[cfg(feature = \"bitcoin\")]\npub mod bitcoin;\n\n#[cfg(feature = \"monero\")]\npub mod monero;\n"
  },
  {
    "path": "substrate/client/src/networks/monero.rs",
    "content": "use core::{str::FromStr, fmt};\n\nuse scale::{Encode, Decode};\n\nuse dalek_ff_group::Ed25519;\nuse ciphersuite::Ciphersuite;\n\nuse monero_wallet::address::{AddressError, Network, AddressType, MoneroAddress};\n\n#[derive(Clone, PartialEq, Eq, Debug)]\npub struct Address(MoneroAddress);\nimpl Address {\n  pub fn new(address: MoneroAddress) -> Option<Address> {\n    if address.payment_id().is_some() {\n      return None;\n    }\n    Some(Address(address))\n  }\n}\n\nimpl FromStr for Address {\n  type Err = AddressError;\n  fn from_str(str: &str) -> Result<Address, AddressError> {\n    MoneroAddress::from_str(Network::Mainnet, str).map(Address)\n  }\n}\n\nimpl fmt::Display for Address {\n  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n    self.0.fmt(f)\n  }\n}\n\n// SCALE-encoded variant of Monero addresses.\n#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)]\nenum EncodedAddressType {\n  Legacy,\n  Subaddress,\n  Featured(u8),\n}\n\n#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)]\nstruct EncodedAddress {\n  kind: EncodedAddressType,\n  spend: [u8; 32],\n  view: [u8; 32],\n}\n\nimpl TryFrom<Vec<u8>> for Address {\n  type Error = ();\n  fn try_from(data: Vec<u8>) -> Result<Address, ()> {\n    // Decode as SCALE\n    let addr = EncodedAddress::decode(&mut data.as_ref()).map_err(|_| ())?;\n    // Convert over\n    Ok(Address(MoneroAddress::new(\n      Network::Mainnet,\n      match addr.kind {\n        EncodedAddressType::Legacy => AddressType::Legacy,\n        EncodedAddressType::Subaddress => AddressType::Subaddress,\n        EncodedAddressType::Featured(flags) => {\n          let subaddress = (flags & 1) != 0;\n          let integrated = (flags & (1 << 1)) != 0;\n          let guaranteed = (flags & (1 << 2)) != 0;\n          if integrated {\n            Err(())?;\n          }\n          AddressType::Featured { subaddress, payment_id: None, guaranteed }\n        }\n      },\n      Ed25519::read_G::<&[u8]>(&mut 
addr.spend.as_ref()).map_err(|_| ())?.0,\n      Ed25519::read_G::<&[u8]>(&mut addr.view.as_ref()).map_err(|_| ())?.0,\n    )))\n  }\n}\n\n#[allow(clippy::from_over_into)]\nimpl Into<MoneroAddress> for Address {\n  fn into(self) -> MoneroAddress {\n    self.0\n  }\n}\n\n#[allow(clippy::from_over_into)]\nimpl Into<Vec<u8>> for Address {\n  fn into(self) -> Vec<u8> {\n    EncodedAddress {\n      kind: match self.0.kind() {\n        AddressType::Legacy => EncodedAddressType::Legacy,\n        AddressType::LegacyIntegrated(_) => {\n          panic!(\"integrated address became Serai Monero address\")\n        }\n        AddressType::Subaddress => EncodedAddressType::Subaddress,\n        AddressType::Featured { subaddress, payment_id, guaranteed } => {\n          debug_assert!(payment_id.is_none());\n          EncodedAddressType::Featured(u8::from(*subaddress) + (u8::from(*guaranteed) << 2))\n        }\n      },\n      spend: self.0.spend().compress().0,\n      view: self.0.view().compress().0,\n    }\n    .encode()\n  }\n}\n"
  },
  {
    "path": "substrate/client/src/serai/coins.rs",
    "content": "use scale::Encode;\n\nuse serai_abi::primitives::{SeraiAddress, Amount, Coin, Balance};\npub use serai_abi::coins::primitives;\nuse primitives::OutInstructionWithBalance;\n\nuse crate::{TemporalSerai, SeraiError};\n\nconst PALLET: &str = \"Coins\";\n\npub type CoinsEvent = serai_abi::coins::Event;\n\n#[derive(Clone, Copy)]\npub struct SeraiCoins<'a>(pub(crate) &'a TemporalSerai<'a>);\nimpl<'a> SeraiCoins<'a> {\n  pub async fn mint_events(&self) -> Result<Vec<CoinsEvent>, SeraiError> {\n    self\n      .0\n      .events(|event| {\n        if let serai_abi::Event::Coins(event) = event {\n          if matches!(event, CoinsEvent::Mint { .. }) {\n            Some(event.clone())\n          } else {\n            None\n          }\n        } else {\n          None\n        }\n      })\n      .await\n  }\n\n  pub async fn burn_with_instruction_events(&self) -> Result<Vec<CoinsEvent>, SeraiError> {\n    self\n      .0\n      .events(|event| {\n        if let serai_abi::Event::Coins(event) = event {\n          if matches!(event, CoinsEvent::BurnWithInstruction { .. 
}) {\n            Some(event.clone())\n          } else {\n            None\n          }\n        } else {\n          None\n        }\n      })\n      .await\n  }\n\n  pub async fn coin_supply(&self, coin: Coin) -> Result<Amount, SeraiError> {\n    Ok(self.0.storage(PALLET, \"Supply\", coin).await?.unwrap_or(Amount(0)))\n  }\n\n  pub async fn coin_balance(\n    &self,\n    coin: Coin,\n    address: SeraiAddress,\n  ) -> Result<Amount, SeraiError> {\n    Ok(\n      self\n        .0\n        .storage(\n          PALLET,\n          \"Balances\",\n          (sp_core::hashing::blake2_128(&address.encode()), &address.0, coin),\n        )\n        .await?\n        .unwrap_or(Amount(0)),\n    )\n  }\n\n  pub fn transfer(to: SeraiAddress, balance: Balance) -> serai_abi::Call {\n    serai_abi::Call::Coins(serai_abi::coins::Call::transfer { to, balance })\n  }\n\n  pub fn burn(balance: Balance) -> serai_abi::Call {\n    serai_abi::Call::Coins(serai_abi::coins::Call::burn { balance })\n  }\n\n  pub fn burn_with_instruction(instruction: OutInstructionWithBalance) -> serai_abi::Call {\n    serai_abi::Call::Coins(serai_abi::coins::Call::burn_with_instruction { instruction })\n  }\n}\n"
  },
  {
    "path": "substrate/client/src/serai/dex.rs",
    "content": "use sp_core::bounded_vec::BoundedVec;\nuse serai_abi::primitives::{Amount, Coin, ExternalCoin, SeraiAddress};\n\nuse crate::{SeraiError, TemporalSerai};\n\npub type DexEvent = serai_abi::dex::Event;\n\nconst PALLET: &str = \"Dex\";\n\n#[derive(Clone, Copy)]\npub struct SeraiDex<'a>(pub(crate) &'a TemporalSerai<'a>);\nimpl<'a> SeraiDex<'a> {\n  pub async fn events(&self) -> Result<Vec<DexEvent>, SeraiError> {\n    self\n      .0\n      .events(\n        |event| if let serai_abi::Event::Dex(event) = event { Some(event.clone()) } else { None },\n      )\n      .await\n  }\n\n  pub fn add_liquidity(\n    coin: ExternalCoin,\n    coin_amount: Amount,\n    sri_amount: Amount,\n    min_coin_amount: Amount,\n    min_sri_amount: Amount,\n    address: SeraiAddress,\n  ) -> serai_abi::Call {\n    serai_abi::Call::Dex(serai_abi::dex::Call::add_liquidity {\n      coin,\n      coin_desired: coin_amount.0,\n      sri_desired: sri_amount.0,\n      coin_min: min_coin_amount.0,\n      sri_min: min_sri_amount.0,\n      mint_to: address,\n    })\n  }\n\n  pub fn swap(\n    from_coin: Coin,\n    to_coin: Coin,\n    amount_in: Amount,\n    amount_out_min: Amount,\n    address: SeraiAddress,\n  ) -> serai_abi::Call {\n    let path = if to_coin.is_native() {\n      BoundedVec::try_from(vec![from_coin, Coin::Serai]).unwrap()\n    } else if from_coin.is_native() {\n      BoundedVec::try_from(vec![Coin::Serai, to_coin]).unwrap()\n    } else {\n      BoundedVec::try_from(vec![from_coin, Coin::Serai, to_coin]).unwrap()\n    };\n\n    serai_abi::Call::Dex(serai_abi::dex::Call::swap_exact_tokens_for_tokens {\n      path,\n      amount_in: amount_in.0,\n      amount_out_min: amount_out_min.0,\n      send_to: address,\n    })\n  }\n\n  /// Returns the reserves of `coin:SRI` pool.\n  pub async fn get_reserves(\n    &self,\n    coin: ExternalCoin,\n  ) -> Result<Option<(Amount, Amount)>, SeraiError> {\n    self.0.runtime_api(\"DexApi_get_reserves\", (Coin::from(coin), 
Coin::Serai)).await\n  }\n\n  pub async fn oracle_value(&self, coin: ExternalCoin) -> Result<Option<Amount>, SeraiError> {\n    self.0.storage(PALLET, \"SecurityOracleValue\", coin).await\n  }\n}\n"
  },
  {
    "path": "substrate/client/src/serai/genesis_liquidity.rs",
    "content": "pub use serai_abi::genesis_liquidity::primitives;\nuse primitives::{Values, LiquidityAmount};\n\nuse serai_abi::primitives::*;\n\nuse sp_core::sr25519::Signature;\n\nuse scale::Encode;\n\nuse crate::{Serai, SeraiError, TemporalSerai, Transaction};\n\npub type GenesisLiquidityEvent = serai_abi::genesis_liquidity::Event;\n\nconst PALLET: &str = \"GenesisLiquidity\";\n\n#[derive(Clone, Copy)]\npub struct SeraiGenesisLiquidity<'a>(pub(crate) &'a TemporalSerai<'a>);\nimpl<'a> SeraiGenesisLiquidity<'a> {\n  pub async fn events(&self) -> Result<Vec<GenesisLiquidityEvent>, SeraiError> {\n    self\n      .0\n      .events(|event| {\n        if let serai_abi::Event::GenesisLiquidity(event) = event {\n          Some(event.clone())\n        } else {\n          None\n        }\n      })\n      .await\n  }\n\n  pub fn oraclize_values(values: Values, signature: Signature) -> Transaction {\n    Serai::unsigned(serai_abi::Call::GenesisLiquidity(\n      serai_abi::genesis_liquidity::Call::oraclize_values { values, signature },\n    ))\n  }\n\n  pub fn remove_coin_liquidity(balance: ExternalBalance) -> serai_abi::Call {\n    serai_abi::Call::GenesisLiquidity(serai_abi::genesis_liquidity::Call::remove_coin_liquidity {\n      balance,\n    })\n  }\n\n  pub async fn liquidity(\n    &self,\n    address: &SeraiAddress,\n    coin: ExternalCoin,\n  ) -> Result<LiquidityAmount, SeraiError> {\n    Ok(\n      self\n        .0\n        .storage(\n          PALLET,\n          \"Liquidity\",\n          (coin, sp_core::hashing::blake2_128(&address.encode()), &address.0),\n        )\n        .await?\n        .unwrap_or(LiquidityAmount::zero()),\n    )\n  }\n\n  pub async fn supply(&self, coin: ExternalCoin) -> Result<LiquidityAmount, SeraiError> {\n    Ok(self.0.storage(PALLET, \"Supply\", coin).await?.unwrap_or(LiquidityAmount::zero()))\n  }\n\n  pub async fn genesis_complete_block(&self) -> Result<Option<u64>, SeraiError> {\n    self.0.storage(PALLET, \"GenesisCompleteBlock\", 
()).await\n  }\n}\n"
  },
  {
    "path": "substrate/client/src/serai/in_instructions.rs",
    "content": "pub use serai_abi::in_instructions::primitives;\nuse primitives::SignedBatch;\n\nuse crate::{\n  primitives::{BlockHash, ExternalNetworkId},\n  Transaction, SeraiError, Serai, TemporalSerai,\n};\n\npub type InInstructionsEvent = serai_abi::in_instructions::Event;\n\nconst PALLET: &str = \"InInstructions\";\n\n#[derive(Clone, Copy)]\npub struct SeraiInInstructions<'a>(pub(crate) &'a TemporalSerai<'a>);\nimpl<'a> SeraiInInstructions<'a> {\n  pub async fn latest_block_for_network(\n    &self,\n    network: ExternalNetworkId,\n  ) -> Result<Option<BlockHash>, SeraiError> {\n    self.0.storage(PALLET, \"LatestNetworkBlock\", network).await\n  }\n\n  pub async fn last_batch_for_network(\n    &self,\n    network: ExternalNetworkId,\n  ) -> Result<Option<u32>, SeraiError> {\n    self.0.storage(PALLET, \"LastBatch\", network).await\n  }\n\n  pub async fn batch_events(&self) -> Result<Vec<InInstructionsEvent>, SeraiError> {\n    self\n      .0\n      .events(|event| {\n        if let serai_abi::Event::InInstructions(event) = event {\n          if matches!(event, InInstructionsEvent::Batch { .. }) {\n            Some(event.clone())\n          } else {\n            None\n          }\n        } else {\n          None\n        }\n      })\n      .await\n  }\n\n  pub fn execute_batch(batch: SignedBatch) -> Transaction {\n    Serai::unsigned(serai_abi::Call::InInstructions(\n      serai_abi::in_instructions::Call::execute_batch { batch },\n    ))\n  }\n}\n"
  },
  {
    "path": "substrate/client/src/serai/liquidity_tokens.rs",
    "content": "use scale::Encode;\n\nuse serai_abi::primitives::{Amount, ExternalBalance, ExternalCoin, SeraiAddress};\n\nuse crate::{TemporalSerai, SeraiError};\n\nconst PALLET: &str = \"LiquidityTokens\";\n\n#[derive(Clone, Copy)]\npub struct SeraiLiquidityTokens<'a>(pub(crate) &'a TemporalSerai<'a>);\nimpl<'a> SeraiLiquidityTokens<'a> {\n  pub async fn token_supply(&self, coin: ExternalCoin) -> Result<Amount, SeraiError> {\n    Ok(self.0.storage(PALLET, \"Supply\", coin).await?.unwrap_or(Amount(0)))\n  }\n\n  pub async fn token_balance(\n    &self,\n    coin: ExternalCoin,\n    address: SeraiAddress,\n  ) -> Result<Amount, SeraiError> {\n    Ok(\n      self\n        .0\n        .storage(\n          PALLET,\n          \"Balances\",\n          (sp_core::hashing::blake2_128(&address.encode()), &address.0, coin),\n        )\n        .await?\n        .unwrap_or(Amount(0)),\n    )\n  }\n\n  pub fn transfer(to: SeraiAddress, balance: ExternalBalance) -> serai_abi::Call {\n    serai_abi::Call::LiquidityTokens(serai_abi::liquidity_tokens::Call::transfer {\n      to,\n      balance: balance.into(),\n    })\n  }\n\n  pub fn burn(balance: ExternalBalance) -> serai_abi::Call {\n    serai_abi::Call::LiquidityTokens(serai_abi::liquidity_tokens::Call::burn {\n      balance: balance.into(),\n    })\n  }\n}\n"
  },
  {
    "path": "substrate/client/src/serai/mod.rs",
    "content": "use thiserror::Error;\n\nuse async_lock::RwLock;\nuse simple_request::{hyper, Request, Client};\n\nuse scale::{Decode, Encode};\nuse serde::{Serialize, Deserialize, de::DeserializeOwned};\n\npub use sp_core::{\n  Pair as PairTrait,\n  sr25519::{Public, Pair},\n};\n\npub use serai_abi as abi;\npub use abi::{primitives, Transaction};\nuse abi::*;\n\npub use primitives::{SeraiAddress, Signature, Amount};\nuse primitives::{Header, NetworkId};\n\npub mod coins;\npub use coins::SeraiCoins;\npub mod dex;\npub use dex::SeraiDex;\npub mod in_instructions;\npub use in_instructions::SeraiInInstructions;\npub mod validator_sets;\npub use validator_sets::SeraiValidatorSets;\npub mod genesis_liquidity;\npub use genesis_liquidity::SeraiGenesisLiquidity;\npub mod liquidity_tokens;\npub use liquidity_tokens::SeraiLiquidityTokens;\n\n#[derive(Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode)]\npub struct Block {\n  pub header: Header,\n  pub transactions: Vec<Transaction>,\n}\nimpl Block {\n  pub fn hash(&self) -> [u8; 32] {\n    self.header.hash().into()\n  }\n  pub fn number(&self) -> u64 {\n    self.header.number\n  }\n\n  /// Returns the time of this block, set by its producer, in milliseconds since the epoch.\n  pub fn time(&self) -> Result<u64, SeraiError> {\n    for transaction in &self.transactions {\n      if let Call::Timestamp(timestamp::Call::set { now }) = transaction.call() {\n        return Ok(*now);\n      }\n    }\n    Err(SeraiError::InvalidNode(\"no time was present in block\".to_string()))\n  }\n}\n\n#[derive(Error, Debug)]\npub enum SeraiError {\n  #[error(\"failed to communicate with serai\")]\n  ConnectionError,\n  #[error(\"node is faulty: {0}\")]\n  InvalidNode(String),\n  #[error(\"error in response: {0}\")]\n  ErrorInResponse(String),\n  #[error(\"serai-client library was intended for a different runtime version: {0}\")]\n  InvalidRuntime(String),\n}\n\n#[derive(Clone)]\npub struct Serai {\n  url: String,\n  client: Client,\n  
genesis: [u8; 32],\n}\n\ntype EventsInBlock = Vec<frame_system::EventRecord<Event, [u8; 32]>>;\npub struct TemporalSerai<'a> {\n  serai: &'a Serai,\n  block: [u8; 32],\n  events: RwLock<Option<EventsInBlock>>,\n}\nimpl<'a> Clone for TemporalSerai<'a> {\n  fn clone(&self) -> Self {\n    Self { serai: self.serai, block: self.block, events: RwLock::new(None) }\n  }\n}\n\nimpl Serai {\n  pub async fn call<Req: Serialize, Res: DeserializeOwned>(\n    &self,\n    method: &str,\n    params: Req,\n  ) -> Result<Res, SeraiError> {\n    let request = Request::from(\n      hyper::Request::post(&self.url)\n        .header(\"Content-Type\", \"application/json\")\n        .body(\n          serde_json::to_vec(\n            &serde_json::json!({ \"jsonrpc\": \"2.0\", \"id\": 1, \"method\": method, \"params\": params }),\n          )\n          .unwrap()\n          .into(),\n        )\n        .unwrap(),\n    );\n\n    #[derive(Deserialize)]\n    pub struct Error {\n      message: String,\n    }\n\n    #[derive(Deserialize)]\n    #[serde(untagged)]\n    enum RpcResponse<T> {\n      Ok { result: T },\n      Err { error: Error },\n    }\n\n    let mut res = self\n      .client\n      .request(request)\n      .await\n      .map_err(|_| SeraiError::ConnectionError)?\n      .body()\n      .await\n      .map_err(|_| SeraiError::ConnectionError)?;\n\n    let res: RpcResponse<Res> = serde_json::from_reader(&mut res).map_err(|e| {\n      SeraiError::InvalidRuntime(format!(\n        \"response was a different type than expected: {:?}\",\n        e.classify()\n      ))\n    })?;\n    match res {\n      RpcResponse::Ok { result } => Ok(result),\n      RpcResponse::Err { error } => Err(SeraiError::ErrorInResponse(error.message)),\n    }\n  }\n\n  fn hex_decode(str: String) -> Result<Vec<u8>, SeraiError> {\n    (if let Some(stripped) = str.strip_prefix(\"0x\") {\n      hex::decode(stripped)\n    } else {\n      hex::decode(str)\n    })\n    .map_err(|_| SeraiError::InvalidNode(\"expected hex from 
node wasn't hex\".to_string()))\n  }\n\n  pub async fn block_hash(&self, number: u64) -> Result<Option<[u8; 32]>, SeraiError> {\n    let hash: Option<String> = self.call(\"chain_getBlockHash\", [number]).await?;\n    let Some(hash) = hash else { return Ok(None) };\n    Self::hex_decode(hash)?\n      .try_into()\n      .map_err(|_| SeraiError::InvalidNode(\"didn't respond to getBlockHash with hash\".to_string()))\n      .map(Some)\n  }\n\n  pub async fn new(url: String) -> Result<Self, SeraiError> {\n    let client = Client::with_connection_pool();\n    let mut res = Serai { url, client, genesis: [0xfe; 32] };\n    res.genesis = res.block_hash(0).await?.ok_or_else(|| {\n      SeraiError::InvalidNode(\"node didn't have the first block's hash\".to_string())\n    })?;\n    Ok(res)\n  }\n\n  fn unsigned(call: Call) -> Transaction {\n    Transaction::new(call, None)\n  }\n\n  pub fn sign(&self, signer: &Pair, call: Call, nonce: u32, tip: u64) -> Transaction {\n    const SPEC_VERSION: u32 = 1;\n    const TX_VERSION: u32 = 1;\n\n    let extra = Extra { era: sp_runtime::generic::Era::Immortal, nonce, tip };\n    let signature_payload = (\n      &call,\n      &extra,\n      SignedPayloadExtra {\n        spec_version: SPEC_VERSION,\n        tx_version: TX_VERSION,\n        genesis: self.genesis,\n        mortality_checkpoint: self.genesis,\n      },\n    )\n      .encode();\n    let signature = signer.sign(&signature_payload);\n\n    Transaction::new(call, Some((signer.public().into(), signature, extra)))\n  }\n\n  pub async fn publish(&self, tx: &Transaction) -> Result<(), SeraiError> {\n    // Drop the returned hash, which is the hash of the raw extrinsic, as extrinsics are allowed\n    // to share hashes and this hash is accordingly useless/unsafe\n    // If we are to return something, it should be block included in and position within block\n    let _: String = self.call(\"author_submitExtrinsic\", [hex::encode(tx.encode())]).await?;\n    Ok(())\n  }\n\n  pub async fn 
latest_finalized_block_hash(&self) -> Result<[u8; 32], SeraiError> {\n    let hash: String = self.call(\"chain_getFinalizedHead\", ()).await?;\n    Self::hex_decode(hash)?.try_into().map_err(|_| {\n      SeraiError::InvalidNode(\"didn't respond to getFinalizedHead with hash\".to_string())\n    })\n  }\n\n  pub async fn header(&self, hash: [u8; 32]) -> Result<Option<Header>, SeraiError> {\n    self.call(\"chain_getHeader\", [hex::encode(hash)]).await\n  }\n\n  pub async fn block(&self, hash: [u8; 32]) -> Result<Option<Block>, SeraiError> {\n    let block: Option<String> = self.call(\"chain_getBlockBin\", [hex::encode(hash)]).await?;\n    let Some(block) = block else { return Ok(None) };\n    let Ok(bytes) = Self::hex_decode(block) else {\n      Err(SeraiError::InvalidNode(\"didn't return a hex-encoded block\".to_string()))?\n    };\n    let Ok(block) = Block::decode(&mut bytes.as_slice()) else {\n      Err(SeraiError::InvalidNode(\"didn't return a block\".to_string()))?\n    };\n    Ok(Some(block))\n  }\n\n  pub async fn latest_finalized_block(&self) -> Result<Block, SeraiError> {\n    let latest = self.latest_finalized_block_hash().await?;\n    let Some(block) = self.block(latest).await? else {\n      Err(SeraiError::InvalidNode(\"node didn't have a latest block\".to_string()))?\n    };\n    Ok(block)\n  }\n\n  // There is no provided method for this\n  // TODO: Add one to Serai\n  pub async fn is_finalized(&self, header: &Header) -> Result<bool, SeraiError> {\n    // Get the latest finalized block\n    let finalized = self.latest_finalized_block_hash().await?;\n    // If the latest finalized block is this block, return true\n    if finalized == header.hash().as_ref() {\n      return Ok(true);\n    }\n\n    let Some(finalized) = self.header(finalized).await? 
else {\n      Err(SeraiError::InvalidNode(\"couldn't get finalized header\".to_string()))?\n    };\n\n    // If the finalized block has a lower number, this block can't be finalized\n    if finalized.number < header.number {\n      return Ok(false);\n    }\n\n    // This block, if finalized, comes before the finalized block\n    // If we request the hash of this block's number, Substrate will return the hash on the main\n    // chain\n    // If that hash is this hash, this block is finalized\n    let Some(hash) = self.block_hash(header.number).await? else {\n      // This is an error since there is a finalized block at this index\n      Err(SeraiError::InvalidNode(\n        \"couldn't get block hash for a block number below the finalized block\".to_string(),\n      ))?\n    };\n\n    Ok(header.hash().as_ref() == hash)\n  }\n\n  pub async fn finalized_block_by_number(&self, number: u64) -> Result<Option<Block>, SeraiError> {\n    let hash = self.block_hash(number).await?;\n    let Some(hash) = hash else { return Ok(None) };\n    let Some(block) = self.block(hash).await? else { return Ok(None) };\n    if !self.is_finalized(&block.header).await? 
{\n      return Ok(None);\n    }\n    Ok(Some(block))\n  }\n\n  /*\n  /// A stream which yields whenever new block(s) have been finalized.\n  pub async fn newly_finalized_block(\n    &self,\n  ) -> Result<impl Stream<Item = Result<(), SeraiError>>, SeraiError> {\n    Ok(self.0.rpc().subscribe_finalized_block_headers().await\n    .map_err(|_| SeraiError::ConnectionError)?.map(\n      |next| {\n        next.map_err(|_| SeraiError::ConnectionError)?;\n        Ok(())\n      },\n    ))\n  }\n\n  pub async fn nonce(&self, address: &SeraiAddress) -> Result<u32, SeraiError> {\n    self\n      .0\n      .rpc()\n      .system_account_next_index(&sp_core::sr25519::Public::from(address.0).to_string())\n      .await\n      .map_err(|_| SeraiError::ConnectionError)\n  }\n  */\n\n  /// Create a TemporalSerai bound to whatever is currently the latest finalized block.\n  ///\n  /// The binding occurs at time of call. This does not track the latest finalized block and update\n  /// itself.\n  pub async fn as_of_latest_finalized_block(&self) -> Result<TemporalSerai, SeraiError> {\n    let latest = self.latest_finalized_block_hash().await?;\n    Ok(TemporalSerai { serai: self, block: latest, events: RwLock::new(None) })\n  }\n\n  /// Returns a TemporalSerai able to retrieve state as of the specified block.\n  pub fn as_of(&self, block: [u8; 32]) -> TemporalSerai {\n    TemporalSerai { serai: self, block, events: RwLock::new(None) }\n  }\n\n  /// Return the P2P Multiaddrs for the validators of the specified network.\n  pub async fn p2p_validators(\n    &self,\n    network: NetworkId,\n  ) -> Result<Vec<multiaddr::Multiaddr>, SeraiError> {\n    self.call(\"p2p_validators\", [network]).await\n  }\n}\n\nimpl<'a> TemporalSerai<'a> {\n  async fn events<E>(\n    &self,\n    filter_map: impl Fn(&Event) -> Option<E>,\n  ) -> Result<Vec<E>, SeraiError> {\n    let mut events = self.events.read().await;\n    if events.is_none() {\n      drop(events);\n      let mut events_write = 
self.events.write().await;\n      if events_write.is_none() {\n        *events_write = Some(self.storage(\"System\", \"Events\", ()).await?.unwrap_or(vec![]));\n      }\n      drop(events_write);\n      events = self.events.read().await;\n    }\n\n    let mut res = vec![];\n    for event in events.as_ref().unwrap() {\n      if let Some(event) = filter_map(&event.event) {\n        res.push(event);\n      }\n    }\n    Ok(res)\n  }\n\n  async fn storage<K: Encode, R: Decode>(\n    &self,\n    pallet: &'static str,\n    name: &'static str,\n    key: K,\n  ) -> Result<Option<R>, SeraiError> {\n    // TODO: Make this const?\n    let mut full_key = sp_core::hashing::twox_128(pallet.as_bytes()).to_vec();\n    full_key.extend(sp_core::hashing::twox_128(name.as_bytes()));\n    full_key.extend(key.encode());\n\n    let res: Option<String> =\n      self.serai.call(\"state_getStorage\", [hex::encode(full_key), hex::encode(self.block)]).await?;\n    let Some(res) = res else { return Ok(None) };\n    let res = Serai::hex_decode(res)?;\n    Ok(Some(R::decode(&mut res.as_slice()).map_err(|_| {\n      SeraiError::InvalidRuntime(format!(\n        \"different type present at storage location, raw value: {}\",\n        hex::encode(res)\n      ))\n    })?))\n  }\n\n  async fn runtime_api<P: Encode, R: Decode>(\n    &self,\n    method: &'static str,\n    params: P,\n  ) -> Result<R, SeraiError> {\n    let result: String = self\n      .serai\n      .call(\n        \"state_call\",\n        [method.to_string(), hex::encode(params.encode()), hex::encode(self.block)],\n      )\n      .await?;\n\n    let bytes = Serai::hex_decode(result.clone())?;\n    R::decode(&mut bytes.as_slice()).map_err(|_| {\n      SeraiError::InvalidRuntime(format!(\n        \"different type than what is expected to be returned, raw value: {}\",\n        hex::encode(result)\n      ))\n    })\n  }\n\n  pub fn coins(&'a self) -> SeraiCoins<'a> {\n    SeraiCoins(self)\n  }\n\n  pub fn dex(&'a self) -> SeraiDex<'a> {\n    
SeraiDex(self)\n  }\n\n  pub fn in_instructions(&'a self) -> SeraiInInstructions<'a> {\n    SeraiInInstructions(self)\n  }\n\n  pub fn validator_sets(&'a self) -> SeraiValidatorSets<'a> {\n    SeraiValidatorSets(self)\n  }\n\n  pub fn genesis_liquidity(&'a self) -> SeraiGenesisLiquidity {\n    SeraiGenesisLiquidity(self)\n  }\n\n  pub fn liquidity_tokens(&'a self) -> SeraiLiquidityTokens {\n    SeraiLiquidityTokens(self)\n  }\n}\n"
  },
  {
    "path": "substrate/client/src/serai/validator_sets.rs",
    "content": "use scale::Encode;\n\nuse sp_core::sr25519::{Public, Signature};\n\nuse serai_abi::{primitives::Amount, validator_sets::primitives::ExternalValidatorSet};\npub use serai_abi::validator_sets::primitives;\nuse primitives::{Session, KeyPair};\n\nuse crate::{\n  primitives::{NetworkId, ExternalNetworkId, SeraiAddress},\n  Transaction, Serai, TemporalSerai, SeraiError,\n};\n\nconst PALLET: &str = \"ValidatorSets\";\n\npub type ValidatorSetsEvent = serai_abi::validator_sets::Event;\n\n#[derive(Clone, Copy)]\npub struct SeraiValidatorSets<'a>(pub(crate) &'a TemporalSerai<'a>);\nimpl<'a> SeraiValidatorSets<'a> {\n  pub async fn new_set_events(&self) -> Result<Vec<ValidatorSetsEvent>, SeraiError> {\n    self\n      .0\n      .events(|event| {\n        if let serai_abi::Event::ValidatorSets(event) = event {\n          if matches!(event, ValidatorSetsEvent::NewSet { .. }) {\n            Some(event.clone())\n          } else {\n            None\n          }\n        } else {\n          None\n        }\n      })\n      .await\n  }\n\n  pub async fn participant_removed_events(&self) -> Result<Vec<ValidatorSetsEvent>, SeraiError> {\n    self\n      .0\n      .events(|event| {\n        if let serai_abi::Event::ValidatorSets(event) = event {\n          if matches!(event, ValidatorSetsEvent::ParticipantRemoved { .. }) {\n            Some(event.clone())\n          } else {\n            None\n          }\n        } else {\n          None\n        }\n      })\n      .await\n  }\n\n  pub async fn key_gen_events(&self) -> Result<Vec<ValidatorSetsEvent>, SeraiError> {\n    self\n      .0\n      .events(|event| {\n        if let serai_abi::Event::ValidatorSets(event) = event {\n          if matches!(event, ValidatorSetsEvent::KeyGen { .. 
}) {\n            Some(event.clone())\n          } else {\n            None\n          }\n        } else {\n          None\n        }\n      })\n      .await\n  }\n\n  pub async fn accepted_handover_events(&self) -> Result<Vec<ValidatorSetsEvent>, SeraiError> {\n    self\n      .0\n      .events(|event| {\n        if let serai_abi::Event::ValidatorSets(event) = event {\n          if matches!(event, ValidatorSetsEvent::AcceptedHandover { .. }) {\n            Some(event.clone())\n          } else {\n            None\n          }\n        } else {\n          None\n        }\n      })\n      .await\n  }\n\n  pub async fn set_retired_events(&self) -> Result<Vec<ValidatorSetsEvent>, SeraiError> {\n    self\n      .0\n      .events(|event| {\n        if let serai_abi::Event::ValidatorSets(event) = event {\n          if matches!(event, ValidatorSetsEvent::SetRetired { .. }) {\n            Some(event.clone())\n          } else {\n            None\n          }\n        } else {\n          None\n        }\n      })\n      .await\n  }\n\n  pub async fn session(&self, network: NetworkId) -> Result<Option<Session>, SeraiError> {\n    self.0.storage(PALLET, \"CurrentSession\", network).await\n  }\n\n  pub async fn participants(\n    &self,\n    network: NetworkId,\n  ) -> Result<Option<Vec<(Public, u64)>>, SeraiError> {\n    self.0.storage(PALLET, \"Participants\", network).await\n  }\n\n  pub async fn allocation_per_key_share(\n    &self,\n    network: NetworkId,\n  ) -> Result<Option<Amount>, SeraiError> {\n    self.0.storage(PALLET, \"AllocationPerKeyShare\", network).await\n  }\n\n  pub async fn total_allocated_stake(\n    &self,\n    network: NetworkId,\n  ) -> Result<Option<Amount>, SeraiError> {\n    self.0.storage(PALLET, \"TotalAllocatedStake\", network).await\n  }\n\n  pub async fn allocation(\n    &self,\n    network: NetworkId,\n    key: Public,\n  ) -> Result<Option<Amount>, SeraiError> {\n    self\n      .0\n      .storage(\n        PALLET,\n        
\"Allocations\",\n        (sp_core::hashing::blake2_128(&(network, key).encode()), (network, key)),\n      )\n      .await\n  }\n\n  pub async fn pending_deallocations(\n    &self,\n    network: NetworkId,\n    account: Public,\n    session: Session,\n  ) -> Result<Option<Amount>, SeraiError> {\n    self\n      .0\n      .storage(\n        PALLET,\n        \"PendingDeallocations\",\n        (sp_core::hashing::blake2_128(&(network, account).encode()), (network, account, session)),\n      )\n      .await\n  }\n\n  pub async fn active_network_validators(\n    &self,\n    network: NetworkId,\n  ) -> Result<Vec<Public>, SeraiError> {\n    self.0.runtime_api(\"SeraiRuntimeApi_validators\", network).await\n  }\n\n  // TODO: Store these separately since we almost never need both at once?\n  pub async fn keys(&self, set: ExternalValidatorSet) -> Result<Option<KeyPair>, SeraiError> {\n    self.0.storage(PALLET, \"Keys\", (sp_core::hashing::twox_64(&set.encode()), set)).await\n  }\n\n  pub async fn key_pending_slash_report(\n    &self,\n    network: ExternalNetworkId,\n  ) -> Result<Option<Public>, SeraiError> {\n    self.0.storage(PALLET, \"PendingSlashReport\", network).await\n  }\n\n  pub async fn session_begin_block(\n    &self,\n    network: NetworkId,\n    session: Session,\n  ) -> Result<Option<u64>, SeraiError> {\n    self.0.storage(PALLET, \"SessionBeginBlock\", (network, session)).await\n  }\n\n  pub fn set_keys(\n    network: ExternalNetworkId,\n    removed_participants: sp_runtime::BoundedVec<\n      SeraiAddress,\n      sp_core::ConstU32<{ primitives::MAX_KEY_SHARES_PER_SET / 3 }>,\n    >,\n    key_pair: KeyPair,\n    signature: Signature,\n  ) -> Transaction {\n    Serai::unsigned(serai_abi::Call::ValidatorSets(serai_abi::validator_sets::Call::set_keys {\n      network,\n      removed_participants,\n      key_pair,\n      signature,\n    }))\n  }\n\n  pub fn allocate(network: NetworkId, amount: Amount) -> serai_abi::Call {\n    
serai_abi::Call::ValidatorSets(serai_abi::validator_sets::Call::allocate { network, amount })\n  }\n\n  pub fn deallocate(network: NetworkId, amount: Amount) -> serai_abi::Call {\n    serai_abi::Call::ValidatorSets(serai_abi::validator_sets::Call::deallocate { network, amount })\n  }\n\n  pub fn report_slashes(\n    network: ExternalNetworkId,\n    slashes: sp_runtime::BoundedVec<\n      (SeraiAddress, u32),\n      sp_core::ConstU32<{ primitives::MAX_KEY_SHARES_PER_SET / 3 }>,\n    >,\n    signature: Signature,\n  ) -> Transaction {\n    Serai::unsigned(serai_abi::Call::ValidatorSets(\n      serai_abi::validator_sets::Call::report_slashes { network, slashes, signature },\n    ))\n  }\n}\n"
  },
  {
    "path": "substrate/client/src/tests/mod.rs",
    "content": "#[cfg(feature = \"networks\")]\nmod networks;\n"
  },
  {
    "path": "substrate/client/src/tests/networks/bitcoin.rs",
    "content": "// TODO: Test the address back and forth\n"
  },
  {
    "path": "substrate/client/src/tests/networks/mod.rs",
    "content": "#[cfg(feature = \"bitcoin\")]\nmod bitcoin;\n\n#[cfg(feature = \"monero\")]\nmod monero;\n"
  },
  {
    "path": "substrate/client/src/tests/networks/monero.rs",
    "content": "// TODO: Test the address back and forth\n"
  },
  {
    "path": "substrate/client/tests/batch.rs",
    "content": "use rand_core::{RngCore, OsRng};\n\nuse blake2::{\n  digest::{consts::U32, Digest},\n  Blake2b,\n};\n\nuse scale::Encode;\n\nuse serai_client::{\n  primitives::{Amount, BlockHash, ExternalBalance, ExternalCoin, SeraiAddress},\n  in_instructions::{\n    primitives::{InInstruction, InInstructionWithBalance, Batch},\n    InInstructionsEvent,\n  },\n  coins::CoinsEvent,\n  Serai,\n};\n\nmod common;\nuse common::in_instructions::provide_batch;\n\nserai_test!(\n  publish_batch: (|serai: Serai| async move {\n    let id = 0;\n    let mut block_hash = BlockHash([0; 32]);\n    OsRng.fill_bytes(&mut block_hash.0);\n\n    let mut address = SeraiAddress::new([0; 32]);\n    OsRng.fill_bytes(&mut address.0);\n\n    let coin = ExternalCoin::Bitcoin;\n    let network = coin.network();\n    let amount = Amount(OsRng.next_u64().saturating_add(1));\n    let balance = ExternalBalance { coin, amount };\n\n    let batch = Batch {\n      network,\n      id,\n      block: block_hash,\n      instructions: vec![InInstructionWithBalance {\n        instruction: InInstruction::Transfer(address),\n        balance,\n      }],\n    };\n\n    let block = provide_batch(&serai, batch.clone()).await;\n\n    let serai = serai.as_of(block);\n    {\n      let serai = serai.in_instructions();\n      let latest_finalized = serai.latest_block_for_network(network).await.unwrap();\n      assert_eq!(latest_finalized, Some(block_hash));\n      let batches = serai.batch_events().await.unwrap();\n      assert_eq!(\n        batches,\n        vec![InInstructionsEvent::Batch {\n          network,\n          id,\n          block: block_hash,\n          instructions_hash: Blake2b::<U32>::digest(batch.instructions.encode()).into(),\n        }]\n      );\n    }\n\n    let serai = serai.coins();\n    assert_eq!(\n      serai.mint_events().await.unwrap(),\n      vec![CoinsEvent::Mint { to: address, balance: balance.into() }]\n    );\n    assert_eq!(serai.coin_supply(coin.into()).await.unwrap(), amount);\n  
  assert_eq!(serai.coin_balance(coin.into(), address).await.unwrap(), amount);\n  })\n);\n"
  },
  {
    "path": "substrate/client/tests/burn.rs",
    "content": "use rand_core::{RngCore, OsRng};\n\nuse blake2::{\n  digest::{consts::U32, Digest},\n  Blake2b,\n};\n\nuse scale::Encode;\n\nuse serai_abi::coins::primitives::OutInstructionWithBalance;\nuse sp_core::Pair;\n\nuse serai_client::{\n  primitives::{\n    Amount, ExternalCoin, ExternalBalance, BlockHash, SeraiAddress, Data, ExternalAddress,\n    insecure_pair_from_name,\n  },\n  in_instructions::{\n    InInstructionsEvent,\n    primitives::{InInstruction, InInstructionWithBalance, Batch},\n  },\n  coins::{primitives::OutInstruction, CoinsEvent},\n  Serai, SeraiCoins,\n};\n\nmod common;\nuse common::{tx::publish_tx, in_instructions::provide_batch};\n\nserai_test!(\n  burn: (|serai: Serai| async move {\n    let id = 0;\n    let mut block_hash = BlockHash([0; 32]);\n    OsRng.fill_bytes(&mut block_hash.0);\n\n    let pair = insecure_pair_from_name(\"Dave\");\n    let public = pair.public();\n    let address = SeraiAddress::from(public);\n\n    let coin = ExternalCoin::Bitcoin;\n    let network = coin.network();\n    let amount = Amount(OsRng.next_u64().saturating_add(1));\n    let balance = ExternalBalance { coin, amount };\n\n    let batch = Batch {\n      network,\n      id,\n      block: block_hash,\n      instructions: vec![InInstructionWithBalance {\n        instruction: InInstruction::Transfer(address),\n        balance,\n      }],\n    };\n\n    let block = provide_batch(&serai, batch.clone()).await;\n\n    let instruction = {\n    let serai = serai.as_of(block);\n    let batches = serai.in_instructions().batch_events().await.unwrap();\n    assert_eq!(\n      batches,\n      vec![InInstructionsEvent::Batch {\n        network,\n        id,\n        block: block_hash,\n        instructions_hash: Blake2b::<U32>::digest(batch.instructions.encode()).into(),\n      }]\n    );\n\n    assert_eq!(\n      serai.coins().mint_events().await.unwrap(),\n      vec![CoinsEvent::Mint { to: address, balance: balance.into() }]\n    );\n    
assert_eq!(serai.coins().coin_supply(coin.into()).await.unwrap(), amount);\n    assert_eq!(serai.coins().coin_balance(coin.into(), address).await.unwrap(), amount);\n\n    // Now burn it\n    let mut rand_bytes = vec![0; 32];\n    OsRng.fill_bytes(&mut rand_bytes);\n    let external_address = ExternalAddress::new(rand_bytes).unwrap();\n\n    let mut rand_bytes = vec![0; 32];\n    OsRng.fill_bytes(&mut rand_bytes);\n    let data = Data::new(rand_bytes).unwrap();\n\n    OutInstructionWithBalance {\n      balance,\n      instruction: OutInstruction { address: external_address, data: Some(data) },\n    }\n};\n\n    let block = publish_tx(\n      &serai,\n      &serai.sign(&pair, SeraiCoins::burn_with_instruction(instruction.clone()), 0, 0),\n    )\n    .await;\n\n    let serai = serai.as_of(block);\n    let serai = serai.coins();\n    let events = serai.burn_with_instruction_events().await.unwrap();\n    assert_eq!(events, vec![CoinsEvent::BurnWithInstruction { from: address, instruction }]);\n    assert_eq!(serai.coin_supply(coin.into()).await.unwrap(), Amount(0));\n    assert_eq!(serai.coin_balance(coin.into(), address).await.unwrap(), Amount(0));\n  })\n);\n"
  },
  {
    "path": "substrate/client/tests/common/dex.rs",
    "content": "use serai_abi::primitives::{Amount, Coin, ExternalCoin};\n\nuse serai_client::{Serai, SeraiDex};\nuse sp_core::{sr25519::Pair, Pair as PairTrait};\n\nuse crate::common::tx::publish_tx;\n\n#[allow(dead_code)]\npub async fn add_liquidity(\n  serai: &Serai,\n  coin: ExternalCoin,\n  coin_amount: Amount,\n  sri_amount: Amount,\n  nonce: u32,\n  pair: Pair,\n) -> [u8; 32] {\n  let address = pair.public();\n\n  let tx = serai.sign(\n    &pair,\n    SeraiDex::add_liquidity(coin, coin_amount, sri_amount, Amount(1), Amount(1), address.into()),\n    nonce,\n    0,\n  );\n\n  publish_tx(serai, &tx).await\n}\n\n#[allow(dead_code)]\npub async fn swap(\n  serai: &Serai,\n  from_coin: Coin,\n  to_coin: Coin,\n  amount_in: Amount,\n  amount_out_min: Amount,\n  nonce: u32,\n  pair: Pair,\n) -> [u8; 32] {\n  let address = pair.public();\n\n  let tx = serai.sign(\n    &pair,\n    SeraiDex::swap(from_coin, to_coin, amount_in, amount_out_min, address.into()),\n    nonce,\n    Default::default(),\n  );\n\n  publish_tx(serai, &tx).await\n}\n"
  },
  {
    "path": "substrate/client/tests/common/genesis_liquidity.rs",
    "content": "use std::collections::HashMap;\n\nuse rand_core::{RngCore, OsRng};\nuse zeroize::Zeroizing;\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::Ciphersuite;\nuse dkg_musig::musig;\nuse schnorrkel::Schnorrkel;\n\nuse sp_core::Pair as PairTrait;\n\nuse serai_abi::{\n  genesis_liquidity::primitives::{oraclize_values_message, Values},\n  in_instructions::primitives::{Batch, InInstruction, InInstructionWithBalance},\n  primitives::{\n    insecure_pair_from_name, Amount, ExternalBalance, BlockHash, ExternalCoin, ExternalNetworkId,\n    NetworkId, SeraiAddress, EXTERNAL_COINS,\n  },\n  validator_sets::primitives::{musig_context, Session, ValidatorSet},\n};\n\nuse serai_client::{Serai, SeraiGenesisLiquidity};\n\nuse crate::common::{in_instructions::provide_batch, tx::publish_tx};\n\n#[allow(dead_code)]\npub async fn set_up_genesis(\n  serai: &Serai,\n  values: &HashMap<ExternalCoin, u64>,\n) -> (HashMap<ExternalCoin, Vec<(SeraiAddress, Amount)>>, HashMap<ExternalNetworkId, u32>) {\n  // make accounts with amounts\n  let mut accounts = HashMap::new();\n  for coin in EXTERNAL_COINS {\n    // make 5 accounts per coin\n    let mut values = vec![];\n    for _ in 0 .. 
5 {\n      let mut address = SeraiAddress::new([0; 32]);\n      OsRng.fill_bytes(&mut address.0);\n      values.push((address, Amount(OsRng.next_u64() % 10u64.pow(coin.decimals()))));\n    }\n    accounts.insert(coin, values);\n  }\n\n  // send a batch per coin\n  let mut batch_ids: HashMap<ExternalNetworkId, u32> = HashMap::new();\n  for coin in EXTERNAL_COINS {\n    // set up instructions\n    let instructions = accounts[&coin]\n      .iter()\n      .map(|(addr, amount)| InInstructionWithBalance {\n        instruction: InInstruction::GenesisLiquidity(*addr),\n        balance: ExternalBalance { coin, amount: *amount },\n      })\n      .collect::<Vec<_>>();\n\n    // set up block hash\n    let mut block = BlockHash([0; 32]);\n    OsRng.fill_bytes(&mut block.0);\n\n    // set up batch id\n    batch_ids\n      .entry(coin.network())\n      .and_modify(|v| {\n        *v += 1;\n      })\n      .or_insert(0);\n\n    let batch =\n      Batch { network: coin.network(), id: batch_ids[&coin.network()], block, instructions };\n    provide_batch(serai, batch).await;\n  }\n\n  // set values relative to each other. 
We can do that without checking for genesis period blocks\n  // since we are running in test(fast-epoch) mode.\n  // TODO: Random values here\n  let values = Values {\n    monero: values[&ExternalCoin::Monero],\n    ether: values[&ExternalCoin::Ether],\n    dai: values[&ExternalCoin::Dai],\n  };\n  set_values(serai, &values).await;\n\n  (accounts, batch_ids)\n}\n\n#[allow(dead_code)]\nasync fn set_values(serai: &Serai, values: &Values) {\n  // prepare a Musig tx to oraclize the relative values\n  let pair = insecure_pair_from_name(\"Alice\");\n  let public = pair.public();\n  // we publish the tx in set 1\n  let set = ValidatorSet { session: Session(1), network: NetworkId::Serai };\n\n  let public_key = <Ristretto as Ciphersuite>::read_G::<&[u8]>(&mut public.0.as_ref()).unwrap();\n  let secret_key = <Ristretto as Ciphersuite>::read_F::<&[u8]>(\n    &mut pair.as_ref().secret.to_bytes()[.. 32].as_ref(),\n  )\n  .unwrap();\n\n  assert_eq!(Ristretto::generator() * secret_key, public_key);\n  let threshold_keys =\n    musig::<Ristretto>(musig_context(set), Zeroizing::new(secret_key), &[public_key]).unwrap();\n\n  let sig = frost::tests::sign_without_caching(\n    &mut OsRng,\n    frost::tests::algorithm_machines(\n      &mut OsRng,\n      &Schnorrkel::new(b\"substrate\"),\n      &HashMap::from([(threshold_keys.params().i(), threshold_keys.into())]),\n    ),\n    &oraclize_values_message(&set, values),\n  );\n\n  // oraclize values\n  let _ =\n    publish_tx(serai, &SeraiGenesisLiquidity::oraclize_values(*values, sig.to_bytes().into()))\n      .await;\n}\n"
  },
  {
    "path": "substrate/client/tests/common/in_instructions.rs",
    "content": "use rand_core::{RngCore, OsRng};\nuse blake2::{\n  digest::{consts::U32, Digest},\n  Blake2b,\n};\n\nuse scale::Encode;\n\nuse sp_core::Pair;\n\nuse serai_client::{\n  primitives::{insecure_pair_from_name, BlockHash, ExternalBalance, SeraiAddress},\n  validator_sets::primitives::{ExternalValidatorSet, KeyPair},\n  in_instructions::{\n    primitives::{Batch, SignedBatch, batch_message, InInstruction, InInstructionWithBalance},\n    InInstructionsEvent,\n  },\n  SeraiInInstructions, Serai,\n};\n\nuse crate::common::{tx::publish_tx, validator_sets::set_keys};\n\n#[allow(dead_code)]\npub async fn provide_batch(serai: &Serai, batch: Batch) -> [u8; 32] {\n  let serai_latest = serai.as_of_latest_finalized_block().await.unwrap();\n  let session = serai_latest.validator_sets().session(batch.network.into()).await.unwrap().unwrap();\n  let set = ExternalValidatorSet { session, network: batch.network };\n\n  let pair = insecure_pair_from_name(&format!(\"ValidatorSet {set:?}\"));\n  let keys = if let Some(keys) = serai_latest.validator_sets().keys(set).await.unwrap() {\n    keys\n  } else {\n    let keys = KeyPair(pair.public(), vec![].try_into().unwrap());\n    set_keys(serai, set, keys.clone(), &[insecure_pair_from_name(\"Alice\")]).await;\n    keys\n  };\n  assert_eq!(keys.0, pair.public());\n\n  let block = publish_tx(\n    serai,\n    &SeraiInInstructions::execute_batch(SignedBatch {\n      batch: batch.clone(),\n      signature: pair.sign(&batch_message(&batch)),\n    }),\n  )\n  .await;\n\n  let batches = serai.as_of(block).in_instructions().batch_events().await.unwrap();\n  // TODO: impl From<Batch> for BatchEvent?\n  assert_eq!(\n    batches,\n    vec![InInstructionsEvent::Batch {\n      network: batch.network,\n      id: batch.id,\n      block: batch.block,\n      instructions_hash: Blake2b::<U32>::digest(batch.instructions.encode()).into(),\n    }],\n  );\n\n  // TODO: Check the tokens events\n\n  block\n}\n\n#[allow(dead_code)]\npub async fn 
mint_coin(\n  serai: &Serai,\n  balance: ExternalBalance,\n  batch_id: u32,\n  address: SeraiAddress,\n) -> [u8; 32] {\n  let mut block_hash = BlockHash([0; 32]);\n  OsRng.fill_bytes(&mut block_hash.0);\n\n  let batch = Batch {\n    network: balance.coin.network(),\n    id: batch_id,\n    block: block_hash,\n    instructions: vec![InInstructionWithBalance {\n      instruction: InInstruction::Transfer(address),\n      balance,\n    }],\n  };\n\n  provide_batch(serai, batch).await\n}\n"
  },
  {
    "path": "substrate/client/tests/common/mod.rs",
    "content": "pub mod tx;\npub mod validator_sets;\npub mod in_instructions;\npub mod dex;\npub mod genesis_liquidity;\n\n#[macro_export]\nmacro_rules! serai_test {\n  ($($name: ident: $test: expr)*) => {\n    $(\n      #[tokio::test]\n      async fn $name() {\n        use std::collections::HashMap;\n        use dockertest::{\n          PullPolicy, StartPolicy, LogOptions, LogAction, LogPolicy, LogSource, Image,\n          TestBodySpecification, DockerTest,\n        };\n\n        serai_docker_tests::build(\"serai\".to_string());\n\n        let handle = concat!(\"serai_client-serai_node-\", stringify!($name));\n\n        let composition = TestBodySpecification::with_image(\n          Image::with_repository(\"serai-dev-serai\").pull_policy(PullPolicy::Never),\n        )\n        .replace_cmd(vec![\n          \"serai-node\".to_string(),\n          \"--dev\".to_string(),\n          \"--unsafe-rpc-external\".to_string(),\n          \"--rpc-cors\".to_string(),\n          \"all\".to_string(),\n        ])\n        .replace_env(\n          HashMap::from([\n            (\"RUST_LOG\".to_string(), \"runtime=debug\".to_string()),\n            (\"KEY\".to_string(), \" \".to_string()),\n          ])\n        )\n        .set_publish_all_ports(true)\n        .set_handle(handle)\n        .set_start_policy(StartPolicy::Strict)\n        .set_log_options(Some(LogOptions {\n          action: LogAction::Forward,\n          policy: LogPolicy::Always,\n          source: LogSource::Both,\n        }));\n\n        let mut test = DockerTest::new().with_network(dockertest::Network::Isolated);\n        test.provide_container(composition);\n        test.run_async(|ops| async move {\n          // Sleep until the Substrate RPC starts\n          let mut ticks = 0;\n          let serai_rpc = loop {\n            // Bound execution to 60 seconds\n            if ticks > 60 {\n              panic!(\"Serai node didn't start within 60 seconds\");\n            }\n            
tokio::time::sleep(core::time::Duration::from_secs(1)).await;\n            ticks += 1;\n\n            let Some(serai_rpc) = ops.handle(handle).host_port(9944) else { continue };\n            let serai_rpc = format!(\"http://{}:{}\", serai_rpc.0, serai_rpc.1);\n\n            let Ok(client) = Serai::new(serai_rpc.clone()).await else { continue };\n            if client.latest_finalized_block_hash().await.is_err() {\n              continue;\n            }\n            break serai_rpc;\n          };\n          #[allow(clippy::redundant_closure_call)]\n          $test(Serai::new(serai_rpc).await.unwrap()).await;\n        }).await;\n      }\n    )*\n  }\n}\n\n#[macro_export]\nmacro_rules! serai_test_fast_epoch {\n  ($($name: ident: $test: expr)*) => {\n    $(\n      #[tokio::test]\n      async fn $name() {\n        use std::collections::HashMap;\n        use dockertest::{\n          PullPolicy, StartPolicy, LogOptions, LogAction, LogPolicy, LogSource, Image,\n          TestBodySpecification, DockerTest,\n        };\n\n        serai_docker_tests::build(\"serai-fast-epoch\".to_string());\n\n        let handle = concat!(\"serai_client-serai_node-\", stringify!($name));\n\n        let composition = TestBodySpecification::with_image(\n          Image::with_repository(\"serai-dev-serai-fast-epoch\").pull_policy(PullPolicy::Never),\n        )\n        .replace_cmd(vec![\n          \"serai-node\".to_string(),\n          \"--dev\".to_string(),\n          \"--unsafe-rpc-external\".to_string(),\n          \"--rpc-cors\".to_string(),\n          \"all\".to_string(),\n        ])\n        .replace_env(\n          HashMap::from([\n            (\"RUST_LOG\".to_string(), \"runtime=debug\".to_string()),\n            (\"KEY\".to_string(), \" \".to_string()),\n          ])\n        )\n        .set_publish_all_ports(true)\n        .set_handle(handle)\n        .set_start_policy(StartPolicy::Strict)\n        .set_log_options(Some(LogOptions {\n          action: LogAction::Forward,\n          
policy: LogPolicy::Always,\n          source: LogSource::Both,\n        }));\n\n        let mut test = DockerTest::new().with_network(dockertest::Network::Isolated);\n        test.provide_container(composition);\n        test.run_async(|ops| async move {\n          // Sleep until the Substrate RPC starts\n          let serai_rpc = ops.handle(handle).host_port(9944).unwrap();\n          let serai_rpc = format!(\"http://{}:{}\", serai_rpc.0, serai_rpc.1);\n          // Bound execution to 60 seconds\n          for _ in 0 .. 60 {\n            tokio::time::sleep(core::time::Duration::from_secs(1)).await;\n            let Ok(client) = Serai::new(serai_rpc.clone()).await else { continue };\n            if client.latest_finalized_block_hash().await.is_err() {\n              continue;\n            }\n            break;\n          }\n          #[allow(clippy::redundant_closure_call)]\n          $test(Serai::new(serai_rpc).await.unwrap()).await;\n        }).await;\n      }\n    )*\n  }\n}\n"
  },
  {
    "path": "substrate/client/tests/common/tx.rs",
    "content": "use core::time::Duration;\n\nuse tokio::time::sleep;\n\nuse serai_client::{Transaction, Serai};\n\n#[allow(dead_code)]\npub async fn publish_tx(serai: &Serai, tx: &Transaction) -> [u8; 32] {\n  let mut latest = serai\n    .block(serai.latest_finalized_block_hash().await.unwrap())\n    .await\n    .unwrap()\n    .unwrap()\n    .number();\n\n  serai.publish(tx).await.unwrap();\n\n  // Get the block it was included in\n  // TODO: Add an RPC method for this/check the guarantee on the subscription\n  let mut ticks = 0;\n  loop {\n    latest += 1;\n\n    let block = {\n      let mut block;\n      while {\n        block = serai.finalized_block_by_number(latest).await.unwrap();\n        block.is_none()\n      } {\n        sleep(Duration::from_secs(1)).await;\n        ticks += 1;\n\n        if ticks > 60 {\n          panic!(\"60 seconds without inclusion in a finalized block\");\n        }\n      }\n      block.unwrap()\n    };\n\n    for transaction in &block.transactions {\n      if transaction == tx {\n        return block.hash();\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "substrate/client/tests/common/validator_sets.rs",
    "content": "use std::collections::HashMap;\n\nuse serai_abi::primitives::NetworkId;\nuse zeroize::Zeroizing;\nuse rand_core::OsRng;\n\nuse sp_core::{sr25519::Pair, Pair as PairTrait};\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::Ciphersuite;\nuse dkg_musig::musig;\nuse schnorrkel::Schnorrkel;\n\nuse serai_client::{\n  validator_sets::{\n    primitives::{ExternalValidatorSet, KeyPair, musig_context, set_keys_message},\n    ValidatorSetsEvent,\n  },\n  Amount, Serai, SeraiValidatorSets,\n};\n\nuse crate::common::tx::publish_tx;\n\n#[allow(dead_code)]\npub async fn set_keys(\n  serai: &Serai,\n  set: ExternalValidatorSet,\n  key_pair: KeyPair,\n  pairs: &[Pair],\n) -> [u8; 32] {\n  let mut pub_keys = vec![];\n  for pair in pairs {\n    let public_key =\n      <Ristretto as Ciphersuite>::read_G::<&[u8]>(&mut pair.public().0.as_ref()).unwrap();\n    pub_keys.push(public_key);\n  }\n\n  let mut threshold_keys = vec![];\n  for i in 0 .. pairs.len() {\n    let secret_key = <Ristretto as Ciphersuite>::read_F::<&[u8]>(\n      &mut pairs[i].as_ref().secret.to_bytes()[.. 
32].as_ref(),\n    )\n    .unwrap();\n    assert_eq!(Ristretto::generator() * secret_key, pub_keys[i]);\n\n    threshold_keys.push(\n      musig::<Ristretto>(musig_context(set.into()), Zeroizing::new(secret_key), &pub_keys).unwrap(),\n    );\n  }\n\n  let mut musig_keys = HashMap::new();\n  for tk in threshold_keys {\n    musig_keys.insert(tk.params().i(), tk.into());\n  }\n\n  let sig = frost::tests::sign_without_caching(\n    &mut OsRng,\n    frost::tests::algorithm_machines(&mut OsRng, &Schnorrkel::new(b\"substrate\"), &musig_keys),\n    &set_keys_message(&set, &[], &key_pair),\n  );\n\n  // Set the key pair\n  let block = publish_tx(\n    serai,\n    &SeraiValidatorSets::set_keys(\n      set.network,\n      vec![].try_into().unwrap(),\n      key_pair.clone(),\n      sig.to_bytes().into(),\n    ),\n  )\n  .await;\n\n  assert_eq!(\n    serai.as_of(block).validator_sets().key_gen_events().await.unwrap(),\n    vec![ValidatorSetsEvent::KeyGen { set, key_pair: key_pair.clone() }]\n  );\n  assert_eq!(serai.as_of(block).validator_sets().keys(set).await.unwrap(), Some(key_pair));\n\n  block\n}\n\n#[allow(dead_code)]\npub async fn allocate_stake(\n  serai: &Serai,\n  network: NetworkId,\n  amount: Amount,\n  pair: &Pair,\n  nonce: u32,\n) -> [u8; 32] {\n  // get the call\n  let tx = serai.sign(pair, SeraiValidatorSets::allocate(network, amount), nonce, 0);\n  publish_tx(serai, &tx).await\n}\n\n#[allow(dead_code)]\npub async fn deallocate_stake(\n  serai: &Serai,\n  network: NetworkId,\n  amount: Amount,\n  pair: &Pair,\n  nonce: u32,\n) -> [u8; 32] {\n  // get the call\n  let tx = serai.sign(pair, SeraiValidatorSets::deallocate(network, amount), nonce, 0);\n  publish_tx(serai, &tx).await\n}\n"
  },
  {
    "path": "substrate/client/tests/dex.rs",
    "content": "use rand_core::{RngCore, OsRng};\n\nuse sp_core::{Pair as PairTrait, bounded_vec::BoundedVec};\n\nuse serai_abi::in_instructions::primitives::DexCall;\n\nuse serai_client::{\n  primitives::{\n    Amount, Coin, Balance, BlockHash, insecure_pair_from_name, ExternalAddress, SeraiAddress,\n    ExternalCoin, ExternalBalance,\n  },\n  in_instructions::primitives::{\n    InInstruction, InInstructionWithBalance, Batch, IN_INSTRUCTION_EXECUTOR, OutAddress,\n  },\n  dex::DexEvent,\n  Serai,\n};\n\nmod common;\nuse common::{\n  in_instructions::{provide_batch, mint_coin},\n  dex::{add_liquidity as common_add_liquidity, swap as common_swap},\n};\n\n// TODO: Calculate all constants in the following tests\n// TODO: Check LP token, coin balances\n// TODO: Modularize common code\n// TODO: Check Transfer events\nserai_test!(\n  add_liquidity: (|serai: Serai| async move {\n    let coin = ExternalCoin::Monero;\n    let pair = insecure_pair_from_name(\"Ferdie\");\n\n    // mint sriXMR in the account so that we can add liq.\n    // Ferdie account is already pre-funded with SRI.\n    mint_coin(\n      &serai,\n      ExternalBalance { coin, amount: Amount(100_000_000_000_000) },\n      0,\n      pair.clone().public().into(),\n    )\n    .await;\n\n    // add liquidity\n    let coin_amount = Amount(50_000_000_000_000);\n    let sri_amount = Amount(50_000_000_000_000);\n    let block = common_add_liquidity(&serai,\n      coin,\n      coin_amount,\n      sri_amount,\n      0,\n      pair.clone()\n    ).await;\n    // get only the add liq events\n    let mut events = serai.as_of(block).dex().events().await.unwrap();\n    events.retain(|e| matches!(e, DexEvent::LiquidityAdded { .. 
}));\n\n    assert_eq!(\n      events,\n      vec![DexEvent::LiquidityAdded {\n        who: pair.public().into(),\n        mint_to: pair.public().into(),\n        pool_id: coin,\n        coin_amount: coin_amount.0,\n        sri_amount: sri_amount.0,\n        lp_token_minted: 49_999999990000\n      }]\n    );\n  })\n\n  // Tests coin -> SRI and SRI -> coin swaps.\n  swap_coin_to_sri: (|serai: Serai| async move {\n    let coin = ExternalCoin::Ether;\n    let pair = insecure_pair_from_name(\"Ferdie\");\n\n    // mint sriETH in the account so that we can add liq.\n    // Ferdie account is already pre-funded with SRI.\n    mint_coin(\n      &serai,\n      ExternalBalance { coin, amount: Amount(100_000_000_000_000) },\n      0,\n      pair.clone().public().into(),\n    )\n    .await;\n\n    // add liquidity\n    common_add_liquidity(&serai,\n      coin,\n      Amount(50_000_000_000_000),\n      Amount(50_000_000_000_000),\n      0,\n      pair.clone()\n    ).await;\n\n    // now that we have our liquid pool, swap some coin to SRI.\n    let mut amount_in = Amount(25_000_000_000_000);\n    let mut block = common_swap(\n      &serai,\n      coin.into(),\n      Coin::Serai,\n      amount_in,\n      Amount(1),\n      1,\n      pair.clone())\n      .await;\n\n    // get only the swap events\n    let mut events = serai.as_of(block).dex().events().await.unwrap();\n    events.retain(|e| matches!(e, DexEvent::SwapExecuted { .. 
}));\n\n    let mut path = BoundedVec::try_from(vec![coin.into(), Coin::Serai]).unwrap();\n    assert_eq!(\n      events,\n      vec![DexEvent::SwapExecuted {\n        who: pair.clone().public().into(),\n        send_to: pair.public().into(),\n        path,\n        amount_in: amount_in.0,\n        amount_out: 16633299966633\n      }]\n    );\n\n    // now swap some SRI to coin\n    amount_in = Amount(10_000_000_000_000);\n    block = common_swap(\n      &serai,\n      Coin::Serai,\n      coin.into(),\n      amount_in,\n      Amount(1),\n      2,\n      pair.clone()\n    ).await;\n\n    // get only the swap events\n    let mut events = serai.as_of(block).dex().events().await.unwrap();\n    events.retain(|e| matches!(e, DexEvent::SwapExecuted { .. }));\n\n    path = BoundedVec::try_from(vec![Coin::Serai, coin.into()]).unwrap();\n    assert_eq!(\n      events,\n      vec![DexEvent::SwapExecuted {\n        who: pair.clone().public().into(),\n        send_to: pair.public().into(),\n        path,\n        amount_in: amount_in.0,\n        amount_out: 17254428681101\n      }]\n    );\n  })\n\n  swap_coin_to_coin: (|serai: Serai| async move {\n    let coin1 = ExternalCoin::Monero;\n    let coin2 = ExternalCoin::Dai;\n    let pair = insecure_pair_from_name(\"Ferdie\");\n\n    // mint coins\n    mint_coin(\n      &serai,\n      ExternalBalance { coin: coin1, amount: Amount(100_000_000_000_000) },\n      0,\n      pair.clone().public().into(),\n    )\n    .await;\n    mint_coin(\n      &serai,\n      ExternalBalance { coin: coin2, amount: Amount(100_000_000_000_000) },\n      0,\n      pair.clone().public().into(),\n    )\n    .await;\n\n    // add liquidity to pools\n    common_add_liquidity(&serai,\n      coin1,\n      Amount(50_000_000_000_000),\n      Amount(50_000_000_000_000),\n      0,\n      pair.clone()\n    ).await;\n    common_add_liquidity(&serai,\n      coin2,\n      Amount(50_000_000_000_000),\n      Amount(50_000_000_000_000),\n      1,\n      pair.clone()\n    
).await;\n\n    // swap coin1 -> coin2\n    let amount_in = Amount(25_000_000_000_000);\n    let block = common_swap(\n      &serai,\n      coin1.into(),\n      coin2.into(),\n      amount_in,\n      Amount(1),\n      2,\n      pair.clone()\n    ).await;\n\n    // get only the swap events\n    let mut events = serai.as_of(block).dex().events().await.unwrap();\n    events.retain(|e| matches!(e, DexEvent::SwapExecuted { .. }));\n\n    let path = BoundedVec::try_from(vec![coin1.into(), Coin::Serai, coin2.into()]).unwrap();\n    assert_eq!(\n      events,\n      vec![DexEvent::SwapExecuted {\n        who: pair.clone().public().into(),\n        send_to: pair.public().into(),\n        path,\n        amount_in: amount_in.0,\n        amount_out: 12453103964435,\n      }]\n    );\n  })\n\n  add_liquidity_in_instructions: (|serai: Serai| async move {\n    let coin = ExternalCoin::Bitcoin;\n    let pair = insecure_pair_from_name(\"Ferdie\");\n    let mut batch_id = 0;\n\n    // mint sriBTC in the account so that we can add liq.\n    // Ferdie account is already pre-funded with SRI.\n    mint_coin(\n      &serai,\n      ExternalBalance { coin, amount: Amount(100_000_000_000_000) },\n      batch_id,\n      pair.clone().public().into(),\n    )\n    .await;\n    batch_id += 1;\n\n    // add liquidity\n    common_add_liquidity(&serai,\n      coin,\n      Amount(5_000_000_000_000),\n      Amount(500_000_000_000),\n      0,\n      pair.clone()\n    ).await;\n\n    // now that we have our liquid SRI/BTC pool, we can add more liquidity to it via an\n    // InInstruction\n    let mut block_hash = BlockHash([0; 32]);\n    OsRng.fill_bytes(&mut block_hash.0);\n    let batch = Batch {\n      network: coin.network(),\n      id: batch_id,\n      block: block_hash,\n      instructions: vec![InInstructionWithBalance {\n        instruction: InInstruction::Dex(DexCall::SwapAndAddLiquidity(pair.public().into())),\n        balance: ExternalBalance { coin, amount: Amount(20_000_000_000_000) },\n   
   }],\n    };\n\n    let block = provide_batch(&serai, batch).await;\n    let mut events = serai.as_of(block).dex().events().await.unwrap();\n    events.retain(|e| matches!(e, DexEvent::LiquidityAdded { .. }));\n    assert_eq!(\n      events,\n      vec![DexEvent::LiquidityAdded {\n        who: IN_INSTRUCTION_EXECUTOR,\n        mint_to: pair.public().into(),\n        pool_id: coin,\n        coin_amount: 10_000_000_000_000, // half of sent amount\n        sri_amount: 111_333_778_668,\n        lp_token_minted: 1_054_092_553_383\n      }]\n    );\n  })\n\n  swap_in_instructions: (|serai: Serai| async move {\n    let coin1 = ExternalCoin::Monero;\n    let coin2 = ExternalCoin::Ether;\n    let pair = insecure_pair_from_name(\"Ferdie\");\n    let mut coin1_batch_id = 0;\n    let mut coin2_batch_id = 0;\n\n    // mint coins\n    mint_coin(\n      &serai,\n      ExternalBalance { coin: coin1, amount: Amount(10_000_000_000_000_000) },\n      coin1_batch_id,\n      pair.clone().public().into(),\n    )\n    .await;\n    coin1_batch_id += 1;\n    mint_coin(\n      &serai,\n      ExternalBalance { coin: coin2, amount: Amount(100_000_000_000_000) },\n      coin2_batch_id,\n      pair.clone().public().into(),\n    )\n    .await;\n    coin2_batch_id += 1;\n\n    // add liquidity to pools\n    common_add_liquidity(&serai,\n      coin1,\n      Amount(5_000_000_000_000_000), // monero has 12 decimals\n      Amount(50_000_000_000),\n      0,\n      pair.clone()\n    ).await;\n    common_add_liquidity(&serai,\n      coin2,\n      Amount(5_000_000_000_000), // ether still has 8 in our codebase\n      Amount(500_000_000_000),\n      1,\n      pair.clone()\n    ).await;\n\n    // rand address bytes\n    let mut rand_bytes = vec![0; 32];\n    OsRng.fill_bytes(&mut rand_bytes);\n\n    // XMR -> ETH\n    {\n      // make an out address\n      let out_address = OutAddress::External(ExternalAddress::new(rand_bytes.clone()).unwrap());\n\n      // amount is the min out amount\n      let 
out_balance = Balance { coin: coin2.into(), amount: Amount(1) };\n\n      // now that we have our pools, we can try to swap\n      let mut block_hash = BlockHash([0; 32]);\n      OsRng.fill_bytes(&mut block_hash.0);\n      let batch = Batch {\n        network: coin1.network(),\n        id: coin1_batch_id,\n        block: block_hash,\n        instructions: vec![InInstructionWithBalance {\n          instruction: InInstruction::Dex(DexCall::Swap(out_balance, out_address)),\n          balance: ExternalBalance { coin: coin1, amount: Amount(200_000_000_000_000) },\n        }],\n      };\n\n      let block = provide_batch(&serai, batch).await;\n      coin1_batch_id += 1;\n      let mut events = serai.as_of(block).dex().events().await.unwrap();\n      events.retain(|e| matches!(e, DexEvent::SwapExecuted { .. }));\n\n      let path = BoundedVec::try_from(vec![coin1.into(), Coin::Serai, coin2.into()]).unwrap();\n      assert_eq!(\n        events,\n        vec![DexEvent::SwapExecuted {\n          who: IN_INSTRUCTION_EXECUTOR,\n          send_to: IN_INSTRUCTION_EXECUTOR,\n          path,\n          amount_in: 200_000_000_000_000,\n          amount_out: 19_044_944_233\n        }]\n      );\n    }\n\n    // ETH -> sriXMR\n    {\n      // make an out address\n      let out_address =\n        OutAddress::Serai(SeraiAddress::new(rand_bytes.clone().try_into().unwrap()));\n\n      // amount is the min out amount\n      let out_balance = Balance { coin: coin1.into(), amount: Amount(1) };\n\n      // now that we have our pools, we can try to swap\n      let mut block_hash = BlockHash([0; 32]);\n      OsRng.fill_bytes(&mut block_hash.0);\n      let batch = Batch {\n        network: coin2.network(),\n        id: coin2_batch_id,\n        block: block_hash,\n        instructions: vec![InInstructionWithBalance {\n          instruction: InInstruction::Dex(DexCall::Swap(out_balance, out_address.clone())),\n          balance: ExternalBalance { coin: coin2, amount: Amount(200_000_000_000) },\n  
      }],\n      };\n\n      let block = provide_batch(&serai, batch).await;\n      let mut events = serai.as_of(block).dex().events().await.unwrap();\n      events.retain(|e| matches!(e, DexEvent::SwapExecuted { .. }));\n\n      let path = BoundedVec::try_from(vec![coin2.into(), Coin::Serai, coin1.into()]).unwrap();\n      assert_eq!(\n        events,\n        vec![DexEvent::SwapExecuted {\n          who: IN_INSTRUCTION_EXECUTOR,\n          send_to: out_address.as_native().unwrap(),\n          path,\n          amount_in: 200_000_000_000,\n          amount_out: 1487294253782353\n        }]\n      );\n    }\n\n    // XMR -> SRI\n    {\n      // make an out address\n      let out_address = OutAddress::Serai(SeraiAddress::new(rand_bytes.try_into().unwrap()));\n\n      // amount is the min out amount\n      let out_balance = Balance { coin: Coin::Serai, amount: Amount(1) };\n\n      // now that we have our pools, we can try to swap\n      let mut block_hash = BlockHash([0; 32]);\n      OsRng.fill_bytes(&mut block_hash.0);\n      let batch = Batch {\n        network: coin1.network(),\n        id: coin1_batch_id,\n        block: block_hash,\n        instructions: vec![InInstructionWithBalance {\n          instruction: InInstruction::Dex(DexCall::Swap(out_balance, out_address.clone())),\n          balance: ExternalBalance { coin: coin1, amount: Amount(100_000_000_000_000) },\n        }],\n      };\n\n      let block = provide_batch(&serai, batch).await;\n      let mut events = serai.as_of(block).dex().events().await.unwrap();\n      events.retain(|e| matches!(e, DexEvent::SwapExecuted { .. 
}));\n\n      let path = BoundedVec::try_from(vec![coin1.into(), Coin::Serai]).unwrap();\n      assert_eq!(\n        events,\n        vec![DexEvent::SwapExecuted {\n          who: IN_INSTRUCTION_EXECUTOR,\n          send_to: out_address.as_native().unwrap(),\n          path,\n          amount_in: 100_000_000_000_000,\n          amount_out: 1_762_662_819\n        }]\n      );\n    }\n  })\n);\n"
  },
  {
    "path": "substrate/client/tests/dht.rs",
    "content": "use serai_client::{primitives::ExternalNetworkId, Serai};\n\n#[tokio::test]\nasync fn dht() {\n  use dockertest::{\n    PullPolicy, StartPolicy, LogOptions, LogAction, LogPolicy, LogSource, Image,\n    TestBodySpecification, DockerTest,\n  };\n\n  serai_docker_tests::build(\"serai\".to_string());\n\n  let handle = |name: &str| format!(\"serai_client-serai_node-{name}\");\n  let composition = |name: &str| {\n    TestBodySpecification::with_image(\n      Image::with_repository(\"serai-dev-serai\").pull_policy(PullPolicy::Never),\n    )\n    .replace_env(\n      [(\"SERAI_NAME\".to_string(), name.to_string()), (\"KEY\".to_string(), \" \".to_string())].into(),\n    )\n    .set_publish_all_ports(true)\n    .set_handle(handle(name))\n    .set_start_policy(StartPolicy::Strict)\n    .set_log_options(Some(LogOptions {\n      action: LogAction::Forward,\n      policy: LogPolicy::Always,\n      source: LogSource::Both,\n    }))\n  };\n\n  let mut test = DockerTest::new().with_network(dockertest::Network::Isolated);\n  test.provide_container(composition(\"alice\"));\n  test.provide_container(composition(\"bob\"));\n  test.provide_container(composition(\"charlie\"));\n  test.provide_container(composition(\"dave\"));\n  test\n    .run_async(|ops| async move {\n      // Sleep until the Substrate RPC starts\n      let alice = handle(\"alice\");\n      let serai_rpc = ops.handle(&alice).host_port(9944).unwrap();\n      let serai_rpc = format!(\"http://{}:{}\", serai_rpc.0, serai_rpc.1);\n      // Sleep for a minute\n      tokio::time::sleep(core::time::Duration::from_secs(60)).await;\n      // Check the DHT has been populated\n      assert!(!Serai::new(serai_rpc.clone())\n        .await\n        .unwrap()\n        .p2p_validators(ExternalNetworkId::Bitcoin.into())\n        .await\n        .unwrap()\n        .is_empty());\n    })\n    .await;\n}\n"
  },
  {
    "path": "substrate/client/tests/emissions.rs",
    "content": "use std::{time::Duration, collections::HashMap};\nuse rand_core::{RngCore, OsRng};\n\nuse serai_client::TemporalSerai;\n\nuse serai_abi::{\n  emissions::primitives::{INITIAL_REWARD_PER_BLOCK, SECURE_BY},\n  in_instructions::primitives::Batch,\n  primitives::{\n    BlockHash, ExternalBalance, ExternalCoin, ExternalNetworkId, EXTERNAL_NETWORKS,\n    FAST_EPOCH_DURATION, FAST_EPOCH_INITIAL_PERIOD, NETWORKS, TARGET_BLOCK_TIME, Amount, NetworkId,\n  },\n  validator_sets::primitives::Session,\n};\n\nuse serai_client::Serai;\n\nmod common;\nuse common::{genesis_liquidity::set_up_genesis, in_instructions::provide_batch};\n\nserai_test_fast_epoch!(\n  emissions: (|serai: Serai| async move {\n    test_emissions(serai).await;\n  })\n);\n\nasync fn send_batches(serai: &Serai, ids: &mut HashMap<ExternalNetworkId, u32>) {\n  for network in EXTERNAL_NETWORKS {\n    // set up batch id\n    ids\n      .entry(network)\n      .and_modify(|v| {\n        *v += 1;\n      })\n      .or_insert(0);\n\n    // set up block hash\n    let mut block = BlockHash([0; 32]);\n    OsRng.fill_bytes(&mut block.0);\n\n    provide_batch(serai, Batch { network, id: ids[&network], block, instructions: vec![] }).await;\n  }\n}\n\nasync fn test_emissions(serai: Serai) {\n  // set up the genesis\n  let values = HashMap::from([\n    (ExternalCoin::Monero, 184100),\n    (ExternalCoin::Ether, 4785000),\n    (ExternalCoin::Dai, 1500),\n  ]);\n  let (_, mut batch_ids) = set_up_genesis(&serai, &values).await;\n\n  // wait until genesis is complete\n  let mut genesis_complete_block = None;\n  while genesis_complete_block.is_none() {\n    tokio::time::sleep(Duration::from_secs(1)).await;\n    genesis_complete_block = serai\n      .as_of_latest_finalized_block()\n      .await\n      .unwrap()\n      .genesis_liquidity()\n      .genesis_complete_block()\n      .await\n      .unwrap();\n  }\n\n  for _ in 0 .. 
3 {\n    // get current stakes\n    let mut current_stake = HashMap::new();\n    for n in NETWORKS {\n      // TODO: investigate why serai network TAS isn't visible at session 0.\n      let stake = serai\n        .as_of_latest_finalized_block()\n        .await\n        .unwrap()\n        .validator_sets()\n        .total_allocated_stake(n)\n        .await\n        .unwrap()\n        .unwrap_or(Amount(0))\n        .0;\n      current_stake.insert(n, stake);\n    }\n\n    // wait for a session change\n    let current_session = wait_for_session_change(&serai).await;\n\n    // get last block\n    let last_block = serai.latest_finalized_block().await.unwrap();\n    let serai_latest = serai.as_of(last_block.hash());\n    let change_block_number = last_block.number();\n\n    // get distances to ec security & block count of the previous session\n    let (distances, total_distance) = get_distances(&serai_latest, &current_stake).await;\n    let block_count = get_session_blocks(&serai_latest, current_session - 1).await;\n\n    // calculate how much reward in this session\n    let reward_this_epoch =\n      if change_block_number < (genesis_complete_block.unwrap() + FAST_EPOCH_INITIAL_PERIOD) {\n        block_count * INITIAL_REWARD_PER_BLOCK\n      } else {\n        let blocks_until = SECURE_BY - change_block_number;\n        let block_reward = total_distance / blocks_until;\n        block_count * block_reward\n      };\n\n    let reward_per_network = distances\n      .into_iter()\n      .map(|(n, distance)| {\n        let reward = u64::try_from(\n          u128::from(reward_this_epoch).saturating_mul(u128::from(distance)) /\n            u128::from(total_distance),\n        )\n        .unwrap();\n        (n, reward)\n      })\n      .collect::<HashMap<NetworkId, u64>>();\n\n    // retire the prev-set so that TotalAllocatedStake updated.\n    send_batches(&serai, &mut batch_ids).await;\n\n    for (n, reward) in reward_per_network {\n      let stake = serai\n        
.as_of_latest_finalized_block()\n        .await\n        .unwrap()\n        .validator_sets()\n        .total_allocated_stake(n)\n        .await\n        .unwrap()\n        .unwrap_or(Amount(0))\n        .0;\n\n      // all rewards should be automatically staked for the network since we are in the initial period.\n      assert_eq!(stake, *current_stake.get(&n).unwrap() + reward);\n    }\n\n    // TODO: check stake per address?\n    // TODO: check post ec security era\n  }\n}\n\n/// Returns the required stake in terms of SRI for a given `Balance`.\nasync fn required_stake(serai: &TemporalSerai<'_>, balance: ExternalBalance) -> u64 {\n  // This is inclusive to an increase in accuracy\n  let sri_per_coin = serai.dex().oracle_value(balance.coin).await.unwrap().unwrap_or(Amount(0));\n\n  // See dex-pallet for the reasoning on these\n  let coin_decimals = balance.coin.decimals().max(5);\n  let accuracy_increase = u128::from(10u64.pow(coin_decimals));\n\n  let total_coin_value =\n    u64::try_from(u128::from(balance.amount.0) * u128::from(sri_per_coin.0) / accuracy_increase)\n      .unwrap_or(u64::MAX);\n\n  // required stake formula (COIN_VALUE * 1.5) + margin(20%)\n  let required_stake = total_coin_value.saturating_mul(3).saturating_div(2);\n  required_stake.saturating_add(total_coin_value.saturating_div(5))\n}\n\nasync fn wait_for_session_change(serai: &Serai) -> u32 {\n  let current_session = serai\n    .as_of_latest_finalized_block()\n    .await\n    .unwrap()\n    .validator_sets()\n    .session(NetworkId::Serai)\n    .await\n    .unwrap()\n    .unwrap()\n    .0;\n  let next_session = current_session + 1;\n\n  // let's wait double the epoch time.\n  tokio::time::timeout(\n    tokio::time::Duration::from_secs(FAST_EPOCH_DURATION * TARGET_BLOCK_TIME * 2),\n    async {\n      while serai\n        .as_of_latest_finalized_block()\n        .await\n        .unwrap()\n        .validator_sets()\n        .session(NetworkId::Serai)\n        .await\n        .unwrap()\n        .unwrap()\n   
     .0 <\n        next_session\n      {\n        tokio::time::sleep(Duration::from_secs(6)).await;\n      }\n    },\n  )\n  .await\n  .unwrap();\n\n  next_session\n}\n\nasync fn get_distances(\n  serai: &TemporalSerai<'_>,\n  current_stake: &HashMap<NetworkId, u64>,\n) -> (HashMap<NetworkId, u64>, u64) {\n  // we should be in the initial period, so calculate how much each network supposedly get..\n  // we can check the supply to see how much coin hence liability we have.\n  let mut distances: HashMap<NetworkId, u64> = HashMap::new();\n  let mut total_distance = 0;\n  for n in EXTERNAL_NETWORKS {\n    let mut required = 0;\n    for c in n.coins() {\n      let amount = serai.coins().coin_supply(c.into()).await.unwrap();\n      required += required_stake(serai, ExternalBalance { coin: c, amount }).await;\n    }\n\n    let mut current = *current_stake.get(&n.into()).unwrap();\n    if current > required {\n      current = required;\n    }\n\n    let distance = required - current;\n    total_distance += distance;\n\n    distances.insert(n.into(), distance);\n  }\n\n  // add serai network portion(20%)\n  let new_total_distance = total_distance.saturating_mul(10) / 8;\n  distances.insert(NetworkId::Serai, new_total_distance - total_distance);\n  total_distance = new_total_distance;\n\n  (distances, total_distance)\n}\n\nasync fn get_session_blocks(serai: &TemporalSerai<'_>, session: u32) -> u64 {\n  let begin_block = serai\n    .validator_sets()\n    .session_begin_block(NetworkId::Serai, Session(session))\n    .await\n    .unwrap()\n    .unwrap();\n\n  let next_begin_block = serai\n    .validator_sets()\n    .session_begin_block(NetworkId::Serai, Session(session + 1))\n    .await\n    .unwrap()\n    .unwrap();\n\n  next_begin_block.saturating_sub(begin_block)\n}\n"
  },
  {
    "path": "substrate/client/tests/genesis_liquidity.rs",
    "content": "use std::{time::Duration, collections::HashMap};\n\nuse serai_client::Serai;\n\nuse serai_abi::primitives::{Amount, Coin, ExternalCoin, COINS, EXTERNAL_COINS, GENESIS_SRI};\n\nuse serai_client::genesis_liquidity::primitives::{\n  GENESIS_LIQUIDITY_ACCOUNT, INITIAL_GENESIS_LP_SHARES,\n};\n\nmod common;\nuse common::genesis_liquidity::set_up_genesis;\n\nserai_test_fast_epoch!(\n  genesis_liquidity: (|serai: Serai| async move {\n    test_genesis_liquidity(serai).await;\n  })\n);\n\npub async fn test_genesis_liquidity(serai: Serai) {\n  // set up the genesis\n  let values = HashMap::from([\n    (ExternalCoin::Monero, 184100),\n    (ExternalCoin::Ether, 4785000),\n    (ExternalCoin::Dai, 1500),\n  ]);\n  let (accounts, _) = set_up_genesis(&serai, &values).await;\n\n  // wait until genesis is complete\n  while serai\n    .as_of_latest_finalized_block()\n    .await\n    .unwrap()\n    .genesis_liquidity()\n    .genesis_complete_block()\n    .await\n    .unwrap()\n    .is_none()\n  {\n    tokio::time::sleep(Duration::from_secs(1)).await;\n  }\n\n  // check total SRI supply is +100M\n  // there are 6 endowed accounts in dev-net. 
Take this into consideration when checking\n  // for the total sri minted at this time.\n  let serai = serai.as_of_latest_finalized_block().await.unwrap();\n  let sri = serai.coins().coin_supply(Coin::Serai).await.unwrap();\n  let endowed_amount: u64 = 1 << 60;\n  let total_sri = (6 * endowed_amount) + GENESIS_SRI;\n  assert_eq!(sri, Amount(total_sri));\n\n  // check genesis account has no coins, all transferred to pools.\n  for coin in COINS {\n    let amount = serai.coins().coin_balance(coin, GENESIS_LIQUIDITY_ACCOUNT).await.unwrap();\n    assert_eq!(amount.0, 0);\n  }\n\n  // check pools has proper liquidity\n  let mut pool_amounts = HashMap::new();\n  let mut total_value = 0u128;\n  for coin in EXTERNAL_COINS {\n    let total_coin = accounts[&coin].iter().fold(0u128, |acc, value| acc + u128::from(value.1 .0));\n    let value = if coin != ExternalCoin::Bitcoin {\n      (total_coin * u128::from(values[&coin])) / 10u128.pow(coin.decimals())\n    } else {\n      total_coin\n    };\n\n    total_value += value;\n    pool_amounts.insert(coin, (total_coin, value));\n  }\n\n  // check distributed SRI per pool\n  let mut total_sri_distributed = 0u128;\n  for coin in EXTERNAL_COINS {\n    let sri = if coin == *EXTERNAL_COINS.last().unwrap() {\n      u128::from(GENESIS_SRI).checked_sub(total_sri_distributed).unwrap()\n    } else {\n      (pool_amounts[&coin].1 * u128::from(GENESIS_SRI)) / total_value\n    };\n    total_sri_distributed += sri;\n\n    let reserves = serai.dex().get_reserves(coin).await.unwrap().unwrap();\n    assert_eq!(u128::from(reserves.0 .0), pool_amounts[&coin].0); // coin side\n    assert_eq!(u128::from(reserves.1 .0), sri); // SRI side\n  }\n\n  // check each liquidity provider got liquidity tokens proportional to their value\n  for coin in EXTERNAL_COINS {\n    let liq_supply = serai.genesis_liquidity().supply(coin).await.unwrap();\n    for (acc, amount) in &accounts[&coin] {\n      let acc_liq_shares = serai.genesis_liquidity().liquidity(acc, 
coin).await.unwrap().shares;\n\n      // since we can't test the ratios directly(due to integer division giving 0)\n      // we test whether they give the same result when multiplied by another constant.\n      // Following test ensures the account in fact has the right amount of shares.\n      let mut shares_ratio = (INITIAL_GENESIS_LP_SHARES * acc_liq_shares) / liq_supply.shares;\n      let amounts_ratio =\n        (INITIAL_GENESIS_LP_SHARES * amount.0) / u64::try_from(pool_amounts[&coin].0).unwrap();\n\n      // we can tolerate 1 unit diff between them due to integer division.\n      if shares_ratio.abs_diff(amounts_ratio) == 1 {\n        shares_ratio = amounts_ratio;\n      }\n\n      assert_eq!(shares_ratio, amounts_ratio);\n    }\n  }\n\n  // TODO: test remove the liq before/after genesis ended.\n}\n"
  },
  {
    "path": "substrate/client/tests/time.rs",
    "content": "use std::time::{Duration, SystemTime};\n\nuse tokio::time::sleep;\n\nuse serai_client::Serai;\n\nmod common;\n\nserai_test!(\n  time: (|serai: Serai| async move {\n    let mut number = serai.latest_finalized_block().await.unwrap().number();\n    let mut done = 0;\n    while done < 3 {\n      // Wait for the next block\n      let block = serai.latest_finalized_block().await.unwrap();\n      if block.number() == number {\n        sleep(Duration::from_secs(1)).await;\n        continue;\n      }\n      number = block.number();\n\n      // Make sure the time we extract from the block is within 5 seconds of now\n      let now = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs();\n      assert!(now.saturating_sub(block.time().unwrap()) < 5);\n      done += 1;\n    }\n  })\n);\n"
  },
  {
    "path": "substrate/client/tests/validator_sets.rs",
    "content": "use rand_core::{RngCore, OsRng};\n\nuse sp_core::{\n  sr25519::{Public, Pair},\n  Pair as PairTrait,\n};\n\nuse serai_client::{\n  primitives::{\n    NETWORKS, NetworkId, BlockHash, insecure_pair_from_name, FAST_EPOCH_DURATION,\n    TARGET_BLOCK_TIME, ExternalNetworkId, Amount,\n  },\n  validator_sets::{\n    primitives::{Session, ValidatorSet, ExternalValidatorSet, KeyPair},\n    ValidatorSetsEvent,\n  },\n  in_instructions::{\n    primitives::{Batch, SignedBatch, batch_message},\n    SeraiInInstructions,\n  },\n  Serai,\n};\n\nmod common;\nuse common::{\n  tx::publish_tx,\n  validator_sets::{allocate_stake, deallocate_stake, set_keys},\n};\n\nfn get_random_key_pair() -> KeyPair {\n  let mut ristretto_key = [0; 32];\n  OsRng.fill_bytes(&mut ristretto_key);\n  let mut external_key = vec![0; 33];\n  OsRng.fill_bytes(&mut external_key);\n  KeyPair(Public::from(ristretto_key), external_key.try_into().unwrap())\n}\n\nasync fn get_ordered_keys(serai: &Serai, network: NetworkId, accounts: &[Pair]) -> Vec<Pair> {\n  // retrieve the current session validators so that we know the order of the keys\n  // that is necessary for the correct musig signature.\n  let validators = serai\n    .as_of_latest_finalized_block()\n    .await\n    .unwrap()\n    .validator_sets()\n    .active_network_validators(network)\n    .await\n    .unwrap();\n\n  // collect the pairs of the validators\n  let mut pairs = vec![];\n  for v in validators {\n    let p = accounts.iter().find(|pair| pair.public() == v).unwrap().clone();\n    pairs.push(p);\n  }\n\n  pairs\n}\n\nserai_test!(\n  set_keys_test: (|serai: Serai| async move {\n    let network = ExternalNetworkId::Bitcoin;\n    let set = ExternalValidatorSet { session: Session(0), network };\n\n    let pair = insecure_pair_from_name(\"Alice\");\n    let public = pair.public();\n\n    // Neither of these keys are validated\n    // The external key is infeasible to validate on-chain, the Ristretto key is feasible\n    // TODO: Should 
the Ristretto key be validated?\n    let key_pair = get_random_key_pair();\n\n    // Make sure the genesis is as expected\n    assert_eq!(\n      serai\n        .as_of(serai.finalized_block_by_number(0).await.unwrap().unwrap().hash())\n        .validator_sets()\n        .new_set_events()\n        .await\n        .unwrap(),\n      NETWORKS\n        .iter()\n        .copied()\n        .map(|network| ValidatorSetsEvent::NewSet {\n          set: ValidatorSet { session: Session(0), network }\n        })\n        .collect::<Vec<_>>(),\n    );\n\n    {\n      let vs_serai = serai.as_of_latest_finalized_block().await.unwrap();\n      let vs_serai = vs_serai.validator_sets();\n      let participants = vs_serai.participants(set.network.into()).await\n        .unwrap()\n        .unwrap()\n        .into_iter()\n        .map(|(k, _)| k)\n        .collect::<Vec<_>>();\n      let participants_ref: &[_] = participants.as_ref();\n      assert_eq!(participants_ref, [public].as_ref());\n    }\n\n    let block = set_keys(&serai, set, key_pair.clone(), &[pair]).await;\n\n    // While the set_keys function should handle this, it's beneficial to\n    // independently test it\n    let serai = serai.as_of(block);\n    let serai = serai.validator_sets();\n    assert_eq!(\n      serai.key_gen_events().await.unwrap(),\n      vec![ValidatorSetsEvent::KeyGen { set, key_pair: key_pair.clone() }]\n    );\n    assert_eq!(serai.keys(set).await.unwrap(), Some(key_pair));\n  })\n);\n\n#[tokio::test]\nasync fn validator_set_rotation() {\n  use dockertest::{\n    PullPolicy, StartPolicy, LogOptions, LogAction, LogPolicy, LogSource, Image,\n    TestBodySpecification, DockerTest,\n  };\n  use std::collections::HashMap;\n\n  serai_docker_tests::build(\"serai-fast-epoch\".to_string());\n\n  let handle = |name| format!(\"serai_client-serai_node-{name}\");\n  let composition = |name| {\n    TestBodySpecification::with_image(\n      
Image::with_repository(\"serai-dev-serai-fast-epoch\").pull_policy(PullPolicy::Never),\n    )\n    .replace_cmd(vec![\n      \"serai-node\".to_string(),\n      \"--unsafe-rpc-external\".to_string(),\n      \"--rpc-cors\".to_string(),\n      \"all\".to_string(),\n      \"--chain\".to_string(),\n      \"local\".to_string(),\n      format!(\"--{name}\"),\n    ])\n    .replace_env(HashMap::from([\n      (\"RUST_LOG\".to_string(), \"runtime=debug\".to_string()),\n      (\"KEY\".to_string(), \" \".to_string()),\n    ]))\n    .set_publish_all_ports(true)\n    .set_handle(handle(name))\n    .set_start_policy(StartPolicy::Strict)\n    .set_log_options(Some(LogOptions {\n      action: LogAction::Forward,\n      policy: LogPolicy::Always,\n      source: LogSource::Both,\n    }))\n  };\n\n  let mut test = DockerTest::new().with_network(dockertest::Network::Isolated);\n  test.provide_container(composition(\"alice\"));\n  test.provide_container(composition(\"bob\"));\n  test.provide_container(composition(\"charlie\"));\n  test.provide_container(composition(\"dave\"));\n  test.provide_container(composition(\"eve\"));\n  test\n    .run_async(|ops| async move {\n      // Sleep until the Substrate RPC starts\n      let alice = handle(\"alice\");\n      let alice_rpc = ops.handle(&alice).host_port(9944).unwrap();\n      let alice_rpc = format!(\"http://{}:{}\", alice_rpc.0, alice_rpc.1);\n\n      // Sleep for some time\n      tokio::time::sleep(core::time::Duration::from_secs(20)).await;\n      let serai = Serai::new(alice_rpc.clone()).await.unwrap();\n\n      // Make sure the genesis is as expected\n      assert_eq!(\n        serai\n          .as_of(serai.finalized_block_by_number(0).await.unwrap().unwrap().hash())\n          .validator_sets()\n          .new_set_events()\n          .await\n          .unwrap(),\n        NETWORKS\n          .iter()\n          .copied()\n          .map(|network| ValidatorSetsEvent::NewSet {\n            set: ValidatorSet { session: Session(0), network 
}\n          })\n          .collect::<Vec<_>>(),\n      );\n\n      // genesis accounts\n      let accounts = vec![\n        insecure_pair_from_name(\"Alice\"),\n        insecure_pair_from_name(\"Bob\"),\n        insecure_pair_from_name(\"Charlie\"),\n        insecure_pair_from_name(\"Dave\"),\n        insecure_pair_from_name(\"Eve\"),\n      ];\n\n      // amounts for single key share per network\n      let key_shares = HashMap::from([\n        (NetworkId::Serai, Amount(50_000 * 10_u64.pow(8))),\n        (NetworkId::External(ExternalNetworkId::Bitcoin), Amount(1_000_000 * 10_u64.pow(8))),\n        (NetworkId::External(ExternalNetworkId::Monero), Amount(100_000 * 10_u64.pow(8))),\n        (NetworkId::External(ExternalNetworkId::Ethereum), Amount(1_000_000 * 10_u64.pow(8))),\n      ]);\n\n      // genesis participants per network\n      #[allow(clippy::redundant_closure_for_method_calls)]\n      let default_participants =\n        accounts[.. 4].to_vec().iter().map(|pair| pair.public()).collect::<Vec<_>>();\n      let mut participants = HashMap::from([\n        (NetworkId::Serai, default_participants.clone()),\n        (NetworkId::External(ExternalNetworkId::Bitcoin), default_participants.clone()),\n        (NetworkId::External(ExternalNetworkId::Monero), default_participants.clone()),\n        (NetworkId::External(ExternalNetworkId::Ethereum), default_participants),\n      ]);\n\n      // test the set rotation\n      for (i, network) in NETWORKS.into_iter().enumerate() {\n        let participants = participants.get_mut(&network).unwrap();\n\n        // we start the chain with 4 default participants that has a single key share each\n        participants.sort();\n        verify_session_and_active_validators(&serai, network, 0, participants).await;\n\n        // add 1 participant\n        let last_participant = accounts[4].clone();\n        let hash = allocate_stake(\n          &serai,\n          network,\n          key_shares[&network],\n          
&last_participant,\n          i.try_into().unwrap(),\n        )\n        .await;\n        participants.push(last_participant.public());\n        // the session at which set changes becomes active\n        let activation_session = get_session_at_which_changes_activate(&serai, network, hash).await;\n\n        // set the keys if it is an external set\n        if network != NetworkId::Serai {\n          let set =\n            ExternalValidatorSet { session: Session(0), network: network.try_into().unwrap() };\n          let key_pair = get_random_key_pair();\n          let pairs = get_ordered_keys(&serai, network, &accounts).await;\n          set_keys(&serai, set, key_pair, &pairs).await;\n        }\n\n        // verify\n        participants.sort();\n        verify_session_and_active_validators(&serai, network, activation_session, participants)\n          .await;\n\n        // remove 1 participant\n        let participant_to_remove = accounts[1].clone();\n        let hash = deallocate_stake(\n          &serai,\n          network,\n          key_shares[&network],\n          &participant_to_remove,\n          i.try_into().unwrap(),\n        )\n        .await;\n        participants.swap_remove(\n          participants.iter().position(|k| *k == participant_to_remove.public()).unwrap(),\n        );\n        let activation_session = get_session_at_which_changes_activate(&serai, network, hash).await;\n\n        if network != NetworkId::Serai {\n          // set the keys if it is an external set\n          let set =\n            ExternalValidatorSet { session: Session(1), network: network.try_into().unwrap() };\n\n          // we need the whole substrate key pair to sign the batch\n          let (substrate_pair, key_pair) = {\n            let pair = insecure_pair_from_name(\"session-1-key-pair\");\n            let public = pair.public();\n\n            let mut external_key = vec![0; 33];\n            OsRng.fill_bytes(&mut external_key);\n\n            (pair, KeyPair(public, 
external_key.try_into().unwrap()))\n          };\n          let pairs = get_ordered_keys(&serai, network, &accounts).await;\n          set_keys(&serai, set, key_pair, &pairs).await;\n\n          // provide a batch to complete the handover and retire the previous set\n          let mut block_hash = BlockHash([0; 32]);\n          OsRng.fill_bytes(&mut block_hash.0);\n          let batch = Batch {\n            network: network.try_into().unwrap(),\n            id: 0,\n            block: block_hash,\n            instructions: vec![],\n          };\n          publish_tx(\n            &serai,\n            &SeraiInInstructions::execute_batch(SignedBatch {\n              batch: batch.clone(),\n              signature: substrate_pair.sign(&batch_message(&batch)),\n            }),\n          )\n          .await;\n        }\n\n        // verify\n        participants.sort();\n        verify_session_and_active_validators(&serai, network, activation_session, participants)\n          .await;\n\n        // check pending deallocations\n        let pending = serai\n          .as_of_latest_finalized_block()\n          .await\n          .unwrap()\n          .validator_sets()\n          .pending_deallocations(\n            network,\n            participant_to_remove.public(),\n            Session(activation_session + 1),\n          )\n          .await\n          .unwrap();\n        assert_eq!(pending, Some(key_shares[&network]));\n      }\n    })\n    .await;\n}\n\nasync fn session_for_block(serai: &Serai, block: [u8; 32], network: NetworkId) -> u32 {\n  serai.as_of(block).validator_sets().session(network).await.unwrap().unwrap().0\n}\n\nasync fn verify_session_and_active_validators(\n  serai: &Serai,\n  network: NetworkId,\n  session: u32,\n  participants: &[Public],\n) {\n  // wait until the active session.\n  let block = tokio::time::timeout(\n    core::time::Duration::from_secs(FAST_EPOCH_DURATION * TARGET_BLOCK_TIME * 2),\n    async move {\n      loop {\n        let mut block = 
serai.latest_finalized_block_hash().await.unwrap();\n        if session_for_block(serai, block, network).await < session {\n          // Sleep a block\n          tokio::time::sleep(core::time::Duration::from_secs(TARGET_BLOCK_TIME)).await;\n          continue;\n        }\n        while session_for_block(serai, block, network).await > session {\n          block = serai.block(block).await.unwrap().unwrap().header.parent_hash.0;\n        }\n        assert_eq!(session_for_block(serai, block, network).await, session);\n        break block;\n      }\n    },\n  )\n  .await\n  .unwrap();\n  let serai_for_block = serai.as_of(block);\n\n  // verify session\n  let s = serai_for_block.validator_sets().session(network).await.unwrap().unwrap();\n  assert_eq!(s.0, session);\n\n  // verify participants\n  let mut validators =\n    serai_for_block.validator_sets().active_network_validators(network).await.unwrap();\n  validators.sort();\n  assert_eq!(validators, participants);\n\n  // make sure finalization continues as usual after the changes\n  let current_finalized_block = serai.latest_finalized_block().await.unwrap().header.number;\n  tokio::time::timeout(core::time::Duration::from_secs(TARGET_BLOCK_TIME * 10), async move {\n    let mut finalized_block = serai.latest_finalized_block().await.unwrap().header.number;\n    while finalized_block <= current_finalized_block + 2 {\n      tokio::time::sleep(core::time::Duration::from_secs(TARGET_BLOCK_TIME)).await;\n      finalized_block = serai.latest_finalized_block().await.unwrap().header.number;\n    }\n  })\n  .await\n  .unwrap();\n\n  // TODO: verify key shares as well?\n}\n\nasync fn get_session_at_which_changes_activate(\n  serai: &Serai,\n  network: NetworkId,\n  hash: [u8; 32],\n) -> u32 {\n  let session = session_for_block(serai, hash, network).await;\n\n  // changes should be active in the next session\n  if network == NetworkId::Serai {\n    // it takes 1 extra session for serai net to make the changes active.\n    session + 
2\n  } else {\n    session + 1\n  }\n}\n"
  },
  {
    "path": "substrate/coins/pallet/Cargo.toml",
    "content": "[package]\nname = \"serai-coins-pallet\"\nversion = \"0.1.0\"\ndescription = \"Coins pallet for Serai\"\nlicense = \"AGPL-3.0-only\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/substrate/coins/pallet\"\nauthors = [\"Akil Demir <akildemir72@gmail.com>\"]\nedition = \"2021\"\nrust-version = \"1.74\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[package.metadata.cargo-machete]\nignored = [\"scale\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nscale = { package = \"parity-scale-codec\", version = \"3\", default-features = false, features = [\"derive\"] }\n\nframe-system = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nframe-support = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\n\nsp-core = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nsp-std = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nsp-runtime = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\n\npallet-transaction-payment = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\n\nserai-primitives = { path = \"../../primitives\", default-features = false, features = [\"serde\"] }\ncoins-primitives = { package = \"serai-coins-primitives\", path = \"../primitives\", default-features = false }\n\n[dev-dependencies]\nsp-io = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false, features = [\"std\"] 
}\n\n[features]\nstd = [\n  \"frame-system/std\",\n  \"frame-support/std\",\n\n  \"sp-core/std\",\n  \"sp-std/std\",\n  \"sp-runtime/std\",\n\n  \"pallet-transaction-payment/std\",\n\n  \"serai-primitives/std\",\n  \"coins-primitives/std\",\n]\n\ntry-runtime = [\n  \"frame-system/try-runtime\",\n  \"frame-support/try-runtime\",\n\n  \"sp-runtime/try-runtime\",\n]\n\nruntime-benchmarks = [\n  \"frame-system/runtime-benchmarks\",\n  \"frame-support/runtime-benchmarks\",\n]\n\ndefault = [\"std\"]\n"
  },
  {
    "path": "substrate/coins/pallet/LICENSE",
    "content": "AGPL-3.0-only license\n\nCopyright (c) 2023 Luke Parker\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License Version 3 as\npublished by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n"
  },
  {
    "path": "substrate/coins/pallet/src/lib.rs",
    "content": "#![cfg_attr(not(feature = \"std\"), no_std)]\n\n#[cfg(test)]\nmod mock;\n\n#[cfg(test)]\nmod tests;\n\nuse serai_primitives::{Balance, Coin, ExternalBalance, SubstrateAmount};\n\npub trait AllowMint {\n  fn is_allowed(balance: &ExternalBalance) -> bool;\n}\n\nimpl AllowMint for () {\n  fn is_allowed(_: &ExternalBalance) -> bool {\n    true\n  }\n}\n\n// TODO: Investigate why Substrate generates this\n#[allow(unreachable_patterns, clippy::cast_possible_truncation)]\n#[frame_support::pallet]\npub mod pallet {\n  use super::*;\n  use sp_std::{vec::Vec, any::TypeId};\n  use sp_core::sr25519::Public;\n  use sp_runtime::{\n    traits::{DispatchInfoOf, PostDispatchInfoOf},\n    transaction_validity::{TransactionValidityError, InvalidTransaction},\n  };\n\n  use frame_system::pallet_prelude::*;\n  use frame_support::pallet_prelude::*;\n\n  use pallet_transaction_payment::{Config as TpConfig, OnChargeTransaction};\n\n  use serai_primitives::*;\n  pub use coins_primitives as primitives;\n  use primitives::*;\n\n  type LiquidityTokensInstance = crate::Instance1;\n\n  #[pallet::config]\n  pub trait Config<I: 'static = ()>: frame_system::Config<AccountId = Public> {\n    type AllowMint: AllowMint;\n  }\n\n  #[pallet::genesis_config]\n  #[derive(Clone, Debug)]\n  pub struct GenesisConfig<T: Config<I>, I: 'static = ()> {\n    pub accounts: Vec<(T::AccountId, Balance)>,\n    pub _ignore: PhantomData<I>,\n  }\n\n  impl<T: Config<I>, I: 'static> Default for GenesisConfig<T, I> {\n    fn default() -> Self {\n      GenesisConfig { accounts: Default::default(), _ignore: Default::default() }\n    }\n  }\n\n  #[pallet::error]\n  pub enum Error<T, I = ()> {\n    AmountOverflowed,\n    NotEnoughCoins,\n    BurnWithInstructionNotAllowed,\n    MintNotAllowed,\n  }\n\n  #[pallet::event]\n  #[pallet::generate_deposit(fn deposit_event)]\n  pub enum Event<T: Config<I>, I: 'static = ()> {\n    Mint { to: Public, balance: Balance },\n    Burn { from: Public, balance: Balance },\n   
 BurnWithInstruction { from: Public, instruction: OutInstructionWithBalance },\n    Transfer { from: Public, to: Public, balance: Balance },\n  }\n\n  #[pallet::pallet]\n  pub struct Pallet<T, I = ()>(_);\n\n  /// The amount of coins each account has.\n  // Identity is used as the second key's hasher due to it being a non-manipulatable fixed-space\n  // ID.\n  #[pallet::storage]\n  #[pallet::getter(fn balances)]\n  pub type Balances<T: Config<I>, I: 'static = ()> =\n    StorageDoubleMap<_, Blake2_128Concat, Public, Identity, Coin, SubstrateAmount, ValueQuery>;\n\n  /// The total supply of each coin.\n  // We use Identity type here again due to reasons stated in the Balances Storage.\n  #[pallet::storage]\n  #[pallet::getter(fn supply)]\n  pub type Supply<T: Config<I>, I: 'static = ()> =\n    StorageMap<_, Identity, Coin, SubstrateAmount, ValueQuery>;\n\n  #[pallet::genesis_build]\n  impl<T: Config<I>, I: 'static> BuildGenesisConfig for GenesisConfig<T, I> {\n    fn build(&self) {\n      // initialize the supply of the coins\n      // TODO: Don't use COINS yet GenesisConfig so we can safely expand COINS\n      for c in &COINS {\n        Supply::<T, I>::set(c, 0);\n      }\n\n      // initialize the genesis accounts\n      for (account, balance) in &self.accounts {\n        Pallet::<T, I>::mint(*account, *balance).unwrap();\n      }\n    }\n  }\n\n  #[pallet::hooks]\n  impl<T: Config<I>, I: 'static> Hooks<BlockNumberFor<T>> for Pallet<T, I> {\n    fn on_initialize(_: BlockNumberFor<T>) -> Weight {\n      // burn the fees collected previous block\n      let coin = Coin::Serai;\n      let amount = Self::balance(FEE_ACCOUNT.into(), coin);\n      // we can unwrap, we are not burning more then what we have\n      // If this errors, it'll halt the runtime however (due to being called at the start of every\n      // block), requiring extra care when reviewing\n      Self::burn_internal(FEE_ACCOUNT.into(), Balance { coin, amount }).unwrap();\n      Weight::zero() // TODO\n   
 }\n  }\n\n  impl<T: Config<I>, I: 'static> Pallet<T, I> {\n    /// Returns the balance of a given account for `coin`.\n    pub fn balance(of: Public, coin: Coin) -> Amount {\n      Amount(Self::balances(of, coin))\n    }\n\n    fn decrease_balance_internal(from: Public, balance: Balance) -> Result<(), Error<T, I>> {\n      let coin = &balance.coin;\n\n      // sub amount from account\n      let new_amount = Self::balances(from, coin)\n        .checked_sub(balance.amount.0)\n        .ok_or(Error::<T, I>::NotEnoughCoins)?;\n\n      // save\n      if new_amount == 0 {\n        Balances::<T, I>::remove(from, coin);\n      } else {\n        Balances::<T, I>::set(from, coin, new_amount);\n      }\n      Ok(())\n    }\n\n    fn increase_balance_internal(to: Public, balance: Balance) -> Result<(), Error<T, I>> {\n      let coin = &balance.coin;\n\n      // add amount to account\n      let new_amount = Self::balances(to, coin)\n        .checked_add(balance.amount.0)\n        .ok_or(Error::<T, I>::AmountOverflowed)?;\n\n      // save\n      Balances::<T, I>::set(to, coin, new_amount);\n      Ok(())\n    }\n\n    /// Mint `balance` to the given account.\n    ///\n    /// Errors if any amount overflows.\n    pub fn mint(to: Public, balance: Balance) -> Result<(), Error<T, I>> {\n      // If the coin isn't Serai, which we're always allowed to mint, and the mint isn't explicitly\n      // allowed, error\n      if !ExternalCoin::try_from(balance.coin)\n        .map(|coin| T::AllowMint::is_allowed(&ExternalBalance { coin, amount: balance.amount }))\n        .unwrap_or(true)\n      {\n        Err(Error::<T, I>::MintNotAllowed)?;\n      }\n\n      // update the balance\n      Self::increase_balance_internal(to, balance)?;\n\n      // update the supply\n      let new_supply = Self::supply(balance.coin)\n        .checked_add(balance.amount.0)\n        .ok_or(Error::<T, I>::AmountOverflowed)?;\n      Supply::<T, I>::set(balance.coin, new_supply);\n\n      
Self::deposit_event(Event::Mint { to, balance });\n      Ok(())\n    }\n\n    /// Burn `balance` from the specified account.\n    fn burn_internal(from: Public, balance: Balance) -> Result<(), Error<T, I>> {\n      // don't waste time if amount == 0\n      if balance.amount.0 == 0 {\n        return Ok(());\n      }\n\n      // update the balance\n      Self::decrease_balance_internal(from, balance)?;\n\n      // update the supply\n      let new_supply = Self::supply(balance.coin).checked_sub(balance.amount.0).unwrap();\n      Supply::<T, I>::set(balance.coin, new_supply);\n\n      Ok(())\n    }\n\n    /// Transfer `balance` from `from` to `to`.\n    pub fn transfer_internal(\n      from: Public,\n      to: Public,\n      balance: Balance,\n    ) -> Result<(), Error<T, I>> {\n      // update balances of accounts\n      Self::decrease_balance_internal(from, balance)?;\n      Self::increase_balance_internal(to, balance)?;\n      Self::deposit_event(Event::Transfer { from, to, balance });\n      Ok(())\n    }\n  }\n\n  #[pallet::call]\n  impl<T: Config<I>, I: 'static> Pallet<T, I> {\n    #[pallet::call_index(0)]\n    #[pallet::weight((0, DispatchClass::Normal))] // TODO\n    pub fn transfer(origin: OriginFor<T>, to: Public, balance: Balance) -> DispatchResult {\n      let from = ensure_signed(origin)?;\n      Self::transfer_internal(from, to, balance)?;\n      Ok(())\n    }\n\n    /// Burn `balance` from the caller.\n    #[pallet::call_index(1)]\n    #[pallet::weight((0, DispatchClass::Normal))] // TODO\n    pub fn burn(origin: OriginFor<T>, balance: Balance) -> DispatchResult {\n      let from = ensure_signed(origin)?;\n      Self::burn_internal(from, balance)?;\n      Self::deposit_event(Event::Burn { from, balance });\n      Ok(())\n    }\n\n    /// Burn `balance` with `OutInstructionWithBalance` from the caller.\n    #[pallet::call_index(2)]\n    #[pallet::weight((0, DispatchClass::Normal))] // TODO\n    pub fn burn_with_instruction(\n      origin: OriginFor<T>,\n  
    instruction: OutInstructionWithBalance,\n    ) -> DispatchResult {\n      if TypeId::of::<I>() == TypeId::of::<LiquidityTokensInstance>() {\n        Err(Error::<T, I>::BurnWithInstructionNotAllowed)?;\n      }\n\n      let from = ensure_signed(origin)?;\n      Self::burn_internal(from, instruction.balance.into())?;\n      Self::deposit_event(Event::BurnWithInstruction { from, instruction });\n      Ok(())\n    }\n  }\n\n  impl<T: Config> OnChargeTransaction<T> for Pallet<T>\n  where\n    T: TpConfig,\n  {\n    type Balance = SubstrateAmount;\n    type LiquidityInfo = Option<SubstrateAmount>;\n\n    fn withdraw_fee(\n      who: &Public,\n      _call: &T::RuntimeCall,\n      _dispatch_info: &DispatchInfoOf<T::RuntimeCall>,\n      fee: Self::Balance,\n      _tip: Self::Balance,\n    ) -> Result<Self::LiquidityInfo, TransactionValidityError> {\n      if fee == 0 {\n        return Ok(None);\n      }\n\n      let balance = Balance { coin: Coin::Serai, amount: Amount(fee) };\n      match Self::transfer_internal(*who, FEE_ACCOUNT.into(), balance) {\n        Err(_) => Err(InvalidTransaction::Payment)?,\n        Ok(()) => Ok(Some(fee)),\n      }\n    }\n\n    fn can_withdraw_fee(\n      who: &Public,\n      _call: &T::RuntimeCall,\n      _dispatch_info: &DispatchInfoOf<T::RuntimeCall>,\n      fee: Self::Balance,\n      _tip: Self::Balance,\n    ) -> Result<(), TransactionValidityError> {\n      if fee == 0 {\n        return Ok(());\n      }\n      if Self::balance(*who, Coin::Serai).0 < fee {\n        Err(InvalidTransaction::Payment)?;\n      }\n      Ok(())\n    }\n\n    fn correct_and_deposit_fee(\n      who: &Public,\n      _dispatch_info: &DispatchInfoOf<T::RuntimeCall>,\n      _post_info: &PostDispatchInfoOf<T::RuntimeCall>,\n      corrected_fee: Self::Balance,\n      _tip: Self::Balance,\n      already_withdrawn: Self::LiquidityInfo,\n    ) -> Result<(), TransactionValidityError> {\n      if let Some(paid) = already_withdrawn {\n        let refund_amount = 
paid.saturating_sub(corrected_fee);\n        let balance = Balance { coin: Coin::Serai, amount: Amount(refund_amount) };\n        Self::transfer_internal(FEE_ACCOUNT.into(), *who, balance)\n          .map_err(|_| TransactionValidityError::Invalid(InvalidTransaction::Payment))?;\n      }\n      Ok(())\n    }\n  }\n}\n\npub use pallet::*;\n"
  },
  {
    "path": "substrate/coins/pallet/src/mock.rs",
    "content": "//! Test environment for Coins pallet.\n\nuse super::*;\n\nuse frame_support::{construct_runtime, derive_impl};\n\nuse sp_core::sr25519::Public;\nuse sp_runtime::{traits::IdentityLookup, BuildStorage};\n\nuse crate as coins;\n\ntype Block = frame_system::mocking::MockBlock<Test>;\n\nconstruct_runtime!(\n  pub enum Test\n  {\n    System: frame_system,\n    Coins: coins,\n  }\n);\n\n#[derive_impl(frame_system::config_preludes::TestDefaultConfig)]\nimpl frame_system::Config for Test {\n  type AccountId = Public;\n  type Lookup = IdentityLookup<Self::AccountId>;\n  type Block = Block;\n}\n\nimpl Config for Test {\n  type AllowMint = ();\n}\n\npub(crate) fn new_test_ext() -> sp_io::TestExternalities {\n  let mut t = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();\n\n  crate::GenesisConfig::<Test> { accounts: vec![], _ignore: Default::default() }\n    .assimilate_storage(&mut t)\n    .unwrap();\n\n  let mut ext = sp_io::TestExternalities::new(t);\n  ext.execute_with(|| System::set_block_number(0));\n  ext\n}\n"
  },
  {
    "path": "substrate/coins/pallet/src/tests.rs",
    "content": "use crate::{mock::*, primitives::*};\n\nuse frame_system::RawOrigin;\nuse sp_core::Pair;\n\nuse serai_primitives::*;\n\npub type CoinsEvent = crate::Event<Test, ()>;\n\n#[test]\nfn mint() {\n  new_test_ext().execute_with(|| {\n    // minting u64::MAX should work\n    let coin = Coin::Serai;\n    let to = insecure_pair_from_name(\"random1\").public();\n    let balance = Balance { coin, amount: Amount(u64::MAX) };\n\n    Coins::mint(to, balance).unwrap();\n    assert_eq!(Coins::balance(to, coin), balance.amount);\n\n    // minting more should fail\n    assert!(Coins::mint(to, Balance { coin, amount: Amount(1) }).is_err());\n\n    // supply now should be equal to sum of the accounts balance sum\n    assert_eq!(Coins::supply(coin), balance.amount.0);\n\n    // test events\n    let mint_events = System::events()\n      .iter()\n      .filter_map(|event| {\n        if let RuntimeEvent::Coins(e) = &event.event {\n          if matches!(e, CoinsEvent::Mint { .. }) {\n            Some(e.clone())\n          } else {\n            None\n          }\n        } else {\n          None\n        }\n      })\n      .collect::<Vec<_>>();\n\n    assert_eq!(mint_events, vec![CoinsEvent::Mint { to, balance }]);\n  })\n}\n\n#[test]\nfn burn_with_instruction() {\n  new_test_ext().execute_with(|| {\n    // mint some coin\n    let coin = Coin::External(ExternalCoin::Bitcoin);\n    let to = insecure_pair_from_name(\"random1\").public();\n    let balance = Balance { coin, amount: Amount(10 * 10u64.pow(coin.decimals())) };\n\n    Coins::mint(to, balance).unwrap();\n    assert_eq!(Coins::balance(to, coin), balance.amount);\n    assert_eq!(Coins::supply(coin), balance.amount.0);\n\n    // we shouldn't be able to burn more than what we have\n    let mut instruction = OutInstructionWithBalance {\n      instruction: OutInstruction { address: ExternalAddress::new(vec![]).unwrap(), data: None },\n      balance: ExternalBalance {\n        coin: coin.try_into().unwrap(),\n        amount: 
Amount(balance.amount.0 + 1),\n      },\n    };\n    assert!(\n      Coins::burn_with_instruction(RawOrigin::Signed(to).into(), instruction.clone()).is_err()\n    );\n\n    // it should now work\n    instruction.balance.amount = balance.amount;\n    Coins::burn_with_instruction(RawOrigin::Signed(to).into(), instruction.clone()).unwrap();\n\n    // balance & supply now should be back to 0\n    assert_eq!(Coins::balance(to, coin), Amount(0));\n    assert_eq!(Coins::supply(coin), 0);\n\n    let burn_events = System::events()\n      .iter()\n      .filter_map(|event| {\n        if let RuntimeEvent::Coins(e) = &event.event {\n          if matches!(e, CoinsEvent::BurnWithInstruction { .. }) {\n            Some(e.clone())\n          } else {\n            None\n          }\n        } else {\n          None\n        }\n      })\n      .collect::<Vec<_>>();\n\n    assert_eq!(burn_events, vec![CoinsEvent::BurnWithInstruction { from: to, instruction }]);\n  })\n}\n\n#[test]\nfn transfer() {\n  new_test_ext().execute_with(|| {\n    // mint some coin\n    let coin = Coin::External(ExternalCoin::Bitcoin);\n    let from = insecure_pair_from_name(\"random1\").public();\n    let balance = Balance { coin, amount: Amount(10 * 10u64.pow(coin.decimals())) };\n\n    Coins::mint(from, balance).unwrap();\n    assert_eq!(Coins::balance(from, coin), balance.amount);\n    assert_eq!(Coins::supply(coin), balance.amount.0);\n\n    // we can't send more than what we have\n    let to = insecure_pair_from_name(\"random2\").public();\n    assert!(Coins::transfer(\n      RawOrigin::Signed(from).into(),\n      to,\n      Balance { coin, amount: Amount(balance.amount.0 + 1) }\n    )\n    .is_err());\n\n    // we can send it all\n    Coins::transfer(RawOrigin::Signed(from).into(), to, balance).unwrap();\n\n    // check the balances\n    assert_eq!(Coins::balance(from, coin), Amount(0));\n    assert_eq!(Coins::balance(to, coin), balance.amount);\n\n    // supply shouldn't change\n    
assert_eq!(Coins::supply(coin), balance.amount.0);\n  })\n}\n"
  },
  {
    "path": "substrate/coins/primitives/Cargo.toml",
    "content": "[package]\nname = \"serai-coins-primitives\"\nversion = \"0.1.0\"\ndescription = \"Serai coins primitives\"\nlicense = \"MIT\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nedition = \"2021\"\nrust-version = \"1.74\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nzeroize = { version = \"^1.5\", features = [\"derive\"], optional = true }\n\nborsh = { version = \"1\", default-features = false, features = [\"derive\", \"de_strict_order\"], optional = true }\nserde = { version = \"1\", default-features = false, features = [\"derive\", \"alloc\"], optional = true }\n\nscale = { package = \"parity-scale-codec\", version = \"3\", default-features = false, features = [\"derive\"] }\n\nserai-primitives = { path = \"../../primitives\", default-features = false }\n\n[dev-dependencies]\nsp-runtime = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\n\n[features]\nstd = [\"zeroize\", \"borsh?/std\", \"serde?/std\", \"scale/std\", \"sp-runtime/std\", \"serai-primitives/std\"]\nborsh = [\"dep:borsh\", \"serai-primitives/borsh\"]\nserde = [\"dep:serde\", \"serai-primitives/serde\"]\ndefault = [\"std\"]\n"
  },
  {
    "path": "substrate/coins/primitives/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2023 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "substrate/coins/primitives/src/lib.rs",
    "content": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![cfg_attr(not(feature = \"std\"), no_std)]\n#![expect(clippy::cast_possible_truncation)]\n\n#[cfg(feature = \"std\")]\nuse zeroize::Zeroize;\n\n#[cfg(feature = \"borsh\")]\nuse borsh::{BorshSerialize, BorshDeserialize};\n#[cfg(feature = \"serde\")]\nuse serde::{Serialize, Deserialize};\n\nuse scale::{Encode, Decode, DecodeWithMemTracking, MaxEncodedLen};\n\nuse serai_primitives::{system_address, Data, ExternalAddress, ExternalBalance, SeraiAddress};\n\npub const FEE_ACCOUNT: SeraiAddress = system_address(b\"Coins-fees\");\n\n#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen)]\n#[cfg_attr(feature = \"std\", derive(Zeroize))]\n#[cfg_attr(feature = \"borsh\", derive(BorshSerialize, BorshDeserialize))]\n#[cfg_attr(feature = \"serde\", derive(Serialize, Deserialize))]\npub struct OutInstruction {\n  pub address: ExternalAddress,\n  pub data: Option<Data>,\n}\n\n#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen)]\n#[cfg_attr(feature = \"std\", derive(Zeroize))]\n#[cfg_attr(feature = \"borsh\", derive(BorshSerialize, BorshDeserialize))]\n#[cfg_attr(feature = \"serde\", derive(Serialize, Deserialize))]\npub struct OutInstructionWithBalance {\n  pub instruction: OutInstruction,\n  pub balance: ExternalBalance,\n}\n\n#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen)]\n#[cfg_attr(feature = \"std\", derive(Zeroize))]\n#[cfg_attr(feature = \"borsh\", derive(BorshSerialize, BorshDeserialize))]\n#[cfg_attr(feature = \"serde\", derive(Serialize, Deserialize))]\npub enum Destination {\n  Native(SeraiAddress),\n  External(OutInstruction),\n}\n\n#[test]\nfn address() {\n  use sp_runtime::traits::TrailingZeroInput;\n  assert_eq!(\n    FEE_ACCOUNT,\n    SeraiAddress::decode(&mut TrailingZeroInput::new(b\"Coins-fees\")).unwrap()\n  );\n}\n"
  },
  {
    "path": "substrate/dex/pallet/Cargo.toml",
    "content": "[package]\nname = \"serai-dex-pallet\"\nversion = \"0.1.0\"\ndescription = \"DEX pallet for Serai\"\nlicense = \"AGPL-3.0-only\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/substrate/dex/pallet\"\nauthors = [\"Parity Technologies <admin@parity.io>\", \"Akil Demir <akildemir72@gmail.com>\"]\nedition = \"2021\"\nrust-version = \"1.74\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[package.metadata.cargo-machete]\nignored = [\"scale\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nscale = { package = \"parity-scale-codec\", version = \"3.6.1\", default-features = false }\n\nsp-std = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nsp-io = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nsp-api = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nsp-runtime = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nsp-core = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\n\nframe-system = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nframe-support = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nframe-benchmarking = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false, optional = true }\n\ncoins-pallet = { package = \"serai-coins-pallet\", path = \"../../coins/pallet\", default-features = false }\n\nserai-primitives = { path = \"../../primitives\", default-features = false }\n\n[dev-dependencies]\nrand_core = { version = \"0.6\", default-features = false, features = [\"getrandom\"] }\n\n[features]\ndefault = [\"std\"]\nstd = [\n  \"scale/std\",\n\n  \"sp-std/std\",\n  \"sp-io/std\",\n  \"sp-api/std\",\n  \"sp-runtime/std\",\n  \"sp-core/std\",\n\n  \"serai-primitives/std\",\n\n  \"frame-system/std\",\n  \"frame-support/std\",\n  \"frame-benchmarking?/std\",\n\n  \"coins-pallet/std\",\n]\nruntime-benchmarks = [\n  \"sp-runtime/runtime-benchmarks\",\n\n  \"frame-system/runtime-benchmarks\",\n  \"frame-support/runtime-benchmarks\",\n  \"frame-benchmarking/runtime-benchmarks\",\n]\ntry-runtime = [\n  \"sp-runtime/try-runtime\",\n\n  \"frame-system/try-runtime\",\n  \"frame-support/try-runtime\",\n]\n"
  },
  {
    "path": "substrate/dex/pallet/LICENSE-AGPL3",
    "content": "AGPL-3.0-only license\n\nCopyright (c) 2023 Luke Parker\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License Version 3 as\npublished by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n"
  },
  {
    "path": "substrate/dex/pallet/LICENSE-APACHE2",
    "content": "                                Apache License\n                        Version 2.0, January 2004\n                    http://www.apache.org/licenses/\n\nTERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n1. Definitions.\n\n    \"License\" shall mean the terms and conditions for use, reproduction,\n    and distribution as defined by Sections 1 through 9 of this document.\n\n    \"Licensor\" shall mean the copyright owner or entity authorized by\n    the copyright owner that is granting the License.\n\n    \"Legal Entity\" shall mean the union of the acting entity and all\n    other entities that control, are controlled by, or are under common\n    control with that entity. For the purposes of this definition,\n    \"control\" means (i) the power, direct or indirect, to cause the\n    direction or management of such entity, whether by contract or\n    otherwise, or (ii) ownership of fifty percent (50%) or more of the\n    outstanding shares, or (iii) beneficial ownership of such entity.\n\n    \"You\" (or \"Your\") shall mean an individual or Legal Entity\n    exercising permissions granted by this License.\n\n    \"Source\" form shall mean the preferred form for making modifications,\n    including but not limited to software source code, documentation\n    source, and configuration files.\n\n    \"Object\" form shall mean any form resulting from mechanical\n    transformation or translation of a Source form, including but\n    not limited to compiled object code, generated documentation,\n    and conversions to other media types.\n\n    \"Work\" shall mean the work of authorship, whether in Source or\n    Object form, made available under the License, as indicated by a\n    copyright notice that is included in or attached to the work\n    (an example is provided in the Appendix below).\n\n    \"Derivative Works\" shall mean any work, whether in Source or Object\n    form, that is based on (or derived from) the Work and for which the\n  
  editorial revisions, annotations, elaborations, or other modifications\n    represent, as a whole, an original work of authorship. For the purposes\n    of this License, Derivative Works shall not include works that remain\n    separable from, or merely link (or bind by name) to the interfaces of,\n    the Work and Derivative Works thereof.\n\n    \"Contribution\" shall mean any work of authorship, including\n    the original version of the Work and any modifications or additions\n    to that Work or Derivative Works thereof, that is intentionally\n    submitted to Licensor for inclusion in the Work by the copyright owner\n    or by an individual or Legal Entity authorized to submit on behalf of\n    the copyright owner. For the purposes of this definition, \"submitted\"\n    means any form of electronic, verbal, or written communication sent\n    to the Licensor or its representatives, including but not limited to\n    communication on electronic mailing lists, source code control systems,\n    and issue tracking systems that are managed by, or on behalf of, the\n    Licensor for the purpose of discussing and improving the Work, but\n    excluding communication that is conspicuously marked or otherwise\n    designated in writing by the copyright owner as \"Not a Contribution.\"\n\n    \"Contributor\" shall mean Licensor and any individual or Legal Entity\n    on behalf of whom a Contribution has been received by Licensor and\n    subsequently incorporated within the Work.\n\n2. Grant of Copyright License. Subject to the terms and conditions of\n    this License, each Contributor hereby grants to You a perpetual,\n    worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n    copyright license to reproduce, prepare Derivative Works of,\n    publicly display, publicly perform, sublicense, and distribute the\n    Work and such Derivative Works in Source or Object form.\n\n3. Grant of Patent License. 
Subject to the terms and conditions of\n    this License, each Contributor hereby grants to You a perpetual,\n    worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n    (except as stated in this section) patent license to make, have made,\n    use, offer to sell, sell, import, and otherwise transfer the Work,\n    where such license applies only to those patent claims licensable\n    by such Contributor that are necessarily infringed by their\n    Contribution(s) alone or by combination of their Contribution(s)\n    with the Work to which such Contribution(s) was submitted. If You\n    institute patent litigation against any entity (including a\n    cross-claim or counterclaim in a lawsuit) alleging that the Work\n    or a Contribution incorporated within the Work constitutes direct\n    or contributory patent infringement, then any patent licenses\n    granted to You under this License for that Work shall terminate\n    as of the date such litigation is filed.\n\n4. Redistribution. 
You may reproduce and distribute copies of the\n    Work or Derivative Works thereof in any medium, with or without\n    modifications, and in Source or Object form, provided that You\n    meet the following conditions:\n\n    (a) You must give any other recipients of the Work or\n        Derivative Works a copy of this License; and\n\n    (b) You must cause any modified files to carry prominent notices\n        stating that You changed the files; and\n\n    (c) You must retain, in the Source form of any Derivative Works\n        that You distribute, all copyright, patent, trademark, and\n        attribution notices from the Source form of the Work,\n        excluding those notices that do not pertain to any part of\n        the Derivative Works; and\n\n    (d) If the Work includes a \"NOTICE\" text file as part of its\n        distribution, then any Derivative Works that You distribute must\n        include a readable copy of the attribution notices contained\n        within such NOTICE file, excluding those notices that do not\n        pertain to any part of the Derivative Works, in at least one\n        of the following places: within a NOTICE text file distributed\n        as part of the Derivative Works; within the Source form or\n        documentation, if provided along with the Derivative Works; or,\n        within a display generated by the Derivative Works, if and\n        wherever such third-party notices normally appear. The contents\n        of the NOTICE file are for informational purposes only and\n        do not modify the License. 
You may add Your own attribution\n        notices within Derivative Works that You distribute, alongside\n        or as an addendum to the NOTICE text from the Work, provided\n        that such additional attribution notices cannot be construed\n        as modifying the License.\n\n    You may add Your own copyright statement to Your modifications and\n    may provide additional or different license terms and conditions\n    for use, reproduction, or distribution of Your modifications, or\n    for any such Derivative Works as a whole, provided Your use,\n    reproduction, and distribution of the Work otherwise complies with\n    the conditions stated in this License.\n\n5. Submission of Contributions. Unless You explicitly state otherwise,\n    any Contribution intentionally submitted for inclusion in the Work\n    by You to the Licensor shall be under the terms and conditions of\n    this License, without any additional terms or conditions.\n    Notwithstanding the above, nothing herein shall supersede or modify\n    the terms of any separate license agreement you may have executed\n    with Licensor regarding such Contributions.\n\n6. Trademarks. This License does not grant permission to use the trade\n    names, trademarks, service marks, or product names of the Licensor,\n    except as required for reasonable and customary use in describing the\n    origin of the Work and reproducing the content of the NOTICE file.\n\n7. Disclaimer of Warranty. Unless required by applicable law or\n    agreed to in writing, Licensor provides the Work (and each\n    Contributor provides its Contributions) on an \"AS IS\" BASIS,\n    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n    implied, including, without limitation, any warranties or conditions\n    of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n    PARTICULAR PURPOSE. 
You are solely responsible for determining the\n    appropriateness of using or redistributing the Work and assume any\n    risks associated with Your exercise of permissions under this License.\n\n8. Limitation of Liability. In no event and under no legal theory,\n    whether in tort (including negligence), contract, or otherwise,\n    unless required by applicable law (such as deliberate and grossly\n    negligent acts) or agreed to in writing, shall any Contributor be\n    liable to You for damages, including any direct, indirect, special,\n    incidental, or consequential damages of any character arising as a\n    result of this License or out of the use or inability to use the\n    Work (including but not limited to damages for loss of goodwill,\n    work stoppage, computer failure or malfunction, or any and all\n    other commercial damages or losses), even if such Contributor\n    has been advised of the possibility of such damages.\n\n9. Accepting Warranty or Additional Liability. While redistributing\n    the Work or Derivative Works thereof, You may choose to offer,\n    and charge a fee for, acceptance of support, warranty, indemnity,\n    or other liability obligations and/or rights consistent with this\n    License. However, in accepting such obligations, You may act only\n    on Your own behalf and on Your sole responsibility, not on behalf\n    of any other Contributor, and only if You agree to indemnify,\n    defend, and hold each Contributor harmless for any liability\n    incurred by, or claims asserted against, such Contributor by reason\n    of your accepting any such warranty or additional liability.\n\nEND OF TERMS AND CONDITIONS\n\nAPPENDIX: How to apply the Apache License to your work.\n\n    To apply the Apache License to your work, attach the following\n    boilerplate notice, with the fields enclosed by brackets \"[]\"\n    replaced with your own identifying information. (Don't include\n    the brackets!)  
The text should be enclosed in the appropriate\n    comment syntax for the file format. We also recommend that a\n    file or class name and description of purpose be included on the\n    same \"printed page\" as the copyright notice for easier\n    identification within third-party archives.\n\nCopyright [yyyy] [name of copyright owner]\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\n                                    NOTE\n                            \nIndividual files contain the following tag instead of the full license\ntext.\n\n    SPDX-License-Identifier:  Apache-2.0\n\nThis enables machine processing of license information based on the SPDX\nLicense Identifiers that are here available: http://spdx.org/licenses/"
  },
  {
    "path": "substrate/dex/pallet/src/benchmarking.rs",
    "content": "// This file was originally:\n\n// Copyright (C) Parity Technologies (UK) Ltd.\n// SPDX-License-Identifier: Apache-2.0\n\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// \thttp://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n// It has been forked into a crate distributed under the AGPL 3.0.\n// Please check the current distribution for up-to-date copyright and licensing information.\n\n//! Dex pallet benchmarking.\n\nuse super::*;\nuse frame_benchmarking::{benchmarks, whitelisted_caller};\nuse frame_support::{assert_ok, storage::bounded_vec::BoundedVec};\nuse frame_system::RawOrigin as SystemOrigin;\n\nuse sp_runtime::traits::StaticLookup;\nuse sp_std::{ops::Div, prelude::*};\n\nuse serai_primitives::{Amount, Balance};\n\nuse crate::Pallet as Dex;\nuse coins_pallet::Pallet as Coins;\n\nconst INITIAL_COIN_BALANCE: u64 = 1_000_000_000;\ntype AccountIdLookupOf<T> = <<T as frame_system::Config>::Lookup as StaticLookup>::Source;\n\ntype LiquidityTokens<T> = coins_pallet::Pallet<T, coins_pallet::Instance1>;\n\nfn create_coin<T: Config>(coin: &ExternalCoin) -> (T::AccountId, AccountIdLookupOf<T>) {\n  let caller: T::AccountId = whitelisted_caller();\n  let caller_lookup = T::Lookup::unlookup(caller);\n  assert_ok!(Coins::<T>::mint(\n    caller,\n    Balance { coin: Coin::native(), amount: Amount(SubstrateAmount::MAX.div(1000u64)) }\n  ));\n  assert_ok!(Coins::<T>::mint(\n    caller,\n    Balance { coin: (*coin).into(), amount: Amount(INITIAL_COIN_BALANCE) }\n  ));\n  (caller, 
caller_lookup)\n}\n\nfn create_coin_and_pool<T: Config>(\n  coin: &ExternalCoin,\n) -> (ExternalCoin, T::AccountId, AccountIdLookupOf<T>) {\n  let (caller, caller_lookup) = create_coin::<T>(coin);\n  assert_ok!(Dex::<T>::create_pool(*coin));\n\n  (*coin, caller, caller_lookup)\n}\n\nbenchmarks! {\n  add_liquidity {\n    let coin1 = Coin::native();\n    let coin2 = ExternalCoin::Bitcoin;\n    let (lp_token, caller, _) = create_coin_and_pool::<T>(&coin2);\n    let add_amount: u64 = 1000;\n  }: _(\n    SystemOrigin::Signed(caller),\n    coin2,\n    1000u64,\n    add_amount,\n    0u64,\n    0u64,\n    caller\n  )\n  verify {\n    let pool_id = Dex::<T>::get_pool_id(coin1, coin2.into()).unwrap();\n    let lp_minted = Dex::<T>::calc_lp_amount_for_zero_supply(\n      add_amount,\n      1000u64,\n    ).unwrap();\n    assert_eq!(\n      LiquidityTokens::<T>::balance(caller, lp_token.into()).0,\n      lp_minted\n    );\n    assert_eq!(\n      Coins::<T>::balance(Dex::<T>::get_pool_account(pool_id), Coin::native()).0,\n      add_amount\n    );\n    assert_eq!(\n      Coins::<T>::balance(\n        Dex::<T>::get_pool_account(pool_id),\n        ExternalCoin::Bitcoin.into(),\n      ).0,\n      1000\n    );\n  }\n\n  remove_liquidity {\n    let coin1 = Coin::native();\n    let coin2 = ExternalCoin::Monero;\n    let (lp_token, caller, _) = create_coin_and_pool::<T>(&coin2);\n    let add_amount: u64 = 100;\n    let lp_minted = Dex::<T>::calc_lp_amount_for_zero_supply(\n      add_amount,\n      1000u64\n    ).unwrap();\n    let remove_lp_amount: u64 = lp_minted.checked_div(10).unwrap();\n\n    Dex::<T>::add_liquidity(\n      SystemOrigin::Signed(caller).into(),\n      coin2,\n      1000u64,\n      add_amount,\n      0u64,\n      0u64,\n      caller,\n    )?;\n    let total_supply = LiquidityTokens::<T>::supply(Coin::from(lp_token));\n  }: _(\n    SystemOrigin::Signed(caller),\n    coin2,\n    remove_lp_amount,\n    0u64,\n    0u64,\n    caller\n  )\n  verify {\n    let 
new_total_supply =  LiquidityTokens::<T>::supply(Coin::from(lp_token));\n    assert_eq!(\n      new_total_supply,\n      total_supply - remove_lp_amount\n    );\n  }\n\n  swap_exact_tokens_for_tokens {\n    let native = Coin::native();\n    let coin1 = ExternalCoin::Bitcoin;\n    let coin2 = ExternalCoin::Ether;\n    let (_, caller, _) = create_coin_and_pool::<T>(&coin1);\n    let (_, _) = create_coin::<T>(&coin2);\n\n    Dex::<T>::add_liquidity(\n      SystemOrigin::Signed(caller).into(),\n      coin1,\n      200u64,\n      // TODO: this call otherwise fails with `InsufficientLiquidityMinted` if we don't multiply\n      // with 3. Might be again related to their expectance on ed being > 1.\n      100 * 3,\n      0u64,\n      0u64,\n      caller,\n    )?;\n\n    let swap_amount = 100u64;\n\n    // since we only allow the native-coin pools, then the worst case scenario would be to swap\n    // coin1-native-coin2\n    Dex::<T>::create_pool(coin2)?;\n    Dex::<T>::add_liquidity(\n      SystemOrigin::Signed(caller).into(),\n      coin2,\n      1000u64,\n      500,\n      0u64,\n      0u64,\n      caller,\n    )?;\n\n    let path = vec![Coin::from(coin1), native, Coin::from(coin2)];\n    let path = BoundedVec::<_, T::MaxSwapPathLength>::try_from(path).unwrap();\n    let native_balance = Coins::<T>::balance(caller, native).0;\n    let coin1_balance = Coins::<T>::balance(caller, ExternalCoin::Bitcoin.into()).0;\n  }: _(SystemOrigin::Signed(caller), path, swap_amount, 1u64, caller)\n  verify {\n    let ed_bump = 2u64;\n    let new_coin1_balance = Coins::<T>::balance(caller, ExternalCoin::Bitcoin.into()).0;\n    assert_eq!(new_coin1_balance, coin1_balance - 100u64);\n  }\n\n  swap_tokens_for_exact_tokens {\n    let native = Coin::native();\n    let coin1 = ExternalCoin::Bitcoin;\n    let coin2 = ExternalCoin::Ether;\n    let (_, caller, _) = create_coin_and_pool::<T>(&coin1);\n    let (_, _) = create_coin::<T>(&coin2);\n\n    Dex::<T>::add_liquidity(\n      
SystemOrigin::Signed(caller).into(),\n      coin1,\n      500u64,\n      1000,\n      0u64,\n      0u64,\n      caller,\n    )?;\n\n    // since we only allow the native-coin pools, then the worst case scenario would be to swap\n    // coin1-native-coin2\n    Dex::<T>::create_pool(coin2)?;\n    Dex::<T>::add_liquidity(\n      SystemOrigin::Signed(caller).into(),\n      coin2,\n      1000u64,\n      500,\n      0u64,\n      0u64,\n      caller,\n    )?;\n    let path = vec![Coin::from(coin1), native, Coin::from(coin2)];\n\n    let path: BoundedVec<_, T::MaxSwapPathLength> = BoundedVec::try_from(path).unwrap();\n    let coin2_balance = Coins::<T>::balance(caller, ExternalCoin::Ether.into()).0;\n  }: _(\n    SystemOrigin::Signed(caller),\n    path.clone(),\n    100u64,\n    1000,\n    caller\n  )\n  verify {\n    let new_coin2_balance = Coins::<T>::balance(caller, ExternalCoin::Ether.into()).0;\n    assert_eq!(new_coin2_balance, coin2_balance + 100u64);\n  }\n\n  impl_benchmark_test_suite!(Dex, crate::mock::new_test_ext(), crate::mock::Test);\n}\n"
  },
  {
    "path": "substrate/dex/pallet/src/lib.rs",
    "content": "// This file was originally:\n\n// Copyright (C) Parity Technologies (UK) Ltd.\n// SPDX-License-Identifier: Apache-2.0\n\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// \thttp://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n// It has been forked into a crate distributed under the AGPL 3.0.\n// Please check the current distribution for up-to-date copyright and licensing information.\n\n//! # Serai Dex pallet\n//!\n//! Serai Dex pallet based on the [Uniswap V2](https://github.com/Uniswap/v2-core) logic.\n//!\n//! ## Overview\n//!\n//! This pallet allows you to:\n//!\n//!  - [create a liquidity pool](`Pallet::create_pool()`) for 2 coins\n//!  - [provide the liquidity](`Pallet::add_liquidity()`) and receive back an LP token\n//!  - [exchange the LP token back to coins](`Pallet::remove_liquidity()`)\n//!  - [swap a specific amount of coins for another](`Pallet::swap_exact_tokens_for_tokens()`) if\n//!    there is a pool created, or\n//!  - [swap some coins for a specific amount of\n//!    another](`Pallet::swap_tokens_for_exact_tokens()`).\n//!  - [query for an exchange price](`DexApi::quote_price_exact_tokens_for_tokens`) via\n//!    a runtime call endpoint\n//!  - [query the size of a liquidity pool](`DexApi::get_reserves`) via a runtime api\n//!    endpoint.\n//!\n//! The `quote_price_exact_tokens_for_tokens` and `quote_price_tokens_for_exact_tokens` functions\n//! both take a path parameter of the route to take. If you want to swap from native coin to\n//! 
non-native coin 1, you would pass in a path of `[DOT, 1]` or `[1, DOT]`. If you want to swap\n//! from non-native coin 1 to non-native coin 2, you would pass in a path of `[1, DOT, 2]`.\n//!\n//! (For an example of configuring this pallet to use `MultiLocation` as an coin id, see the\n//! cumulus repo).\n//!\n//! Here is an example `state_call` that asks for a quote of a pool of native versus coin 1:\n//!\n//! ```text\n//! curl -sS -H \"Content-Type: application/json\" -d \\\n//! '{\n//!    \"id\": 1,\n//!    \"jsonrpc\": \"2.0\",\n//!    \"method\": \"state_call\",\n//!    \"params\": [\n//!      \"DexApi_quote_price_tokens_for_exact_tokens\",\n//!      \"0x0101000000000000000000000011000000000000000000\"\n//!    ]\n//! }' \\\n//! http://localhost:9933/\n//! ```\n//! (This can be run against the kitchen sync node in the `node` folder of this repo.)\n\n#![deny(missing_docs)]\n#![cfg_attr(not(feature = \"std\"), no_std)]\n\nuse frame_support::traits::DefensiveOption;\n\n#[cfg(feature = \"runtime-benchmarks\")]\nmod benchmarking;\n\nmod types;\npub mod weights;\n\n#[cfg(test)]\nmod tests;\n\n#[cfg(test)]\nmod mock;\n\nuse frame_support::{ensure, pallet_prelude::*, BoundedBTreeSet};\nuse frame_system::{\n  pallet_prelude::{BlockNumberFor, OriginFor},\n  ensure_signed,\n};\n\npub use pallet::*;\n\nuse sp_runtime::{\n  traits::{TrailingZeroInput, IntegerSquareRoot},\n  DispatchError,\n};\n\nuse serai_primitives::*;\n\nuse sp_std::prelude::*;\npub use types::*;\npub use weights::WeightInfo;\n\n// TODO: Investigate why Substrate generates these\n#[allow(\n  unreachable_patterns,\n  clippy::cast_possible_truncation,\n  clippy::no_effect_underscore_binding\n)]\n#[frame_support::pallet]\npub mod pallet {\n  use super::*;\n\n  use sp_core::sr25519::Public;\n\n  use coins_pallet::{Pallet as CoinsPallet, Config as CoinsConfig};\n\n  /// Pool ID.\n  ///\n  /// The pool's `AccountId` is derived from this type. 
Any changes to the type may necessitate a\n  /// migration.\n  pub type PoolId = ExternalCoin;\n\n  /// LiquidityTokens Pallet as an instance of coins pallet.\n  pub type LiquidityTokens<T> = coins_pallet::Pallet<T, coins_pallet::Instance1>;\n\n  /// A type used for amount conversions.\n  pub type HigherPrecisionBalance = u128;\n\n  #[pallet::pallet]\n  pub struct Pallet<T>(_);\n\n  #[pallet::config]\n  pub trait Config:\n    frame_system::Config<AccountId = Public>\n    + CoinsConfig\n    + coins_pallet::Config<coins_pallet::Instance1>\n  {\n    /// A % the liquidity providers will take of every swap. Represents 10ths of a percent.\n    #[pallet::constant]\n    type LPFee: Get<u32>;\n\n    /// The minimum LP token amount that could be minted. Ameliorates rounding errors.\n    #[pallet::constant]\n    type MintMinLiquidity: Get<SubstrateAmount>;\n\n    /// The max number of hops in a swap.\n    #[pallet::constant]\n    type MaxSwapPathLength: Get<u32>;\n\n    /// Last N number of blocks that oracle keeps track of the prices.\n    #[pallet::constant]\n    type MedianPriceWindowLength: Get<u16>;\n\n    /// Weight information for extrinsics in this pallet.\n    type WeightInfo: WeightInfo;\n  }\n\n  /// Map from `PoolId` to `()`. This establishes whether a pool has been officially\n  /// created rather than people sending tokens directly to a pool's public account.\n  #[pallet::storage]\n  pub type Pools<T: Config> = StorageMap<_, Blake2_128Concat, PoolId, (), OptionQuery>;\n\n  #[pallet::storage]\n  #[pallet::getter(fn spot_price_for_block)]\n  pub type SpotPriceForBlock<T: Config> =\n    StorageDoubleMap<_, Identity, BlockNumberFor<T>, Identity, ExternalCoin, Amount, OptionQuery>;\n\n  /// Moving window of prices from each block.\n  ///\n  /// The [u8; 8] key is the amount's big endian bytes, and u16 is the amount of inclusions in this\n  /// multi-set. 
Since the underlying map is lexicographically sorted, this map stores amounts from\n  /// low to high.\n  #[pallet::storage]\n  pub type SpotPrices<T: Config> =\n    StorageDoubleMap<_, Identity, ExternalCoin, Identity, [u8; 8], u16, OptionQuery>;\n\n  // SpotPrices, yet with keys stored in reverse lexicographic order.\n  #[pallet::storage]\n  pub type ReverseSpotPrices<T: Config> =\n    StorageDoubleMap<_, Identity, ExternalCoin, Identity, [u8; 8], (), OptionQuery>;\n\n  /// Current length of the `SpotPrices` map.\n  #[pallet::storage]\n  pub type SpotPricesLength<T: Config> = StorageMap<_, Identity, ExternalCoin, u16, OptionQuery>;\n\n  /// Current position of the median within the `SpotPrices` map;\n  #[pallet::storage]\n  pub type CurrentMedianPosition<T: Config> =\n    StorageMap<_, Identity, ExternalCoin, u16, OptionQuery>;\n\n  /// Current median price of the prices in the `SpotPrices` map at any given time.\n  #[pallet::storage]\n  #[pallet::getter(fn median_price)]\n  pub type MedianPrice<T: Config> = StorageMap<_, Identity, ExternalCoin, Amount, OptionQuery>;\n\n  /// The price used for evaluating economic security, which is the highest observed median price.\n  #[pallet::storage]\n  #[pallet::getter(fn security_oracle_value)]\n  pub type SecurityOracleValue<T: Config> =\n    StorageMap<_, Identity, ExternalCoin, Amount, OptionQuery>;\n\n  /// Total swap volume of a given pool in terms of SRI.\n  #[pallet::storage]\n  #[pallet::getter(fn swap_volume)]\n  pub type SwapVolume<T: Config> = StorageMap<_, Identity, PoolId, u64, OptionQuery>;\n\n  impl<T: Config> Pallet<T> {\n    fn restore_median(\n      coin: ExternalCoin,\n      mut current_median_pos: u16,\n      mut current_median: Amount,\n      length: u16,\n    ) {\n      // 1 -> 0 (the only value)\n      // 2 -> 1 (the higher element), 4 -> 2 (the higher element)\n      // 3 -> 1 (the true median)\n      let target_median_pos = length / 2;\n      while current_median_pos < target_median_pos {\n        
// Get the amount of presences for the current element\n        let key = current_median.0.to_be_bytes();\n        let presences = SpotPrices::<T>::get(coin, key).unwrap();\n        // > is correct, not >=.\n        // Consider:\n        // - length = 1, current_median_pos = 0, presences = 1, target_median_pos = 0\n        // - length = 2, current_median_pos = 0, presences = 2, target_median_pos = 1\n        // - length = 2, current_median_pos = 0, presences = 1, target_median_pos = 1\n        if (current_median_pos + presences) > target_median_pos {\n          break;\n        }\n        current_median_pos += presences;\n\n        let key = SpotPrices::<T>::hashed_key_for(coin, key);\n        let next_price = SpotPrices::<T>::iter_key_prefix_from(coin, key).next().unwrap();\n        current_median = Amount(u64::from_be_bytes(next_price));\n      }\n\n      while current_median_pos > target_median_pos {\n        // Get the next element\n        let key = reverse_lexicographic_order(current_median.0.to_be_bytes());\n        let key = ReverseSpotPrices::<T>::hashed_key_for(coin, key);\n        let next_price = ReverseSpotPrices::<T>::iter_key_prefix_from(coin, key).next().unwrap();\n        let next_price = reverse_lexicographic_order(next_price);\n        current_median = Amount(u64::from_be_bytes(next_price));\n\n        // Get its amount of presences\n        let presences = SpotPrices::<T>::get(coin, current_median.0.to_be_bytes()).unwrap();\n        // Adjust from next_value_first_pos to this_value_first_pos by subtracting this value's\n        // amount of times present\n        current_median_pos -= presences;\n\n        if current_median_pos <= target_median_pos {\n          break;\n        }\n      }\n\n      CurrentMedianPosition::<T>::set(coin, Some(current_median_pos));\n      MedianPrice::<T>::set(coin, Some(current_median));\n    }\n\n    pub(crate) fn insert_into_median(coin: ExternalCoin, amount: Amount) {\n      let new_quantity_of_presences =\n      
  SpotPrices::<T>::get(coin, amount.0.to_be_bytes()).unwrap_or(0) + 1;\n      SpotPrices::<T>::set(coin, amount.0.to_be_bytes(), Some(new_quantity_of_presences));\n      if new_quantity_of_presences == 1 {\n        ReverseSpotPrices::<T>::set(\n          coin,\n          reverse_lexicographic_order(amount.0.to_be_bytes()),\n          Some(()),\n        );\n      }\n\n      let new_length = SpotPricesLength::<T>::get(coin).unwrap_or(0) + 1;\n      SpotPricesLength::<T>::set(coin, Some(new_length));\n\n      let Some(current_median) = MedianPrice::<T>::get(coin) else {\n        MedianPrice::<T>::set(coin, Some(amount));\n        CurrentMedianPosition::<T>::set(coin, Some(0));\n        return;\n      };\n\n      let mut current_median_pos = CurrentMedianPosition::<T>::get(coin).unwrap();\n      // If this is being inserted before the current median, the current median's position has\n      // increased\n      if amount < current_median {\n        current_median_pos += 1;\n      }\n      Self::restore_median(coin, current_median_pos, current_median, new_length);\n    }\n\n    pub(crate) fn remove_from_median(coin: ExternalCoin, amount: Amount) {\n      let mut current_median = MedianPrice::<T>::get(coin).unwrap();\n\n      let mut current_median_pos = CurrentMedianPosition::<T>::get(coin).unwrap();\n      if amount < current_median {\n        current_median_pos -= 1;\n      }\n\n      let new_quantity_of_presences =\n        SpotPrices::<T>::get(coin, amount.0.to_be_bytes()).unwrap() - 1;\n      if new_quantity_of_presences == 0 {\n        let normal_key = amount.0.to_be_bytes();\n        SpotPrices::<T>::remove(coin, normal_key);\n        ReverseSpotPrices::<T>::remove(coin, reverse_lexicographic_order(amount.0.to_be_bytes()));\n\n        // If we've removed the current item at this position, update to the item now at this\n        // position\n        if amount == current_median {\n          let key = SpotPrices::<T>::hashed_key_for(coin, normal_key);\n          
current_median = Amount(u64::from_be_bytes(\n            SpotPrices::<T>::iter_key_prefix_from(coin, key).next().unwrap(),\n          ));\n        }\n      } else {\n        SpotPrices::<T>::set(coin, amount.0.to_be_bytes(), Some(new_quantity_of_presences));\n      }\n\n      let new_length = SpotPricesLength::<T>::get(coin).unwrap() - 1;\n      SpotPricesLength::<T>::set(coin, Some(new_length));\n\n      Self::restore_median(coin, current_median_pos, current_median, new_length);\n    }\n  }\n\n  // Pallet's events.\n  #[pallet::event]\n  #[pallet::generate_deposit(pub(super) fn deposit_event)]\n  pub enum Event<T: Config> {\n    /// A successful call of the `CreatePool` extrinsic will create this event.\n    PoolCreated {\n      /// The pool id associated with the pool. Note that the order of the coins may not be\n      /// the same as the order specified in the create pool extrinsic.\n      pool_id: PoolId,\n      /// The account ID of the pool.\n      pool_account: T::AccountId,\n    },\n\n    /// A successful call of the `AddLiquidity` extrinsic will create this event.\n    LiquidityAdded {\n      /// The account that the liquidity was taken from.\n      who: T::AccountId,\n      /// The account that the liquidity tokens were minted to.\n      mint_to: T::AccountId,\n      /// The pool id of the pool that the liquidity was added to.\n      pool_id: PoolId,\n      /// The amount of the coin that was added to the pool.\n      coin_amount: SubstrateAmount,\n      /// The amount of the SRI that was added to the pool.\n      sri_amount: SubstrateAmount,\n      /// The amount of lp tokens that were minted of that id.\n      lp_token_minted: SubstrateAmount,\n    },\n\n    /// A successful call of the `RemoveLiquidity` extrinsic will create this event.\n    LiquidityRemoved {\n      /// The account that the liquidity tokens were burned from.\n      who: T::AccountId,\n      /// The account that the coins were transferred to.\n      withdraw_to: T::AccountId,\n      
/// The pool id that the liquidity was removed from.\n      pool_id: PoolId,\n      /// The amount of the first coin that was removed from the pool.\n      coin_amount: SubstrateAmount,\n      /// The amount of the second coin that was removed from the pool.\n      sri_amount: SubstrateAmount,\n      /// The amount of lp tokens that were burned of that id.\n      lp_token_burned: SubstrateAmount,\n    },\n\n    /// Coins have been converted from one to another. Both `SwapExactTokenForToken`\n    /// and `SwapTokenForExactToken` will generate this event.\n    SwapExecuted {\n      /// Which account was the instigator of the swap.\n      who: T::AccountId,\n      /// The account that the coins were transferred to.\n      send_to: T::AccountId,\n      /// The route of coin ids that the swap went through.\n      /// E.g. A -> SRI -> B\n      path: BoundedVec<Coin, T::MaxSwapPathLength>,\n      /// The amount of the first coin that was swapped.\n      amount_in: SubstrateAmount,\n      /// The amount of the second coin that was received.\n      amount_out: SubstrateAmount,\n    },\n  }\n\n  #[pallet::error]\n  pub enum Error<T> {\n    /// Provided coins are equal.\n    EqualCoins,\n    /// Pool already exists.\n    PoolExists,\n    /// Desired amount can't be zero.\n    WrongDesiredAmount,\n    /// Provided amount should be greater than or equal to the existential deposit/coin's\n    /// minimum amount.\n    CoinAmountLessThanMinimum,\n    /// Provided amount should be greater than or equal to the existential deposit/coin's\n    /// minimum amount.\n    SriAmountLessThanMinimum,\n    /// Reserve needs to always be greater than or equal to the existential deposit/coin's\n    /// minimum amount.\n    ReserveLeftLessThanMinimum,\n    /// Desired amount can't be equal to the pool reserve.\n    AmountOutTooHigh,\n    /// The pool doesn't exist.\n    PoolNotFound,\n    /// An overflow happened.\n    Overflow,\n    /// The minimum amount requirement for the first token in the 
pair wasn't met.\n    CoinOneDepositDidNotMeetMinimum,\n    /// The minimum amount requirement for the second token in the pair wasn't met.\n    CoinTwoDepositDidNotMeetMinimum,\n    /// The minimum amount requirement for the first token in the pair wasn't met.\n    CoinOneWithdrawalDidNotMeetMinimum,\n    /// The minimum amount requirement for the second token in the pair wasn't met.\n    CoinTwoWithdrawalDidNotMeetMinimum,\n    /// Optimal calculated amount is less than desired.\n    OptimalAmountLessThanDesired,\n    /// Insufficient liquidity minted.\n    InsufficientLiquidityMinted,\n    /// Requested liquidity can't be zero.\n    ZeroLiquidity,\n    /// Amount can't be zero.\n    ZeroAmount,\n    /// Calculated amount out is less than provided minimum amount.\n    ProvidedMinimumNotSufficientForSwap,\n    /// Provided maximum amount is not sufficient for swap.\n    ProvidedMaximumNotSufficientForSwap,\n    /// The provided path must consist of at least 2 coins.\n    InvalidPath,\n    /// It was not possible to calculate path data.\n    PathError,\n    /// The provided path must consist of unique coins.\n    NonUniquePath,\n    /// Unable to find an element in an array/vec that should have one-to-one correspondence\n    /// with another. 
For example, an array of coins constituting a `path` should have a\n    /// corresponding array of `amounts` along the path.\n    CorrespondenceError,\n  }\n\n  #[pallet::hooks]\n  impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {\n    fn on_finalize(n: BlockNumberFor<T>) {\n      // we run this on on_finalize because we want to use the last price of the block for a coin.\n      // This prevents the exploit where a malicious block proposer spikes the price in either\n      // direction, then includes a swap in the other direction (ensuring they don't get arbitraged\n      // against)\n      // Since they'll have to leave the spike present at the end of the block, making the next\n      // block the one to include any arbitrage transactions (which there's no guarantee they'll\n      // produce), this cannot be done in a way without significant risk\n      for coin in Pools::<T>::iter_keys() {\n        // insert the new price to our oracle window\n        // The spot price for 1 coin, in atomic units, to SRI is used\n        let sri_per_coin =\n          if let Ok((sri_balance, coin_balance)) = Self::get_reserves(&Coin::Serai, &coin.into()) {\n            // We use 1 coin to handle rounding errors which may occur with atomic units\n            // If we used atomic units, any coin whose atomic unit is worth less than SRI's atomic\n            // unit would cause a 'price' of 0\n            // If the decimals aren't large enough to provide sufficient buffer, use 10,000\n            let coin_decimals = coin.decimals().max(5);\n            let accuracy_increase =\n              HigherPrecisionBalance::from(SubstrateAmount::pow(10, coin_decimals));\n            u64::try_from(\n              accuracy_increase * HigherPrecisionBalance::from(sri_balance) /\n                HigherPrecisionBalance::from(coin_balance),\n            )\n            .unwrap_or(u64::MAX)\n          } else {\n            0\n          };\n\n        let sri_per_coin = Amount(sri_per_coin);\n    
    SpotPriceForBlock::<T>::set(n, coin, Some(sri_per_coin));\n        Self::insert_into_median(coin, sri_per_coin);\n        if SpotPricesLength::<T>::get(coin).unwrap() > T::MedianPriceWindowLength::get() {\n          let old = n - T::MedianPriceWindowLength::get().into();\n          let old_price = SpotPriceForBlock::<T>::get(old, coin).unwrap();\n          SpotPriceForBlock::<T>::remove(old, coin);\n          Self::remove_from_median(coin, old_price);\n        }\n\n        // update the oracle value\n        let median = Self::median_price(coin).unwrap_or(Amount(0));\n        let oracle_value = Self::security_oracle_value(coin).unwrap_or(Amount(0));\n        if median > oracle_value {\n          SecurityOracleValue::<T>::set(coin, Some(median));\n        }\n      }\n    }\n  }\n\n  impl<T: Config> Pallet<T> {\n    /// Creates an empty liquidity pool and an associated new `lp_token` coin\n    /// (the id of which is returned in the `Event::PoolCreated` event).\n    ///\n    /// Once a pool is created, someone may [`Pallet::add_liquidity`] to it.\n    pub(crate) fn create_pool(coin: ExternalCoin) -> DispatchResult {\n      // get pool_id\n      let pool_id = Self::get_pool_id(coin.into(), Coin::native())?;\n      ensure!(!Pools::<T>::contains_key(pool_id), Error::<T>::PoolExists);\n\n      let pool_account = Self::get_pool_account(pool_id);\n      frame_system::Pallet::<T>::inc_providers(&pool_account);\n\n      Pools::<T>::insert(pool_id, ());\n      Self::deposit_event(Event::PoolCreated { pool_id, pool_account });\n      Ok(())\n    }\n\n    /// A hook to be called whenever a network's session is rotated.\n    pub fn on_new_session(network: NetworkId) {\n      // Only track the price for non-SRI coins as this is SRI denominated\n      if let NetworkId::External(n) = network {\n        for coin in n.coins() {\n          SecurityOracleValue::<T>::set(coin, Self::median_price(coin));\n        }\n      }\n    }\n  }\n\n  /// Pallet's callable functions.\n  
#[pallet::call]\n  impl<T: Config> Pallet<T> {\n    /// Provide liquidity into the pool of `coin1` and `coin2`.\n    /// NOTE: an optimal amount of coin1 and coin2 will be calculated and\n    /// might be different than the provided `amount1_desired`/`amount2_desired`\n    /// thus you should provide the min amount you're happy to provide.\n    /// Params `amount1_min`/`amount2_min` represent that.\n    /// `mint_to` will be sent the liquidity tokens that represent this share of the pool.\n    ///\n    /// Once liquidity is added, someone may successfully call\n    /// [`Pallet::swap_exact_tokens_for_tokens`] successfully.\n    #[pallet::call_index(0)]\n    #[pallet::weight(T::WeightInfo::add_liquidity())]\n    #[allow(clippy::too_many_arguments)]\n    pub fn add_liquidity(\n      origin: OriginFor<T>,\n      coin: ExternalCoin,\n      coin_desired: SubstrateAmount,\n      sri_desired: SubstrateAmount,\n      coin_min: SubstrateAmount,\n      sri_min: SubstrateAmount,\n      mint_to: T::AccountId,\n    ) -> DispatchResult {\n      let sender = ensure_signed(origin)?;\n      ensure!((sri_desired > 0) && (coin_desired > 0), Error::<T>::WrongDesiredAmount);\n\n      let pool_id = Self::get_pool_id(coin.into(), Coin::native())?;\n\n      // create the pool if it doesn't exist. 
We can just attempt to do that because our checks\n      // far enough to allow that.\n      if Pools::<T>::get(pool_id).is_none() {\n        Self::create_pool(coin)?;\n      }\n      let pool_account = Self::get_pool_account(pool_id);\n\n      let sri_reserve = Self::get_balance(&pool_account, Coin::Serai);\n      let coin_reserve = Self::get_balance(&pool_account, coin.into());\n\n      let sri_amount: SubstrateAmount;\n      let coin_amount: SubstrateAmount;\n      if (sri_reserve == 0) || (coin_reserve == 0) {\n        sri_amount = sri_desired;\n        coin_amount = coin_desired;\n      } else {\n        let coin_optimal = Self::quote(sri_desired, sri_reserve, coin_reserve)?;\n\n        if coin_optimal <= coin_desired {\n          ensure!(coin_optimal >= coin_min, Error::<T>::CoinTwoDepositDidNotMeetMinimum);\n          sri_amount = sri_desired;\n          coin_amount = coin_optimal;\n        } else {\n          let sri_optimal = Self::quote(coin_desired, coin_reserve, sri_reserve)?;\n          ensure!(sri_optimal <= sri_desired, Error::<T>::OptimalAmountLessThanDesired);\n          ensure!(sri_optimal >= sri_min, Error::<T>::CoinOneDepositDidNotMeetMinimum);\n          sri_amount = sri_optimal;\n          coin_amount = coin_desired;\n        }\n      }\n\n      ensure!(sri_amount.saturating_add(sri_reserve) >= 1, Error::<T>::SriAmountLessThanMinimum);\n      ensure!(coin_amount.saturating_add(coin_reserve) >= 1, Error::<T>::CoinAmountLessThanMinimum);\n\n      Self::transfer(\n        &sender,\n        &pool_account,\n        Balance { coin: Coin::Serai, amount: Amount(sri_amount) },\n      )?;\n      Self::transfer(\n        &sender,\n        &pool_account,\n        Balance { coin: coin.into(), amount: Amount(coin_amount) },\n      )?;\n\n      let total_supply = LiquidityTokens::<T>::supply(Coin::from(coin));\n\n      let lp_token_amount: SubstrateAmount;\n      if total_supply == 0 {\n        lp_token_amount = 
Self::calc_lp_amount_for_zero_supply(sri_amount, coin_amount)?;\n        LiquidityTokens::<T>::mint(\n          pool_account,\n          Balance { coin: coin.into(), amount: Amount(T::MintMinLiquidity::get()) },\n        )?;\n      } else {\n        let side1 = Self::mul_div(sri_amount, total_supply, sri_reserve)?;\n        let side2 = Self::mul_div(coin_amount, total_supply, coin_reserve)?;\n        lp_token_amount = side1.min(side2);\n      }\n\n      ensure!(\n        lp_token_amount > T::MintMinLiquidity::get(),\n        Error::<T>::InsufficientLiquidityMinted\n      );\n\n      LiquidityTokens::<T>::mint(\n        mint_to,\n        Balance { coin: coin.into(), amount: Amount(lp_token_amount) },\n      )?;\n\n      Self::deposit_event(Event::LiquidityAdded {\n        who: sender,\n        mint_to,\n        pool_id,\n        coin_amount,\n        sri_amount,\n        lp_token_minted: lp_token_amount,\n      });\n\n      Ok(())\n    }\n\n    /// Allows you to remove liquidity by providing the `lp_token_burn` tokens that will be\n    /// burned in the process. 
With the usage of `amount1_min_receive`/`amount2_min_receive`\n    /// it's possible to control the min amount of returned tokens you're happy with.\n    #[pallet::call_index(1)]\n    #[pallet::weight(T::WeightInfo::remove_liquidity())]\n    pub fn remove_liquidity(\n      origin: OriginFor<T>,\n      coin: ExternalCoin,\n      lp_token_burn: SubstrateAmount,\n      coin_min_receive: SubstrateAmount,\n      sri_min_receive: SubstrateAmount,\n      withdraw_to: T::AccountId,\n    ) -> DispatchResult {\n      let sender = ensure_signed(origin.clone())?;\n\n      let pool_id = Self::get_pool_id(coin.into(), Coin::native()).unwrap();\n      ensure!(lp_token_burn > 0, Error::<T>::ZeroLiquidity);\n\n      Pools::<T>::get(pool_id).as_ref().ok_or(Error::<T>::PoolNotFound)?;\n\n      let pool_account = Self::get_pool_account(pool_id);\n      let sri_reserve = Self::get_balance(&pool_account, Coin::Serai);\n      let coin_reserve = Self::get_balance(&pool_account, coin.into());\n\n      let total_supply = LiquidityTokens::<T>::supply(Coin::from(coin));\n      let lp_redeem_amount = lp_token_burn;\n\n      let sri_amount = Self::mul_div(lp_redeem_amount, sri_reserve, total_supply)?;\n      let coin_amount = Self::mul_div(lp_redeem_amount, coin_reserve, total_supply)?;\n\n      ensure!(\n        (sri_amount != 0) && (sri_amount >= sri_min_receive),\n        Error::<T>::CoinOneWithdrawalDidNotMeetMinimum\n      );\n      ensure!(\n        (coin_amount != 0) && (coin_amount >= coin_min_receive),\n        Error::<T>::CoinTwoWithdrawalDidNotMeetMinimum\n      );\n      let sri_reserve_left = sri_reserve.saturating_sub(sri_amount);\n      let coin_reserve_left = coin_reserve.saturating_sub(coin_amount);\n\n      ensure!(sri_reserve_left >= 1, Error::<T>::ReserveLeftLessThanMinimum);\n      ensure!(coin_reserve_left >= 1, Error::<T>::ReserveLeftLessThanMinimum);\n\n      // burn the provided lp token amount that includes the fee\n      LiquidityTokens::<T>::burn(\n        origin,\n  
      Balance { coin: coin.into(), amount: Amount(lp_token_burn) },\n      )?;\n\n      Self::transfer(\n        &pool_account,\n        &withdraw_to,\n        Balance { coin: Coin::Serai, amount: Amount(sri_amount) },\n      )?;\n      Self::transfer(\n        &pool_account,\n        &withdraw_to,\n        Balance { coin: coin.into(), amount: Amount(coin_amount) },\n      )?;\n\n      Self::deposit_event(Event::LiquidityRemoved {\n        who: sender,\n        withdraw_to,\n        pool_id,\n        coin_amount,\n        sri_amount,\n        lp_token_burned: lp_token_burn,\n      });\n\n      Ok(())\n    }\n\n    /// Swap the exact amount of `coin1` into `coin2`.\n    /// `amount_out_min` param allows you to specify the min amount of the `coin2`\n    /// you're happy to receive.\n    ///\n    /// [`DexApi::quote_price_exact_tokens_for_tokens`] runtime call can be called\n    /// for a quote.\n    #[pallet::call_index(2)]\n    #[pallet::weight(T::WeightInfo::swap_exact_tokens_for_tokens())]\n    pub fn swap_exact_tokens_for_tokens(\n      origin: OriginFor<T>,\n      path: BoundedVec<Coin, T::MaxSwapPathLength>,\n      amount_in: SubstrateAmount,\n      amount_out_min: SubstrateAmount,\n      send_to: T::AccountId,\n    ) -> DispatchResult {\n      let sender = ensure_signed(origin)?;\n      Self::do_swap_exact_tokens_for_tokens(\n        sender,\n        path,\n        amount_in,\n        Some(amount_out_min),\n        send_to,\n      )?;\n      Ok(())\n    }\n\n    /// Swap any amount of `coin1` to get the exact amount of `coin2`.\n    /// `amount_in_max` param allows to specify the max amount of the `coin1`\n    /// you're happy to provide.\n    ///\n    /// [`DexApi::quote_price_tokens_for_exact_tokens`] runtime call can be called\n    /// for a quote.\n    #[pallet::call_index(3)]\n    #[pallet::weight(T::WeightInfo::swap_tokens_for_exact_tokens())]\n    pub fn swap_tokens_for_exact_tokens(\n      origin: OriginFor<T>,\n      path: BoundedVec<Coin, 
T::MaxSwapPathLength>,\n      amount_out: SubstrateAmount,\n      amount_in_max: SubstrateAmount,\n      send_to: T::AccountId,\n    ) -> DispatchResult {\n      let sender = ensure_signed(origin)?;\n      Self::do_swap_tokens_for_exact_tokens(\n        sender,\n        path,\n        amount_out,\n        Some(amount_in_max),\n        send_to,\n      )?;\n      Ok(())\n    }\n  }\n\n  impl<T: Config> Pallet<T> {\n    /// Swap exactly `amount_in` of coin `path[0]` for coin `path[1]`.\n    /// If an `amount_out_min` is specified, it will return an error if it is unable to acquire\n    /// the amount desired.\n    ///\n    /// Withdraws the `path[0]` coin from `sender`, deposits the `path[1]` coin to `send_to`.\n    ///\n    /// If successful, returns the amount of `path[1]` acquired for the `amount_in`.\n    pub fn do_swap_exact_tokens_for_tokens(\n      sender: T::AccountId,\n      path: BoundedVec<Coin, T::MaxSwapPathLength>,\n      amount_in: SubstrateAmount,\n      amount_out_min: Option<SubstrateAmount>,\n      send_to: T::AccountId,\n    ) -> Result<SubstrateAmount, DispatchError> {\n      ensure!(amount_in > 0, Error::<T>::ZeroAmount);\n      if let Some(amount_out_min) = amount_out_min {\n        ensure!(amount_out_min > 0, Error::<T>::ZeroAmount);\n      }\n\n      Self::validate_swap_path(&path)?;\n\n      let amounts = Self::get_amounts_out(amount_in, &path)?;\n      let amount_out =\n        *amounts.last().defensive_ok_or(\"get_amounts_out() returned an empty result\")?;\n\n      if let Some(amount_out_min) = amount_out_min {\n        ensure!(amount_out >= amount_out_min, Error::<T>::ProvidedMinimumNotSufficientForSwap);\n      }\n\n      Self::do_swap(sender, &amounts, path, send_to)?;\n      Ok(amount_out)\n    }\n\n    /// Take the `path[0]` coin and swap some amount for `amount_out` of the `path[1]`. 
If an\n    /// `amount_in_max` is specified, it will return an error if acquiring `amount_out` would be\n    /// too costly.\n    ///\n    /// Withdraws `path[0]` coin from `sender`, deposits the `path[1]` coin to `send_to`,\n    ///\n    /// If successful returns the amount of the `path[0]` taken to provide `path[1]`.\n    pub fn do_swap_tokens_for_exact_tokens(\n      sender: T::AccountId,\n      path: BoundedVec<Coin, T::MaxSwapPathLength>,\n      amount_out: SubstrateAmount,\n      amount_in_max: Option<SubstrateAmount>,\n      send_to: T::AccountId,\n    ) -> Result<SubstrateAmount, DispatchError> {\n      ensure!(amount_out > 0, Error::<T>::ZeroAmount);\n      if let Some(amount_in_max) = amount_in_max {\n        ensure!(amount_in_max > 0, Error::<T>::ZeroAmount);\n      }\n\n      Self::validate_swap_path(&path)?;\n\n      let amounts = Self::get_amounts_in(amount_out, &path)?;\n      let amount_in =\n        *amounts.first().defensive_ok_or(\"get_amounts_in() returned an empty result\")?;\n\n      if let Some(amount_in_max) = amount_in_max {\n        ensure!(amount_in <= amount_in_max, Error::<T>::ProvidedMaximumNotSufficientForSwap);\n      }\n\n      Self::do_swap(sender, &amounts, path, send_to)?;\n      Ok(amount_in)\n    }\n\n    /// Transfer an `amount` of `coin_id`.\n    fn transfer(\n      from: &T::AccountId,\n      to: &T::AccountId,\n      balance: Balance,\n    ) -> Result<Amount, DispatchError> {\n      CoinsPallet::<T>::transfer_internal(*from, *to, balance)?;\n      Ok(balance.amount)\n    }\n\n    /// Convert a `HigherPrecisionBalance` type to an `SubstrateAmount`.\n    pub(crate) fn convert_hpb_to_coin_balance(\n      amount: HigherPrecisionBalance,\n    ) -> Result<SubstrateAmount, Error<T>> {\n      amount.try_into().map_err(|_| Error::<T>::Overflow)\n    }\n\n    /// Swap coins along a `path`, depositing in `send_to`.\n    pub(crate) fn do_swap(\n      sender: T::AccountId,\n      amounts: &[SubstrateAmount],\n      path: 
BoundedVec<Coin, T::MaxSwapPathLength>,\n      send_to: T::AccountId,\n    ) -> Result<(), DispatchError> {\n      ensure!(amounts.len() > 1, Error::<T>::CorrespondenceError);\n      if let Some([coin1, coin2]) = &path.get(0 .. 2) {\n        let pool_id = Self::get_pool_id(*coin1, *coin2)?;\n        let pool_account = Self::get_pool_account(pool_id);\n        // amounts should always contain a corresponding element to path.\n        let first_amount = amounts.first().ok_or(Error::<T>::CorrespondenceError)?;\n\n        Self::transfer(\n          &sender,\n          &pool_account,\n          Balance { coin: *coin1, amount: Amount(*first_amount) },\n        )?;\n\n        let mut i = 0;\n        let path_len = u32::try_from(path.len()).unwrap();\n        #[allow(clippy::explicit_counter_loop)]\n        for coins_pair in path.windows(2) {\n          if let [coin1, coin2] = coins_pair {\n            let pool_id = Self::get_pool_id(*coin1, *coin2)?;\n            let pool_account = Self::get_pool_account(pool_id);\n\n            let amount_out =\n              amounts.get((i + 1) as usize).ok_or(Error::<T>::CorrespondenceError)?;\n\n            let to = if i < path_len - 2 {\n              let coin3 = path.get((i + 2) as usize).ok_or(Error::<T>::PathError)?;\n              Self::get_pool_account(Self::get_pool_id(*coin2, *coin3)?)\n            } else {\n              send_to\n            };\n\n            let reserve = Self::get_balance(&pool_account, *coin2);\n            let reserve_left = reserve.saturating_sub(*amount_out);\n            ensure!(reserve_left >= 1, Error::<T>::ReserveLeftLessThanMinimum);\n\n            Self::transfer(\n              &pool_account,\n              &to,\n              Balance { coin: *coin2, amount: Amount(*amount_out) },\n            )?;\n\n            // update the volume\n            let swap_volume = if *coin1 == Coin::Serai {\n              amounts.get(i as usize).ok_or(Error::<T>::CorrespondenceError)?\n            } else {\n        
      amount_out\n            };\n            let existing = SwapVolume::<T>::get(pool_id).unwrap_or(0);\n            let new_volume = existing.saturating_add(*swap_volume);\n            SwapVolume::<T>::set(pool_id, Some(new_volume));\n          }\n          i += 1;\n        }\n\n        Self::deposit_event(Event::SwapExecuted {\n          who: sender,\n          send_to,\n          path,\n          amount_in: *first_amount,\n          amount_out: *amounts.last().expect(\"Always has more than 1 element\"),\n        });\n      } else {\n        return Err(Error::<T>::InvalidPath.into());\n      }\n      Ok(())\n    }\n\n    /// The account ID of the pool.\n    ///\n    /// This actually does computation. If you need to keep using it, then make sure you cache\n    /// the value and only call this once.\n    pub fn get_pool_account(pool_id: PoolId) -> T::AccountId {\n      let encoded_pool_id = sp_io::hashing::blake2_256(&Encode::encode(&pool_id)[..]);\n\n      Decode::decode(&mut TrailingZeroInput::new(encoded_pool_id.as_ref()))\n        .expect(\"infinite length input; no invalid inputs for type; qed\")\n    }\n\n    /// Get the `owner`'s balance of `coin`, which could be the chain's native coin or another\n    /// fungible. Returns a value in the form of an `Amount`.\n    fn get_balance(owner: &T::AccountId, coin: Coin) -> SubstrateAmount {\n      CoinsPallet::<T>::balance(*owner, coin).0\n    }\n\n    /// Returns a pool id constructed from 2 coins.\n    /// We expect deterministic order, so (coin1, coin2) or (coin2, coin1) returns the same\n    /// result. 
Coins have to be different and one of them should be Coin::Serai.\n    pub fn get_pool_id(coin1: Coin, coin2: Coin) -> Result<PoolId, Error<T>> {\n      ensure!((coin1 == Coin::Serai) || (coin2 == Coin::Serai), Error::<T>::PoolNotFound);\n      ensure!(coin1 != coin2, Error::<T>::EqualCoins);\n      ExternalCoin::try_from(coin1)\n        .or_else(|()| ExternalCoin::try_from(coin2))\n        .map_err(|()| Error::<T>::PoolNotFound)\n    }\n\n    /// Returns the balance of each coin in the pool.\n    /// The tuple result is in the order requested (not necessarily the same as pool order).\n    pub fn get_reserves(\n      coin1: &Coin,\n      coin2: &Coin,\n    ) -> Result<(SubstrateAmount, SubstrateAmount), Error<T>> {\n      let pool_id = Self::get_pool_id(*coin1, *coin2)?;\n      let pool_account = Self::get_pool_account(pool_id);\n\n      let balance1 = Self::get_balance(&pool_account, *coin1);\n      let balance2 = Self::get_balance(&pool_account, *coin2);\n\n      if (balance1 == 0) || (balance2 == 0) {\n        Err(Error::<T>::PoolNotFound)?;\n      }\n\n      Ok((balance1, balance2))\n    }\n\n    /// Leading to an amount at the end of a `path`, get the required amounts in.\n    pub(crate) fn get_amounts_in(\n      amount_out: SubstrateAmount,\n      path: &BoundedVec<Coin, T::MaxSwapPathLength>,\n    ) -> Result<Vec<SubstrateAmount>, DispatchError> {\n      let mut amounts: Vec<SubstrateAmount> = vec![amount_out];\n\n      for coins_pair in path.windows(2).rev() {\n        if let [coin1, coin2] = coins_pair {\n          let (reserve_in, reserve_out) = Self::get_reserves(coin1, coin2)?;\n          let prev_amount = amounts.last().expect(\"Always has at least one element\");\n          let amount_in = Self::get_amount_in(*prev_amount, reserve_in, reserve_out)?;\n          amounts.push(amount_in);\n        }\n      }\n\n      amounts.reverse();\n      Ok(amounts)\n    }\n\n    /// Following an amount into a `path`, get the corresponding amounts out.\n    
pub(crate) fn get_amounts_out(\n      amount_in: SubstrateAmount,\n      path: &BoundedVec<Coin, T::MaxSwapPathLength>,\n    ) -> Result<Vec<SubstrateAmount>, DispatchError> {\n      let mut amounts: Vec<SubstrateAmount> = vec![amount_in];\n\n      for coins_pair in path.windows(2) {\n        if let [coin1, coin2] = coins_pair {\n          let (reserve_in, reserve_out) = Self::get_reserves(coin1, coin2)?;\n          let prev_amount = amounts.last().expect(\"Always has at least one element\");\n          let amount_out = Self::get_amount_out(*prev_amount, reserve_in, reserve_out)?;\n          amounts.push(amount_out);\n        }\n      }\n\n      Ok(amounts)\n    }\n\n    /// Used by the RPC service to provide current prices.\n    pub fn quote_price_exact_tokens_for_tokens(\n      coin1: Coin,\n      coin2: Coin,\n      amount: SubstrateAmount,\n      include_fee: bool,\n    ) -> Option<SubstrateAmount> {\n      let pool_id = Self::get_pool_id(coin1, coin2).ok()?;\n      let pool_account = Self::get_pool_account(pool_id);\n\n      let balance1 = Self::get_balance(&pool_account, coin1);\n      let balance2 = Self::get_balance(&pool_account, coin2);\n      if balance1 != 0 {\n        if include_fee {\n          Self::get_amount_out(amount, balance1, balance2).ok()\n        } else {\n          Self::quote(amount, balance1, balance2).ok()\n        }\n      } else {\n        None\n      }\n    }\n\n    /// Used by the RPC service to provide current prices.\n    pub fn quote_price_tokens_for_exact_tokens(\n      coin1: Coin,\n      coin2: Coin,\n      amount: SubstrateAmount,\n      include_fee: bool,\n    ) -> Option<SubstrateAmount> {\n      let pool_id = Self::get_pool_id(coin1, coin2).ok()?;\n      let pool_account = Self::get_pool_account(pool_id);\n\n      let balance1 = Self::get_balance(&pool_account, coin1);\n      let balance2 = Self::get_balance(&pool_account, coin2);\n      if balance1 != 0 {\n        if include_fee {\n          Self::get_amount_in(amount, 
balance1, balance2).ok()\n        } else {\n          Self::quote(amount, balance2, balance1).ok()\n        }\n      } else {\n        None\n      }\n    }\n\n    /// Calculates the optimal amount from the reserves.\n    pub fn quote(\n      amount: SubstrateAmount,\n      reserve1: SubstrateAmount,\n      reserve2: SubstrateAmount,\n    ) -> Result<SubstrateAmount, Error<T>> {\n      // amount * reserve2 / reserve1\n      Self::mul_div(amount, reserve2, reserve1)\n    }\n\n    pub(super) fn calc_lp_amount_for_zero_supply(\n      amount1: SubstrateAmount,\n      amount2: SubstrateAmount,\n    ) -> Result<SubstrateAmount, Error<T>> {\n      let amount1 = HigherPrecisionBalance::from(amount1);\n      let amount2 = HigherPrecisionBalance::from(amount2);\n\n      let result = amount1\n        .checked_mul(amount2)\n        .ok_or(Error::<T>::Overflow)?\n        .integer_sqrt()\n        .checked_sub(T::MintMinLiquidity::get().into())\n        .ok_or(Error::<T>::InsufficientLiquidityMinted)?;\n\n      result.try_into().map_err(|_| Error::<T>::Overflow)\n    }\n\n    fn mul_div(\n      a: SubstrateAmount,\n      b: SubstrateAmount,\n      c: SubstrateAmount,\n    ) -> Result<SubstrateAmount, Error<T>> {\n      let a = HigherPrecisionBalance::from(a);\n      let b = HigherPrecisionBalance::from(b);\n      let c = HigherPrecisionBalance::from(c);\n\n      let result =\n        a.checked_mul(b).ok_or(Error::<T>::Overflow)?.checked_div(c).ok_or(Error::<T>::Overflow)?;\n\n      result.try_into().map_err(|_| Error::<T>::Overflow)\n    }\n\n    /// Calculates amount out.\n    ///\n    /// Given an input amount of an coin and pair reserves, returns the maximum output amount\n    /// of the other coin.\n    pub fn get_amount_out(\n      amount_in: SubstrateAmount,\n      reserve_in: SubstrateAmount,\n      reserve_out: SubstrateAmount,\n    ) -> Result<SubstrateAmount, Error<T>> {\n      let amount_in = HigherPrecisionBalance::from(amount_in);\n      let reserve_in = 
HigherPrecisionBalance::from(reserve_in);\n      let reserve_out = HigherPrecisionBalance::from(reserve_out);\n\n      if (reserve_in == 0) || (reserve_out == 0) {\n        return Err(Error::<T>::ZeroLiquidity);\n      }\n\n      let amount_in_with_fee = amount_in\n        .checked_mul(\n          HigherPrecisionBalance::from(1000u32) - HigherPrecisionBalance::from(T::LPFee::get()),\n        )\n        .ok_or(Error::<T>::Overflow)?;\n\n      let numerator = amount_in_with_fee.checked_mul(reserve_out).ok_or(Error::<T>::Overflow)?;\n\n      let denominator = reserve_in\n        .checked_mul(1000u32.into())\n        .ok_or(Error::<T>::Overflow)?\n        .checked_add(amount_in_with_fee)\n        .ok_or(Error::<T>::Overflow)?;\n\n      let result = numerator.checked_div(denominator).ok_or(Error::<T>::Overflow)?;\n\n      result.try_into().map_err(|_| Error::<T>::Overflow)\n    }\n\n    /// Calculates amount in.\n    ///\n    /// Given an output amount of an coin and pair reserves, returns a required input amount\n    /// of the other coin.\n    pub fn get_amount_in(\n      amount_out: SubstrateAmount,\n      reserve_in: SubstrateAmount,\n      reserve_out: SubstrateAmount,\n    ) -> Result<SubstrateAmount, Error<T>> {\n      let amount_out = HigherPrecisionBalance::from(amount_out);\n      let reserve_in = HigherPrecisionBalance::from(reserve_in);\n      let reserve_out = HigherPrecisionBalance::from(reserve_out);\n\n      if (reserve_in == 0) || (reserve_out == 0) {\n        Err(Error::<T>::ZeroLiquidity)?\n      }\n\n      if amount_out >= reserve_out {\n        Err(Error::<T>::AmountOutTooHigh)?\n      }\n\n      let numerator = reserve_in\n        .checked_mul(amount_out)\n        .ok_or(Error::<T>::Overflow)?\n        .checked_mul(1000u32.into())\n        .ok_or(Error::<T>::Overflow)?;\n\n      let denominator = reserve_out\n        .checked_sub(amount_out)\n        .ok_or(Error::<T>::Overflow)?\n        .checked_mul(\n          
HigherPrecisionBalance::from(1000u32) - HigherPrecisionBalance::from(T::LPFee::get()),\n        )\n        .ok_or(Error::<T>::Overflow)?;\n\n      let result = numerator\n        .checked_div(denominator)\n        .ok_or(Error::<T>::Overflow)?\n        .checked_add(1)\n        .ok_or(Error::<T>::Overflow)?;\n\n      result.try_into().map_err(|_| Error::<T>::Overflow)\n    }\n\n    /// Ensure that a path is valid.\n    fn validate_swap_path(\n      path: &BoundedVec<Coin, T::MaxSwapPathLength>,\n    ) -> Result<(), DispatchError> {\n      ensure!(path.len() >= 2, Error::<T>::InvalidPath);\n\n      // validate all the pools in the path are unique\n      let mut pools = BoundedBTreeSet::<PoolId, T::MaxSwapPathLength>::new();\n      for coins_pair in path.windows(2) {\n        if let [coin1, coin2] = coins_pair {\n          let pool_id = Self::get_pool_id(*coin1, *coin2)?;\n          let new_element = pools.try_insert(pool_id).map_err(|_| Error::<T>::Overflow)?;\n          if !new_element {\n            return Err(Error::<T>::NonUniquePath.into());\n          }\n        }\n      }\n      Ok(())\n    }\n  }\n}\n\nimpl<T: Config> Swap<T::AccountId, HigherPrecisionBalance, Coin> for Pallet<T> {\n  fn swap_exact_tokens_for_tokens(\n    sender: T::AccountId,\n    path: Vec<Coin>,\n    amount_in: HigherPrecisionBalance,\n    amount_out_min: Option<HigherPrecisionBalance>,\n    send_to: T::AccountId,\n  ) -> Result<HigherPrecisionBalance, DispatchError> {\n    let path = path.try_into().map_err(|_| Error::<T>::PathError)?;\n    let amount_out_min = amount_out_min.map(Self::convert_hpb_to_coin_balance).transpose()?;\n    let amount_out = Self::do_swap_exact_tokens_for_tokens(\n      sender,\n      path,\n      Self::convert_hpb_to_coin_balance(amount_in)?,\n      amount_out_min,\n      send_to,\n    )?;\n    Ok(amount_out.into())\n  }\n\n  fn swap_tokens_for_exact_tokens(\n    sender: T::AccountId,\n    path: Vec<Coin>,\n    amount_out: HigherPrecisionBalance,\n    
amount_in_max: Option<HigherPrecisionBalance>,\n    send_to: T::AccountId,\n  ) -> Result<HigherPrecisionBalance, DispatchError> {\n    let path = path.try_into().map_err(|_| Error::<T>::PathError)?;\n    let amount_in_max = amount_in_max.map(Self::convert_hpb_to_coin_balance).transpose()?;\n    let amount_in = Self::do_swap_tokens_for_exact_tokens(\n      sender,\n      path,\n      Self::convert_hpb_to_coin_balance(amount_out)?,\n      amount_in_max,\n      send_to,\n    )?;\n    Ok(amount_in.into())\n  }\n}\n\nsp_api::decl_runtime_apis! {\n  /// This runtime api allows people to query the size of the liquidity pools\n  /// and quote prices for swaps.\n  pub trait DexApi {\n    /// Provides a quote for [`Pallet::swap_tokens_for_exact_tokens`].\n    ///\n    /// Note that the price may have changed by the time the transaction is executed.\n    /// (Use `amount_in_max` to control slippage.)\n    fn quote_price_tokens_for_exact_tokens(\n      coin1: Coin,\n      coin2: Coin,\n      amount: SubstrateAmount,\n      include_fee: bool\n    ) -> Option<SubstrateAmount>;\n\n    /// Provides a quote for [`Pallet::swap_exact_tokens_for_tokens`].\n    ///\n    /// Note that the price may have changed by the time the transaction is executed.\n    /// (Use `amount_out_min` to control slippage.)\n    fn quote_price_exact_tokens_for_tokens(\n      coin1: Coin,\n      coin2: Coin,\n      amount: SubstrateAmount,\n      include_fee: bool\n    ) -> Option<SubstrateAmount>;\n\n    /// Returns the size of the liquidity pool for the given coin pair.\n    fn get_reserves(coin1: Coin, coin2: Coin) -> Option<(SubstrateAmount, SubstrateAmount)>;\n  }\n}\n\nsp_core::generate_feature_enabled_macro!(\n  runtime_benchmarks_enabled,\n  feature = \"runtime-benchmarks\",\n  $\n);\n"
  },
  {
    "path": "substrate/dex/pallet/src/mock.rs",
    "content": "// This file was originally:\n\n// Copyright (C) Parity Technologies (UK) Ltd.\n// SPDX-License-Identifier: Apache-2.0\n\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// \thttp://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n// It has been forked into a crate distributed under the AGPL 3.0.\n// Please check the current distribution for up-to-date copyright and licensing information.\n\n//! Test environment for Dex pallet.\n\nuse super::*;\nuse crate as dex;\n\nuse frame_support::{\n  construct_runtime, derive_impl,\n  traits::{ConstU16, ConstU32, ConstU64},\n};\n\nuse sp_core::sr25519::Public;\nuse sp_runtime::{traits::IdentityLookup, BuildStorage};\n\nuse serai_primitives::{Coin, Balance, Amount, system_address};\n\npub use coins_pallet as coins;\n\ntype Block = frame_system::mocking::MockBlock<Test>;\n\npub const MEDIAN_PRICE_WINDOW_LENGTH: u16 = 10;\n\nconstruct_runtime!(\n  pub enum Test\n  {\n    System: frame_system,\n    CoinsPallet: coins,\n    LiquidityTokens: coins::<Instance1>::{Pallet, Call, Storage, Event<T>},\n    Dex: dex,\n  }\n);\n\n#[derive_impl(frame_system::config_preludes::TestDefaultConfig)]\nimpl frame_system::Config for Test {\n  type AccountId = Public;\n  type Lookup = IdentityLookup<Self::AccountId>;\n  type Block = Block;\n}\n\nimpl coins::Config for Test {\n  type AllowMint = ();\n}\n\nimpl coins::Config<coins::Instance1> for Test {\n  type AllowMint = ();\n}\n\nimpl Config for Test {\n  type WeightInfo = ();\n  type LPFee = ConstU32<3>; // means 0.3%\n  type 
MaxSwapPathLength = ConstU32<4>;\n\n  type MedianPriceWindowLength = ConstU16<{ MEDIAN_PRICE_WINDOW_LENGTH }>;\n\n  // 100 is good enough when the main currency has 12 decimals.\n  type MintMinLiquidity = ConstU64<100>;\n}\n\npub(crate) fn new_test_ext() -> sp_io::TestExternalities {\n  let mut t = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();\n\n  let accounts: Vec<Public> = vec![\n    system_address(b\"account1\").into(),\n    system_address(b\"account2\").into(),\n    system_address(b\"account3\").into(),\n    system_address(b\"account4\").into(),\n  ];\n  coins::GenesisConfig::<Test> {\n    accounts: accounts\n      .into_iter()\n      .map(|a| (a, Balance { coin: Coin::Serai, amount: Amount(1 << 60) }))\n      .collect(),\n    _ignore: Default::default(),\n  }\n  .assimilate_storage(&mut t)\n  .unwrap();\n\n  let mut ext = sp_io::TestExternalities::new(t);\n  ext.execute_with(|| System::set_block_number(1));\n  ext\n}\n"
  },
  {
    "path": "substrate/dex/pallet/src/tests.rs",
    "content": "// This file was originally:\n\n// Copyright (C) Parity Technologies (UK) Ltd.\n// SPDX-License-Identifier: Apache-2.0\n\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// \thttp://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n// It has been forked into a crate distributed under the AGPL 3.0.\n// Please check the current distribution for up-to-date copyright and licensing information.\n\nuse crate::{\n  mock::{*, MEDIAN_PRICE_WINDOW_LENGTH},\n  *,\n};\nuse frame_support::{assert_noop, assert_ok};\n\npub use coins_pallet as coins;\n\nuse coins::Pallet as CoinsPallet;\n\nuse serai_primitives::{Balance, COINS, PublicKey, system_address, Amount};\n\ntype LiquidityTokens<T> = coins_pallet::Pallet<T, coins::Instance1>;\ntype LiquidityTokensError<T> = coins_pallet::Error<T, coins::Instance1>;\n\nfn events() -> Vec<Event<Test>> {\n  let result = System::events()\n    .into_iter()\n    .map(|r| r.event)\n    .filter_map(|e| if let mock::RuntimeEvent::Dex(inner) = e { Some(inner) } else { None })\n    .collect();\n\n  System::reset_events();\n\n  result\n}\n\nfn pools() -> Vec<PoolId> {\n  let mut s: Vec<_> = Pools::<Test>::iter().map(|x| x.0).collect();\n  s.sort();\n  s\n}\n\nfn coins() -> Vec<Coin> {\n  COINS.to_vec()\n}\n\nfn balance(owner: PublicKey, coin: Coin) -> u64 {\n  CoinsPallet::<Test>::balance(owner, coin).0\n}\n\nfn pool_balance(owner: PublicKey, token_id: Coin) -> u64 {\n  LiquidityTokens::<Test>::balance(owner, token_id).0\n}\n\nmacro_rules! 
bvec {\n\t($( $x:tt )*) => {\n\t\tvec![$( $x )*].try_into().unwrap()\n\t}\n}\n\n#[test]\nfn check_pool_accounts_dont_collide() {\n  use std::collections::HashSet;\n  let mut map = HashSet::new();\n\n  for coin in coins() {\n    if let Coin::External(c) = coin {\n      let account = Dex::get_pool_account(c);\n      if map.contains(&account) {\n        panic!(\"Collision at {c:?}\");\n      }\n      map.insert(account);\n    }\n  }\n}\n\n#[test]\nfn check_max_numbers() {\n  new_test_ext().execute_with(|| {\n    assert_eq!(Dex::quote(3u64, u64::MAX, u64::MAX).ok().unwrap(), 3);\n    assert!(Dex::quote(u64::MAX, 3u64, u64::MAX).is_err());\n    assert_eq!(Dex::quote(u64::MAX, u64::MAX, 1u64).ok().unwrap(), 1);\n\n    assert_eq!(Dex::get_amount_out(100u64, u64::MAX, u64::MAX).ok().unwrap(), 99);\n    assert_eq!(Dex::get_amount_in(100u64, u64::MAX, u64::MAX).ok().unwrap(), 101);\n  });\n}\n\n#[test]\nfn can_create_pool() {\n  new_test_ext().execute_with(|| {\n    let coin_account_deposit: u64 = 0;\n    let user: PublicKey = system_address(b\"user1\").into();\n    let coin1 = Coin::native();\n    let coin2 = Coin::External(ExternalCoin::Monero);\n    let pool_id = Dex::get_pool_id(coin1, coin2).unwrap();\n\n    assert_ok!(CoinsPallet::<Test>::mint(user, Balance { coin: coin1, amount: Amount(1000) }));\n    assert_ok!(Dex::create_pool(coin2.try_into().unwrap()));\n\n    assert_eq!(balance(user, coin1), 1000 - coin_account_deposit);\n\n    assert_eq!(\n      events(),\n      [Event::<Test>::PoolCreated { pool_id, pool_account: Dex::get_pool_account(pool_id) }]\n    );\n    assert_eq!(pools(), vec![pool_id]);\n  });\n}\n\n#[test]\nfn create_same_pool_twice_should_fail() {\n  new_test_ext().execute_with(|| {\n    let coin = ExternalCoin::Dai;\n    assert_ok!(Dex::create_pool(coin));\n    assert_noop!(Dex::create_pool(coin), Error::<Test>::PoolExists);\n  });\n}\n\n#[test]\nfn different_pools_should_have_different_lp_tokens() {\n  new_test_ext().execute_with(|| {\n    let coin1 
= Coin::native();\n    let coin2 = Coin::External(ExternalCoin::Bitcoin);\n    let coin3 = Coin::External(ExternalCoin::Ether);\n    let pool_id_1_2 = Dex::get_pool_id(coin1, coin2).unwrap();\n    let pool_id_1_3 = Dex::get_pool_id(coin1, coin3).unwrap();\n\n    let lp_token2_1 = coin2;\n    assert_ok!(Dex::create_pool(coin2.try_into().unwrap()));\n    let lp_token3_1 = coin3;\n\n    assert_eq!(\n      events(),\n      [Event::<Test>::PoolCreated {\n        pool_id: pool_id_1_2,\n        pool_account: Dex::get_pool_account(pool_id_1_2),\n      }]\n    );\n\n    assert_ok!(Dex::create_pool(coin3.try_into().unwrap()));\n    assert_eq!(\n      events(),\n      [Event::<Test>::PoolCreated {\n        pool_id: pool_id_1_3,\n        pool_account: Dex::get_pool_account(pool_id_1_3),\n      }]\n    );\n\n    assert_ne!(lp_token2_1, lp_token3_1);\n  });\n}\n\n#[test]\nfn can_add_liquidity() {\n  new_test_ext().execute_with(|| {\n    let user = system_address(b\"user1\").into();\n    let coin1 = Coin::native();\n    let coin2 = Coin::External(ExternalCoin::Dai);\n    let coin3 = Coin::External(ExternalCoin::Monero);\n\n    let lp_token1 = coin2;\n    assert_ok!(Dex::create_pool(coin2.try_into().unwrap()));\n    let lp_token2 = coin3;\n    assert_ok!(Dex::create_pool(coin3.try_into().unwrap()));\n\n    assert_ok!(CoinsPallet::<Test>::mint(\n      user,\n      Balance { coin: coin1, amount: Amount(10000 * 2 + 1) }\n    ));\n    assert_ok!(CoinsPallet::<Test>::mint(user, Balance { coin: coin2, amount: Amount(1000) }));\n    assert_ok!(CoinsPallet::<Test>::mint(user, Balance { coin: coin3, amount: Amount(1000) }));\n\n    assert_ok!(Dex::add_liquidity(\n      RuntimeOrigin::signed(user),\n      coin2.try_into().unwrap(),\n      10,\n      10000,\n      10,\n      10000,\n      user,\n    ));\n\n    let pool_id = Dex::get_pool_id(coin1, coin2).unwrap();\n    assert!(events().contains(&Event::<Test>::LiquidityAdded {\n      who: user,\n      mint_to: user,\n      pool_id,\n      
sri_amount: 10000,\n      coin_amount: 10,\n      lp_token_minted: 216,\n    }));\n    let pallet_account = Dex::get_pool_account(pool_id);\n    assert_eq!(balance(pallet_account, coin1), 10000);\n    assert_eq!(balance(pallet_account, coin2), 10);\n    assert_eq!(balance(user, coin1), 10000 + 1);\n    assert_eq!(balance(user, coin2), 1000 - 10);\n    assert_eq!(pool_balance(user, lp_token1), 216);\n\n    // try to pass the non-native - native coins, the result should be the same\n    assert_ok!(Dex::add_liquidity(\n      RuntimeOrigin::signed(user),\n      coin3.try_into().unwrap(),\n      10,\n      10000,\n      10,\n      10000,\n      user,\n    ));\n\n    let pool_id = Dex::get_pool_id(coin1, coin3).unwrap();\n    assert!(events().contains(&Event::<Test>::LiquidityAdded {\n      who: user,\n      mint_to: user,\n      pool_id,\n      sri_amount: 10000,\n      coin_amount: 10,\n      lp_token_minted: 216,\n    }));\n    let pallet_account = Dex::get_pool_account(pool_id);\n    assert_eq!(balance(pallet_account, coin1), 10000);\n    assert_eq!(balance(pallet_account, coin3), 10);\n    assert_eq!(balance(user, coin1), 1);\n    assert_eq!(balance(user, coin3), 1000 - 10);\n    assert_eq!(pool_balance(user, lp_token2), 216);\n  });\n}\n\n#[test]\nfn add_tiny_liquidity_leads_to_insufficient_liquidity_minted_error() {\n  new_test_ext().execute_with(|| {\n    let user = system_address(b\"user1\").into();\n    let coin1 = Coin::native();\n    let coin2 = ExternalCoin::Bitcoin;\n\n    assert_ok!(Dex::create_pool(coin2));\n\n    assert_ok!(CoinsPallet::<Test>::mint(user, Balance { coin: coin1, amount: Amount(1000) }));\n    assert_ok!(CoinsPallet::<Test>::mint(\n      user,\n      Balance { coin: coin2.into(), amount: Amount(1000) }\n    ));\n\n    assert_noop!(\n      Dex::add_liquidity(RuntimeOrigin::signed(user), coin2, 1, 1, 1, 1, user),\n      Error::<Test>::InsufficientLiquidityMinted\n    );\n  });\n}\n\n#[test]\nfn add_tiny_liquidity_directly_to_pool_address() 
{\n  new_test_ext().execute_with(|| {\n    let user = system_address(b\"user1\").into();\n    let coin1 = Coin::native();\n    let coin2 = Coin::External(ExternalCoin::Ether);\n    let coin3 = Coin::External(ExternalCoin::Dai);\n\n    assert_ok!(Dex::create_pool(coin2.try_into().unwrap()));\n    assert_ok!(Dex::create_pool(coin3.try_into().unwrap()));\n\n    assert_ok!(CoinsPallet::<Test>::mint(user, Balance { coin: coin1, amount: Amount(10000 * 2) }));\n    assert_ok!(CoinsPallet::<Test>::mint(user, Balance { coin: coin2, amount: Amount(10000) }));\n    assert_ok!(CoinsPallet::<Test>::mint(user, Balance { coin: coin3, amount: Amount(10000) }));\n\n    // check we're still able to add the liquidity even when the pool already has some coin1\n    let pallet_account = Dex::get_pool_account(Dex::get_pool_id(coin1, coin2).unwrap());\n    assert_ok!(CoinsPallet::<Test>::mint(\n      pallet_account,\n      Balance { coin: coin1, amount: Amount(1000) }\n    ));\n\n    assert_ok!(Dex::add_liquidity(\n      RuntimeOrigin::signed(user),\n      coin2.try_into().unwrap(),\n      10,\n      10000,\n      10,\n      10000,\n      user,\n    ));\n\n    // check the same but for coin3 (non-native token)\n    let pallet_account = Dex::get_pool_account(Dex::get_pool_id(coin1, coin3).unwrap());\n    assert_ok!(CoinsPallet::<Test>::mint(\n      pallet_account,\n      Balance { coin: coin2, amount: Amount(1) }\n    ));\n    assert_ok!(Dex::add_liquidity(\n      RuntimeOrigin::signed(user),\n      coin3.try_into().unwrap(),\n      10,\n      10000,\n      10,\n      10000,\n      user,\n    ));\n  });\n}\n\n#[test]\nfn can_remove_liquidity() {\n  new_test_ext().execute_with(|| {\n    let user = system_address(b\"user1\").into();\n    let coin1 = Coin::native();\n    let coin2 = Coin::External(ExternalCoin::Monero);\n    let pool_id = Dex::get_pool_id(coin1, coin2).unwrap();\n\n    let lp_token = coin2;\n    assert_ok!(Dex::create_pool(coin2.try_into().unwrap()));\n\n    
assert_ok!(CoinsPallet::<Test>::mint(\n      user,\n      Balance { coin: coin1, amount: Amount(10000000000) }\n    ));\n    assert_ok!(CoinsPallet::<Test>::mint(user, Balance { coin: coin2, amount: Amount(100000) }));\n\n    assert_ok!(Dex::add_liquidity(\n      RuntimeOrigin::signed(user),\n      coin2.try_into().unwrap(),\n      100000,\n      1000000000,\n      100000,\n      1000000000,\n      user,\n    ));\n\n    let total_lp_received = pool_balance(user, lp_token);\n\n    assert_ok!(Dex::remove_liquidity(\n      RuntimeOrigin::signed(user),\n      coin2.try_into().unwrap(),\n      total_lp_received,\n      0,\n      0,\n      user,\n    ));\n\n    assert!(events().contains(&Event::<Test>::LiquidityRemoved {\n      who: user,\n      withdraw_to: user,\n      pool_id,\n      sri_amount: 999990000,\n      coin_amount: 99999,\n      lp_token_burned: total_lp_received,\n    }));\n\n    let pool_account = Dex::get_pool_account(pool_id);\n    assert_eq!(balance(pool_account, coin1), 10000);\n    assert_eq!(balance(pool_account, coin2), 1);\n    assert_eq!(pool_balance(pool_account, lp_token), 100);\n\n    assert_eq!(balance(user, coin1), 10000000000 - 1000000000 + 999990000);\n    assert_eq!(balance(user, coin2), 99999);\n    assert_eq!(pool_balance(user, lp_token), 0);\n  });\n}\n\n#[test]\nfn can_not_redeem_more_lp_tokens_than_were_minted() {\n  new_test_ext().execute_with(|| {\n    let user = system_address(b\"user1\").into();\n    let coin1 = Coin::native();\n    let coin2 = Coin::External(ExternalCoin::Dai);\n    let lp_token = coin2;\n\n    assert_ok!(Dex::create_pool(coin2.try_into().unwrap()));\n\n    assert_ok!(CoinsPallet::<Test>::mint(user, Balance { coin: coin1, amount: Amount(10000) }));\n    assert_ok!(CoinsPallet::<Test>::mint(user, Balance { coin: coin2, amount: Amount(1000) }));\n\n    assert_ok!(Dex::add_liquidity(\n      RuntimeOrigin::signed(user),\n      coin2.try_into().unwrap(),\n      10,\n      10000,\n      10,\n      10000,\n      
user,\n    ));\n\n    // Only 216 lp_tokens_minted\n    assert_eq!(pool_balance(user, lp_token), 216);\n\n    assert_noop!(\n      Dex::remove_liquidity(\n        RuntimeOrigin::signed(user),\n        coin2.try_into().unwrap(),\n        216 + 1, // Try and redeem 10 lp tokens while only 9 minted.\n        0,\n        0,\n        user,\n      ),\n      LiquidityTokensError::<Test>::NotEnoughCoins\n    );\n  });\n}\n\n#[test]\nfn can_quote_price() {\n  new_test_ext().execute_with(|| {\n    let user = system_address(b\"user1\").into();\n    let coin1 = Coin::native();\n    let coin2 = Coin::External(ExternalCoin::Ether);\n\n    assert_ok!(Dex::create_pool(coin2.try_into().unwrap()));\n\n    assert_ok!(CoinsPallet::<Test>::mint(user, Balance { coin: coin1, amount: Amount(100000) }));\n    assert_ok!(CoinsPallet::<Test>::mint(user, Balance { coin: coin2, amount: Amount(1000) }));\n\n    assert_ok!(Dex::add_liquidity(\n      RuntimeOrigin::signed(user),\n      coin2.try_into().unwrap(),\n      200,\n      10000,\n      1,\n      1,\n      user,\n    ));\n\n    assert_eq!(\n      Dex::quote_price_exact_tokens_for_tokens(Coin::native(), coin2, 3000, false,),\n      Some(60)\n    );\n    // including fee so should get less out...\n    assert_eq!(\n      Dex::quote_price_exact_tokens_for_tokens(Coin::native(), coin2, 3000, true,),\n      Some(46)\n    );\n    // Check it still gives same price:\n    // (if the above accidentally exchanged then it would not give same quote as before)\n    assert_eq!(\n      Dex::quote_price_exact_tokens_for_tokens(Coin::native(), coin2, 3000, false,),\n      Some(60)\n    );\n    // including fee so should get less out...\n    assert_eq!(\n      Dex::quote_price_exact_tokens_for_tokens(Coin::native(), coin2, 3000, true,),\n      Some(46)\n    );\n\n    // Check inverse:\n    assert_eq!(\n      Dex::quote_price_exact_tokens_for_tokens(coin2, Coin::native(), 60, false,),\n      Some(3000)\n    );\n    // including fee so should get less 
out...\n    assert_eq!(\n      Dex::quote_price_exact_tokens_for_tokens(coin2, Coin::native(), 60, true,),\n      Some(2302)\n    );\n\n    //\n    // same tests as above but for quote_price_tokens_for_exact_tokens:\n    //\n    assert_eq!(\n      Dex::quote_price_tokens_for_exact_tokens(Coin::native(), coin2, 60, false,),\n      Some(3000)\n    );\n    // including fee so should need to put more in...\n    assert_eq!(\n      Dex::quote_price_tokens_for_exact_tokens(Coin::native(), coin2, 60, true,),\n      Some(4299)\n    );\n    // Check it still gives same price:\n    // (if the above accidentally exchanged then it would not give same quote as before)\n    assert_eq!(\n      Dex::quote_price_tokens_for_exact_tokens(Coin::native(), coin2, 60, false,),\n      Some(3000)\n    );\n    // including fee so should need to put more in...\n    assert_eq!(\n      Dex::quote_price_tokens_for_exact_tokens(Coin::native(), coin2, 60, true,),\n      Some(4299)\n    );\n\n    // Check inverse:\n    assert_eq!(\n      Dex::quote_price_tokens_for_exact_tokens(coin2, Coin::native(), 3000, false,),\n      Some(60)\n    );\n    // including fee so should need to put more in...\n    assert_eq!(\n      Dex::quote_price_tokens_for_exact_tokens(coin2, Coin::native(), 3000, true,),\n      Some(86)\n    );\n\n    //\n    // roundtrip: Without fees one should get the original number\n    //\n    let amount_in = 100;\n\n    assert_eq!(\n      Dex::quote_price_exact_tokens_for_tokens(coin2, Coin::native(), amount_in, false,).and_then(\n        |amount| Dex::quote_price_exact_tokens_for_tokens(Coin::native(), coin2, amount, false,)\n      ),\n      Some(amount_in)\n    );\n    assert_eq!(\n      Dex::quote_price_exact_tokens_for_tokens(Coin::native(), coin2, amount_in, false,).and_then(\n        |amount| Dex::quote_price_exact_tokens_for_tokens(coin2, Coin::native(), amount, false,)\n      ),\n      Some(amount_in)\n    );\n\n    assert_eq!(\n      
Dex::quote_price_tokens_for_exact_tokens(coin2, Coin::native(), amount_in, false,).and_then(\n        |amount| Dex::quote_price_tokens_for_exact_tokens(Coin::native(), coin2, amount, false,)\n      ),\n      Some(amount_in)\n    );\n    assert_eq!(\n      Dex::quote_price_tokens_for_exact_tokens(Coin::native(), coin2, amount_in, false,).and_then(\n        |amount| Dex::quote_price_tokens_for_exact_tokens(coin2, Coin::native(), amount, false,)\n      ),\n      Some(amount_in)\n    );\n  });\n}\n\n#[test]\nfn quote_price_exact_tokens_for_tokens_matches_execution() {\n  new_test_ext().execute_with(|| {\n    let user = system_address(b\"user1\").into();\n    let user2 = system_address(b\"user2\").into();\n    let coin1 = Coin::native();\n    let coin2 = Coin::External(ExternalCoin::Bitcoin);\n\n    assert_ok!(Dex::create_pool(coin2.try_into().unwrap()));\n\n    assert_ok!(CoinsPallet::<Test>::mint(user, Balance { coin: coin1, amount: Amount(100000) }));\n    assert_ok!(CoinsPallet::<Test>::mint(user, Balance { coin: coin2, amount: Amount(1000) }));\n\n    assert_ok!(Dex::add_liquidity(\n      RuntimeOrigin::signed(user),\n      coin2.try_into().unwrap(),\n      200,\n      10000,\n      1,\n      1,\n      user,\n    ));\n\n    let amount = 1;\n    let quoted_price = 49;\n    assert_eq!(\n      Dex::quote_price_exact_tokens_for_tokens(coin2, coin1, amount, true,),\n      Some(quoted_price)\n    );\n\n    assert_ok!(CoinsPallet::<Test>::mint(user2, Balance { coin: coin2, amount: Amount(amount) }));\n    let prior_sri_balance = 0;\n    assert_eq!(prior_sri_balance, balance(user2, coin1));\n    assert_ok!(Dex::swap_exact_tokens_for_tokens(\n      RuntimeOrigin::signed(user2),\n      bvec![coin2, coin1],\n      amount,\n      1,\n      user2,\n    ));\n\n    assert_eq!(prior_sri_balance + quoted_price, balance(user2, coin1));\n  });\n}\n\n#[test]\nfn quote_price_tokens_for_exact_tokens_matches_execution() {\n  new_test_ext().execute_with(|| {\n    let user = 
system_address(b\"user1\").into();\n    let user2 = system_address(b\"user2\").into();\n    let coin1 = Coin::native();\n    let coin2 = Coin::External(ExternalCoin::Monero);\n\n    assert_ok!(Dex::create_pool(coin2.try_into().unwrap()));\n\n    assert_ok!(CoinsPallet::<Test>::mint(user, Balance { coin: coin1, amount: Amount(100000) }));\n    assert_ok!(CoinsPallet::<Test>::mint(user, Balance { coin: coin2, amount: Amount(1000) }));\n\n    assert_ok!(Dex::add_liquidity(\n      RuntimeOrigin::signed(user),\n      coin2.try_into().unwrap(),\n      200,\n      10000,\n      1,\n      1,\n      user,\n    ));\n\n    let amount = 49;\n    let quoted_price = 1;\n    assert_eq!(\n      Dex::quote_price_tokens_for_exact_tokens(coin2, coin1, amount, true,),\n      Some(quoted_price)\n    );\n\n    assert_ok!(CoinsPallet::<Test>::mint(user2, Balance { coin: coin2, amount: Amount(amount) }));\n    let prior_sri_balance = 0;\n    assert_eq!(prior_sri_balance, balance(user2, coin1));\n    let prior_coin_balance = 49;\n    assert_eq!(prior_coin_balance, balance(user2, coin2));\n    assert_ok!(Dex::swap_tokens_for_exact_tokens(\n      RuntimeOrigin::signed(user2),\n      bvec![coin2, coin1],\n      amount,\n      1,\n      user2,\n    ));\n\n    assert_eq!(prior_sri_balance + amount, balance(user2, coin1));\n    assert_eq!(prior_coin_balance - quoted_price, balance(user2, coin2));\n  });\n}\n\n#[test]\nfn can_swap_with_native() {\n  new_test_ext().execute_with(|| {\n    let user = system_address(b\"user1\").into();\n    let coin1 = Coin::native();\n    let coin2 = Coin::External(ExternalCoin::Ether);\n    let pool_id = Dex::get_pool_id(coin1, coin2).unwrap();\n\n    assert_ok!(Dex::create_pool(coin2.try_into().unwrap()));\n\n    assert_ok!(CoinsPallet::<Test>::mint(user, Balance { coin: coin1, amount: Amount(10000) }));\n    assert_ok!(CoinsPallet::<Test>::mint(user, Balance { coin: coin2, amount: Amount(1000) }));\n\n    let liquidity1 = 10000;\n    let liquidity2 = 200;\n\n    
assert_ok!(Dex::add_liquidity(\n      RuntimeOrigin::signed(user),\n      coin2.try_into().unwrap(),\n      liquidity2,\n      liquidity1,\n      1,\n      1,\n      user,\n    ));\n\n    let input_amount = 100;\n    let expect_receive = Dex::get_amount_out(input_amount, liquidity2, liquidity1).ok().unwrap();\n\n    assert_ok!(Dex::swap_exact_tokens_for_tokens(\n      RuntimeOrigin::signed(user),\n      bvec![coin2, coin1],\n      input_amount,\n      1,\n      user,\n    ));\n\n    let pallet_account = Dex::get_pool_account(pool_id);\n    assert_eq!(balance(user, coin1), expect_receive);\n    assert_eq!(balance(user, coin2), 1000 - liquidity2 - input_amount);\n    assert_eq!(balance(pallet_account, coin1), liquidity1 - expect_receive);\n    assert_eq!(balance(pallet_account, coin2), liquidity2 + input_amount);\n  });\n}\n\n#[test]\nfn can_swap_with_realistic_values() {\n  new_test_ext().execute_with(|| {\n    let user = system_address(b\"user1\").into();\n    let sri = Coin::native();\n    let dai = Coin::External(ExternalCoin::Dai);\n    assert_ok!(Dex::create_pool(dai.try_into().unwrap()));\n\n    const UNIT: u64 = 1_000_000_000;\n\n    assert_ok!(CoinsPallet::<Test>::mint(\n      user,\n      Balance { coin: sri, amount: Amount(300_000 * UNIT) }\n    ));\n    assert_ok!(CoinsPallet::<Test>::mint(\n      user,\n      Balance { coin: dai, amount: Amount(1_100_000 * UNIT) }\n    ));\n\n    let liquidity_sri = 200_000 * UNIT; // ratio for a 5$ price\n    let liquidity_dai = 1_000_000 * UNIT;\n    assert_ok!(Dex::add_liquidity(\n      RuntimeOrigin::signed(user),\n      dai.try_into().unwrap(),\n      liquidity_dai,\n      liquidity_sri,\n      1,\n      1,\n      user,\n    ));\n\n    let input_amount = 10 * UNIT; // dai\n\n    assert_ok!(Dex::swap_exact_tokens_for_tokens(\n      RuntimeOrigin::signed(user),\n      bvec![dai, sri],\n      input_amount,\n      1,\n      user,\n    ));\n\n    assert!(events().contains(&Event::<Test>::SwapExecuted {\n      who: 
user,\n      send_to: user,\n      path: bvec![dai, sri],\n      amount_in: 10 * UNIT,      // usd\n      amount_out: 1_993_980_120, // About 2 dot after div by UNIT.\n    }));\n  });\n}\n\n#[test]\nfn can_not_swap_in_pool_with_no_liquidity_added_yet() {\n  new_test_ext().execute_with(|| {\n    let user = system_address(b\"user1\").into();\n    let coin1 = Coin::native();\n    let coin2 = Coin::External(ExternalCoin::Monero);\n\n    assert_ok!(Dex::create_pool(coin2.try_into().unwrap()));\n\n    // Check can't swap an empty pool\n    assert_noop!(\n      Dex::swap_exact_tokens_for_tokens(\n        RuntimeOrigin::signed(user),\n        bvec![coin2, coin1],\n        10,\n        1,\n        user,\n      ),\n      Error::<Test>::PoolNotFound\n    );\n  });\n}\n\n#[test]\nfn check_no_panic_when_try_swap_close_to_empty_pool() {\n  new_test_ext().execute_with(|| {\n    let user = system_address(b\"user1\").into();\n    let coin1 = Coin::native();\n    let coin2 = Coin::External(ExternalCoin::Bitcoin);\n    let pool_id = Dex::get_pool_id(coin1, coin2).unwrap();\n    let lp_token = coin2;\n\n    assert_ok!(Dex::create_pool(coin2.try_into().unwrap()));\n\n    assert_ok!(CoinsPallet::<Test>::mint(user, Balance { coin: coin1, amount: Amount(10000) }));\n    assert_ok!(CoinsPallet::<Test>::mint(user, Balance { coin: coin2, amount: Amount(1000) }));\n\n    let liquidity1 = 10000;\n    let liquidity2 = 200;\n\n    assert_ok!(Dex::add_liquidity(\n      RuntimeOrigin::signed(user),\n      coin2.try_into().unwrap(),\n      liquidity2,\n      liquidity1,\n      1,\n      1,\n      user,\n    ));\n\n    let lp_token_minted = pool_balance(user, lp_token);\n    assert!(events().contains(&Event::<Test>::LiquidityAdded {\n      who: user,\n      mint_to: user,\n      pool_id,\n      sri_amount: liquidity1,\n      coin_amount: liquidity2,\n      lp_token_minted,\n    }));\n\n    let pallet_account = Dex::get_pool_account(pool_id);\n    assert_eq!(balance(pallet_account, coin1), 
liquidity1);\n    assert_eq!(balance(pallet_account, coin2), liquidity2);\n\n    assert_ok!(Dex::remove_liquidity(\n      RuntimeOrigin::signed(user),\n      coin2.try_into().unwrap(),\n      lp_token_minted,\n      1,\n      1,\n      user,\n    ));\n\n    // Now, the pool should exist but be almost empty.\n    // Let's try and drain it.\n    assert_eq!(balance(pallet_account, coin1), 708);\n    assert_eq!(balance(pallet_account, coin2), 15);\n\n    // validate the reserve should always stay above the ED\n    // Following test fail again due to the force on ED being > 1.\n    // assert_noop!(\n    // \tDex::swap_tokens_for_exact_tokens(\n    // \t\tRuntimeOrigin::signed(user),\n    // \t\tbvec![coin2, coin1],\n    // \t\t708 - ed + 1, // amount_out\n    // \t\t500,          // amount_in_max\n    // \t\tuser,\n    // \t),\n    // \tError::<Test>::ReserveLeftLessThanMinimum\n    // );\n\n    assert_ok!(Dex::swap_tokens_for_exact_tokens(\n      RuntimeOrigin::signed(user),\n      bvec![coin2, coin1],\n      608, // amount_out\n      500, // amount_in_max\n      user,\n    ));\n\n    let token_1_left = balance(pallet_account, coin1);\n    let token_2_left = balance(pallet_account, coin2);\n    assert_eq!(token_1_left, 708 - 608);\n\n    // The price for the last tokens should be very high\n    assert_eq!(\n      Dex::get_amount_in(token_1_left - 1, token_2_left, token_1_left).ok().unwrap(),\n      10625\n    );\n\n    assert_noop!(\n      Dex::swap_tokens_for_exact_tokens(\n        RuntimeOrigin::signed(user),\n        bvec![coin2, coin1],\n        token_1_left - 1, // amount_out\n        1000,             // amount_in_max\n        user,\n      ),\n      Error::<Test>::ProvidedMaximumNotSufficientForSwap\n    );\n\n    // Try to swap what's left in the pool\n    assert_noop!(\n      Dex::swap_tokens_for_exact_tokens(\n        RuntimeOrigin::signed(user),\n        bvec![coin2, coin1],\n        token_1_left, // amount_out\n        1000,         // amount_in_max\n        
user,\n      ),\n      Error::<Test>::AmountOutTooHigh\n    );\n  });\n}\n\n#[test]\nfn swap_should_not_work_if_too_much_slippage() {\n  new_test_ext().execute_with(|| {\n    let user = system_address(b\"user1\").into();\n    let coin1 = Coin::native();\n    let coin2 = Coin::External(ExternalCoin::Ether);\n\n    assert_ok!(Dex::create_pool(coin2.try_into().unwrap()));\n\n    assert_ok!(CoinsPallet::<Test>::mint(user, Balance { coin: coin1, amount: Amount(10000) }));\n    assert_ok!(CoinsPallet::<Test>::mint(user, Balance { coin: coin2, amount: Amount(1000) }));\n\n    let liquidity1 = 10000;\n    let liquidity2 = 200;\n\n    assert_ok!(Dex::add_liquidity(\n      RuntimeOrigin::signed(user),\n      coin2.try_into().unwrap(),\n      liquidity2,\n      liquidity1,\n      1,\n      1,\n      user,\n    ));\n\n    let exchange_amount = 100;\n\n    assert_noop!(\n      Dex::swap_exact_tokens_for_tokens(\n        RuntimeOrigin::signed(user),\n        bvec![coin2, coin1],\n        exchange_amount, // amount_in\n        4000,            // amount_out_min\n        user,\n      ),\n      Error::<Test>::ProvidedMinimumNotSufficientForSwap\n    );\n  });\n}\n\n#[test]\nfn can_swap_tokens_for_exact_tokens() {\n  new_test_ext().execute_with(|| {\n    let user = system_address(b\"user1\").into();\n    let coin1 = Coin::native();\n    let coin2 = Coin::External(ExternalCoin::Dai);\n    let pool_id = Dex::get_pool_id(coin1, coin2).unwrap();\n\n    assert_ok!(Dex::create_pool(coin2.try_into().unwrap()));\n\n    assert_ok!(CoinsPallet::<Test>::mint(user, Balance { coin: coin1, amount: Amount(20000) }));\n    assert_ok!(CoinsPallet::<Test>::mint(user, Balance { coin: coin2, amount: Amount(1000) }));\n\n    let pallet_account = Dex::get_pool_account(pool_id);\n    let before1 = balance(pallet_account, coin1) + balance(user, coin1);\n    let before2 = balance(pallet_account, coin2) + balance(user, coin2);\n\n    let liquidity1 = 10000;\n    let liquidity2 = 200;\n\n    
assert_ok!(Dex::add_liquidity(\n      RuntimeOrigin::signed(user),\n      coin2.try_into().unwrap(),\n      liquidity2,\n      liquidity1,\n      1,\n      1,\n      user,\n    ));\n\n    let exchange_out = 50;\n    let expect_in = Dex::get_amount_in(exchange_out, liquidity1, liquidity2).ok().unwrap();\n\n    assert_ok!(Dex::swap_tokens_for_exact_tokens(\n      RuntimeOrigin::signed(user),\n      bvec![coin1, coin2],\n      exchange_out, // amount_out\n      3500,         // amount_in_max\n      user,\n    ));\n\n    assert_eq!(balance(user, coin1), 10000 - expect_in);\n    assert_eq!(balance(user, coin2), 1000 - liquidity2 + exchange_out);\n    assert_eq!(balance(pallet_account, coin1), liquidity1 + expect_in);\n    assert_eq!(balance(pallet_account, coin2), liquidity2 - exchange_out);\n\n    // check invariants:\n\n    // native and coin totals should be preserved.\n    assert_eq!(before1, balance(pallet_account, coin1) + balance(user, coin1));\n    assert_eq!(before2, balance(pallet_account, coin2) + balance(user, coin2));\n  });\n}\n\n#[test]\nfn can_swap_tokens_for_exact_tokens_when_not_liquidity_provider() {\n  new_test_ext().execute_with(|| {\n    let user = system_address(b\"user1\").into();\n    let user2 = system_address(b\"user2\").into();\n    let coin1 = Coin::native();\n    let coin2 = Coin::External(ExternalCoin::Monero);\n    let pool_id = Dex::get_pool_id(coin1, coin2).unwrap();\n    let lp_token = coin2;\n\n    assert_ok!(Dex::create_pool(coin2.try_into().unwrap()));\n\n    let base1 = 10000;\n    let base2 = 1000;\n    assert_ok!(CoinsPallet::<Test>::mint(user, Balance { coin: coin1, amount: Amount(base1) }));\n    assert_ok!(CoinsPallet::<Test>::mint(user2, Balance { coin: coin1, amount: Amount(base1) }));\n    assert_ok!(CoinsPallet::<Test>::mint(user2, Balance { coin: coin2, amount: Amount(base2) }));\n\n    let pallet_account = Dex::get_pool_account(pool_id);\n    let before1 = balance(pallet_account, coin1) + balance(user, coin1) + 
balance(user2, coin1);\n    let before2 = balance(pallet_account, coin2) + balance(user, coin2) + balance(user2, coin2);\n\n    let liquidity1 = 10000;\n    let liquidity2 = 200;\n\n    assert_ok!(Dex::add_liquidity(\n      RuntimeOrigin::signed(user2),\n      coin2.try_into().unwrap(),\n      liquidity2,\n      liquidity1,\n      1,\n      1,\n      user2,\n    ));\n\n    assert_eq!(balance(user, coin1), base1);\n    assert_eq!(balance(user, coin2), 0);\n\n    let exchange_out = 50;\n    let expect_in = Dex::get_amount_in(exchange_out, liquidity1, liquidity2).ok().unwrap();\n\n    assert_ok!(Dex::swap_tokens_for_exact_tokens(\n      RuntimeOrigin::signed(user),\n      bvec![coin1, coin2],\n      exchange_out, // amount_out\n      3500,         // amount_in_max\n      user,\n    ));\n\n    assert_eq!(balance(user, coin1), base1 - expect_in);\n    assert_eq!(balance(pallet_account, coin1), liquidity1 + expect_in);\n    assert_eq!(balance(user, coin2), exchange_out);\n    assert_eq!(balance(pallet_account, coin2), liquidity2 - exchange_out);\n\n    // check invariants:\n\n    // native and coin totals should be preserved.\n    assert_eq!(\n      before1,\n      balance(pallet_account, coin1) + balance(user, coin1) + balance(user2, coin1)\n    );\n    assert_eq!(\n      before2,\n      balance(pallet_account, coin2) + balance(user, coin2) + balance(user2, coin2)\n    );\n\n    let lp_token_minted = pool_balance(user2, lp_token);\n    assert_eq!(lp_token_minted, 1314);\n\n    assert_ok!(Dex::remove_liquidity(\n      RuntimeOrigin::signed(user2),\n      coin2.try_into().unwrap(),\n      lp_token_minted,\n      0,\n      0,\n      user2,\n    ));\n  });\n}\n\n#[test]\nfn swap_tokens_for_exact_tokens_should_not_work_if_too_much_slippage() {\n  new_test_ext().execute_with(|| {\n    let user = system_address(b\"user1\").into();\n    let coin1 = Coin::native();\n    let coin2 = Coin::External(ExternalCoin::Ether);\n\n    
assert_ok!(Dex::create_pool(coin2.try_into().unwrap()));\n\n    assert_ok!(CoinsPallet::<Test>::mint(user, Balance { coin: coin1, amount: Amount(20000) }));\n    assert_ok!(CoinsPallet::<Test>::mint(user, Balance { coin: coin2, amount: Amount(1000) }));\n\n    let liquidity1 = 10000;\n    let liquidity2 = 200;\n\n    assert_ok!(Dex::add_liquidity(\n      RuntimeOrigin::signed(user),\n      coin2.try_into().unwrap(),\n      liquidity2,\n      liquidity1,\n      1,\n      1,\n      user,\n    ));\n\n    let exchange_out = 1;\n\n    assert_noop!(\n      Dex::swap_tokens_for_exact_tokens(\n        RuntimeOrigin::signed(user),\n        bvec![coin1, coin2],\n        exchange_out, // amount_out\n        50,           // amount_in_max just greater than slippage.\n        user,\n      ),\n      Error::<Test>::ProvidedMaximumNotSufficientForSwap\n    );\n  });\n}\n\n#[test]\nfn swap_exact_tokens_for_tokens_in_multi_hops() {\n  new_test_ext().execute_with(|| {\n    let user = system_address(b\"user1\").into();\n    let coin1 = Coin::native();\n    let coin2 = Coin::External(ExternalCoin::Dai);\n    let coin3 = Coin::External(ExternalCoin::Monero);\n\n    assert_ok!(Dex::create_pool(coin2.try_into().unwrap()));\n    assert_ok!(Dex::create_pool(coin3.try_into().unwrap()));\n\n    let base1 = 10000;\n    let base2 = 10000;\n    assert_ok!(CoinsPallet::<Test>::mint(user, Balance { coin: coin1, amount: Amount(base1 * 2) }));\n    assert_ok!(CoinsPallet::<Test>::mint(user, Balance { coin: coin2, amount: Amount(base2) }));\n    assert_ok!(CoinsPallet::<Test>::mint(user, Balance { coin: coin3, amount: Amount(base2) }));\n\n    let liquidity1 = 10000;\n    let liquidity2 = 200;\n    let liquidity3 = 2000;\n\n    assert_ok!(Dex::add_liquidity(\n      RuntimeOrigin::signed(user),\n      coin2.try_into().unwrap(),\n      liquidity2,\n      liquidity1,\n      1,\n      1,\n      user,\n    ));\n    assert_ok!(Dex::add_liquidity(\n      RuntimeOrigin::signed(user),\n      
coin3.try_into().unwrap(),\n      liquidity3,\n      liquidity1,\n      1,\n      1,\n      user,\n    ));\n\n    let input_amount = 500;\n    let expect_out2 = Dex::get_amount_out(input_amount, liquidity2, liquidity1).ok().unwrap();\n    let expect_out3 = Dex::get_amount_out(expect_out2, liquidity1, liquidity3).ok().unwrap();\n\n    assert_noop!(\n      Dex::swap_exact_tokens_for_tokens(\n        RuntimeOrigin::signed(user),\n        bvec![coin1],\n        input_amount,\n        80,\n        user,\n      ),\n      Error::<Test>::InvalidPath\n    );\n\n    assert_noop!(\n      Dex::swap_exact_tokens_for_tokens(\n        RuntimeOrigin::signed(user),\n        bvec![coin2, coin1, coin2],\n        input_amount,\n        80,\n        user,\n      ),\n      Error::<Test>::NonUniquePath\n    );\n\n    assert_ok!(Dex::swap_exact_tokens_for_tokens(\n      RuntimeOrigin::signed(user),\n      bvec![coin2, coin1, coin3],\n      input_amount, // amount_in\n      80,           // amount_out_min\n      user,\n    ));\n\n    let pool_id1 = Dex::get_pool_id(coin1, coin2).unwrap();\n    let pool_id2 = Dex::get_pool_id(coin1, coin3).unwrap();\n    let pallet_account1 = Dex::get_pool_account(pool_id1);\n    let pallet_account2 = Dex::get_pool_account(pool_id2);\n\n    assert_eq!(balance(user, coin2), base2 - liquidity2 - input_amount);\n    assert_eq!(balance(pallet_account1, coin2), liquidity2 + input_amount);\n    assert_eq!(balance(pallet_account1, coin1), liquidity1 - expect_out2);\n    assert_eq!(balance(pallet_account2, coin1), liquidity1 + expect_out2);\n    assert_eq!(balance(pallet_account2, coin3), liquidity3 - expect_out3);\n    assert_eq!(balance(user, coin3), 10000 - liquidity3 + expect_out3);\n  });\n}\n\n#[test]\nfn swap_tokens_for_exact_tokens_in_multi_hops() {\n  new_test_ext().execute_with(|| {\n    let user = system_address(b\"user1\").into();\n    let coin1 = Coin::native();\n    let coin2 = Coin::External(ExternalCoin::Bitcoin);\n    let coin3 = 
Coin::External(ExternalCoin::Ether);\n\n    assert_ok!(Dex::create_pool(coin2.try_into().unwrap()));\n    assert_ok!(Dex::create_pool(coin3.try_into().unwrap()));\n\n    let base1 = 10000;\n    let base2 = 10000;\n    assert_ok!(CoinsPallet::<Test>::mint(user, Balance { coin: coin1, amount: Amount(base1 * 2) }));\n    assert_ok!(CoinsPallet::<Test>::mint(user, Balance { coin: coin2, amount: Amount(base2) }));\n    assert_ok!(CoinsPallet::<Test>::mint(user, Balance { coin: coin3, amount: Amount(base2) }));\n\n    let liquidity1 = 10000;\n    let liquidity2 = 200;\n    let liquidity3 = 2000;\n\n    assert_ok!(Dex::add_liquidity(\n      RuntimeOrigin::signed(user),\n      coin2.try_into().unwrap(),\n      liquidity2,\n      liquidity1,\n      1,\n      1,\n      user,\n    ));\n    assert_ok!(Dex::add_liquidity(\n      RuntimeOrigin::signed(user),\n      coin3.try_into().unwrap(),\n      liquidity3,\n      liquidity1,\n      1,\n      1,\n      user,\n    ));\n\n    let exchange_out3 = 100;\n    let expect_in2 = Dex::get_amount_in(exchange_out3, liquidity1, liquidity3).ok().unwrap();\n    let expect_in1 = Dex::get_amount_in(expect_in2, liquidity2, liquidity1).ok().unwrap();\n\n    assert_ok!(Dex::swap_tokens_for_exact_tokens(\n      RuntimeOrigin::signed(user),\n      bvec![coin2, coin1, coin3],\n      exchange_out3, // amount_out\n      1000,          // amount_in_max\n      user,\n    ));\n\n    let pool_id1 = Dex::get_pool_id(coin1, coin2).unwrap();\n    let pool_id2 = Dex::get_pool_id(coin1, coin3).unwrap();\n    let pallet_account1 = Dex::get_pool_account(pool_id1);\n    let pallet_account2 = Dex::get_pool_account(pool_id2);\n\n    assert_eq!(balance(user, coin2), base2 - liquidity2 - expect_in1);\n    assert_eq!(balance(pallet_account1, coin1), liquidity1 - expect_in2);\n    assert_eq!(balance(pallet_account1, coin2), liquidity2 + expect_in1);\n    assert_eq!(balance(pallet_account2, coin1), liquidity1 + expect_in2);\n    assert_eq!(balance(pallet_account2, 
coin3), liquidity3 - exchange_out3);\n    assert_eq!(balance(user, coin3), 10000 - liquidity3 + exchange_out3);\n  });\n}\n\n#[test]\nfn can_not_swap_same_coin() {\n  new_test_ext().execute_with(|| {\n    let user = system_address(b\"user1\").into();\n    let coin1 = Coin::External(ExternalCoin::Dai);\n    assert_ok!(CoinsPallet::<Test>::mint(user, Balance { coin: coin1, amount: Amount(1000) }));\n\n    let exchange_amount = 10;\n    assert_noop!(\n      Dex::swap_exact_tokens_for_tokens(\n        RuntimeOrigin::signed(user),\n        bvec![coin1, coin1],\n        exchange_amount,\n        1,\n        user,\n      ),\n      Error::<Test>::PoolNotFound\n    );\n\n    assert_noop!(\n      Dex::swap_exact_tokens_for_tokens(\n        RuntimeOrigin::signed(user),\n        bvec![Coin::native(), Coin::native()],\n        exchange_amount,\n        1,\n        user,\n      ),\n      Error::<Test>::EqualCoins\n    );\n  });\n}\n\n#[test]\nfn validate_pool_id_sorting() {\n  new_test_ext().execute_with(|| {\n    // Serai < Bitcoin < Ether < Dai < Monero.\n    // coin1 <= coin2 for this test to pass.\n    let native = Coin::native();\n    let coin1 = Coin::External(ExternalCoin::Bitcoin);\n    let coin2 = Coin::External(ExternalCoin::Monero);\n    assert_eq!(Dex::get_pool_id(native, coin2).unwrap(), coin2.try_into().unwrap());\n    assert_eq!(Dex::get_pool_id(coin2, native).unwrap(), coin2.try_into().unwrap());\n    assert!(matches!(Dex::get_pool_id(native, native), Err(Error::<Test>::EqualCoins)));\n    assert!(matches!(Dex::get_pool_id(coin2, coin1), Err(Error::<Test>::PoolNotFound)));\n    assert!(coin2 > coin1);\n    assert!(coin1 <= coin1);\n    assert_eq!(coin1, coin1);\n    assert!(native < coin1);\n  });\n}\n\n#[test]\nfn cannot_block_pool_creation() {\n  new_test_ext().execute_with(|| {\n    // User 1 is the pool creator\n    let user = system_address(b\"user1\").into();\n    // User 2 is the attacker\n    let attacker = system_address(b\"attacker\").into();\n\n    
assert_ok!(CoinsPallet::<Test>::mint(\n      attacker,\n      Balance { coin: Coin::native(), amount: Amount(10000) }\n    ));\n\n    // The target pool the user wants to create is Native <=> Coin(2)\n    let coin1 = Coin::native();\n    let coin2 = Coin::External(ExternalCoin::Ether);\n\n    // Attacker computes the still non-existing pool account for the target pair\n    let pool_account = Dex::get_pool_account(Dex::get_pool_id(coin2, coin1).unwrap());\n    // And transfers 1 to that pool account\n    assert_ok!(CoinsPallet::<Test>::transfer_internal(\n      attacker,\n      pool_account,\n      Balance { coin: Coin::native(), amount: Amount(1) }\n    ));\n    // Then, the attacker creates 14 tokens and sends one of each to the pool account\n    // skip the coin1 and coin2 coins.\n    for coin in coins().into_iter().filter(|c| (*c != coin1 && *c != coin2)) {\n      assert_ok!(CoinsPallet::<Test>::mint(attacker, Balance { coin, amount: Amount(1000) }));\n      assert_ok!(CoinsPallet::<Test>::transfer_internal(\n        attacker,\n        pool_account,\n        Balance { coin, amount: Amount(1) }\n      ));\n    }\n\n    // User can still create the pool\n    assert_ok!(Dex::create_pool(coin2.try_into().unwrap()));\n\n    // User has to transfer one Coin(2) token to the pool account (otherwise add_liquidity will\n    // fail with `CoinTwoDepositDidNotMeetMinimum`), also transfer native token for the same error.\n    assert_ok!(CoinsPallet::<Test>::mint(user, Balance { coin: coin1, amount: Amount(10000) }));\n    assert_ok!(CoinsPallet::<Test>::mint(user, Balance { coin: coin2, amount: Amount(10000) }));\n    assert_ok!(CoinsPallet::<Test>::transfer_internal(\n      user,\n      pool_account,\n      Balance { coin: coin2, amount: Amount(1) }\n    ));\n    assert_ok!(CoinsPallet::<Test>::transfer_internal(\n      user,\n      pool_account,\n      Balance { coin: coin1, amount: Amount(100) }\n    ));\n\n    // add_liquidity shouldn't fail because of the number of 
consumers\n    assert_ok!(Dex::add_liquidity(\n      RuntimeOrigin::signed(user),\n      coin2.try_into().unwrap(),\n      100,\n      9900,\n      10,\n      9900,\n      user,\n    ));\n  });\n}\n\n#[test]\nfn test_median_price() {\n  new_test_ext().execute_with(|| {\n    use rand_core::{RngCore, OsRng};\n\n    let mut prices = vec![];\n    for i in 0 .. 100 {\n      // Randomly use an active number\n      if (i != 0) && (OsRng.next_u64() % u64::from(MEDIAN_PRICE_WINDOW_LENGTH / 3) == 0) {\n        let old_index = usize::try_from(\n          OsRng.next_u64() %\n            u64::from(MEDIAN_PRICE_WINDOW_LENGTH) %\n            u64::try_from(prices.len()).unwrap(),\n        )\n        .unwrap();\n        let window_base = prices.len().saturating_sub(MEDIAN_PRICE_WINDOW_LENGTH.into());\n        prices.push(prices[window_base + old_index]);\n      } else {\n        prices.push(OsRng.next_u64());\n      }\n    }\n    let coin = ExternalCoin::Bitcoin;\n\n    assert!(prices.len() >= (2 * usize::from(MEDIAN_PRICE_WINDOW_LENGTH)));\n    for i in 0 .. 
prices.len() {\n      let price = Amount(prices[i]);\n\n      let n = BlockNumberFor::<Test>::from(u32::try_from(i).unwrap());\n      SpotPriceForBlock::<Test>::set(n, coin, Some(price));\n      Dex::insert_into_median(coin, price);\n      if SpotPricesLength::<Test>::get(coin).unwrap() > MEDIAN_PRICE_WINDOW_LENGTH {\n        let old = n - u64::from(MEDIAN_PRICE_WINDOW_LENGTH);\n        let old_price = SpotPriceForBlock::<Test>::get(old, coin).unwrap();\n        SpotPriceForBlock::<Test>::remove(old, coin);\n        Dex::remove_from_median(coin, old_price);\n      }\n\n      // get the current window (cloning so our sort doesn't affect the original array)\n      let window_base = (i + 1).saturating_sub(MEDIAN_PRICE_WINDOW_LENGTH.into());\n      let mut window = Vec::from(&prices[window_base ..= i]);\n      assert!(window.len() <= MEDIAN_PRICE_WINDOW_LENGTH.into());\n\n      // get the median\n      window.sort();\n      let median_index = window.len() / 2;\n      assert_eq!(Dex::median_price(coin).unwrap(), Amount(window[median_index]));\n    }\n  });\n}\n"
  },
  {
    "path": "substrate/dex/pallet/src/types.rs",
    "content": "// This file was originally:\n\n// Copyright (C) Parity Technologies (UK) Ltd.\n// SPDX-License-Identifier: Apache-2.0\n\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// \thttp://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n// It has been forked into a crate distributed under the AGPL 3.0.\n// Please check the current distribution for up-to-date copyright and licensing information.\n\nuse super::*;\n\n/// Trait for providing methods to swap between the various coin classes.\npub trait Swap<AccountId, Balance, MultiCoinId> {\n  /// Swap exactly `amount_in` of coin `path[0]` for coin `path[1]`.\n  /// If an `amount_out_min` is specified, it will return an error if it is unable to acquire\n  /// the amount desired.\n  ///\n  /// Withdraws the `path[0]` coin from `sender`, deposits the `path[1]` coin to `send_to`,\n  ///\n  /// If successful, returns the amount of `path[1]` acquired for the `amount_in`.\n  fn swap_exact_tokens_for_tokens(\n    sender: AccountId,\n    path: Vec<MultiCoinId>,\n    amount_in: Balance,\n    amount_out_min: Option<Balance>,\n    send_to: AccountId,\n  ) -> Result<Balance, DispatchError>;\n\n  /// Take the `path[0]` coin and swap some amount for `amount_out` of the `path[1]`. 
If an\n  /// `amount_in_max` is specified, it will return an error if acquiring `amount_out` would be\n  /// too costly.\n  ///\n  /// Withdraws `path[0]` coin from `sender`, deposits `path[1]` coin to `send_to`,\n  ///\n  /// If successful returns the amount of the `path[0]` taken to provide `path[1]`.\n  fn swap_tokens_for_exact_tokens(\n    sender: AccountId,\n    path: Vec<MultiCoinId>,\n    amount_out: Balance,\n    amount_in_max: Option<Balance>,\n    send_to: AccountId,\n  ) -> Result<Balance, DispatchError>;\n}\n"
  },
  {
    "path": "substrate/dex/pallet/src/weights.rs",
    "content": "// This file was originally:\n\n// Copyright (C) Parity Technologies (UK) Ltd.\n// SPDX-License-Identifier: Apache-2.0\n\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// \thttp://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n// It has been forked into a crate distributed under the AGPL 3.0.\n// Please check the current distribution for up-to-date copyright and licensing information.\n\n//! Autogenerated weights for Dex Pallet.\n//!\n//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev\n//! DATE: 2023-07-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`\n//! WORST CASE MAP SIZE: `1000000`\n//! HOSTNAME: `runner-gghbxkbs-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`\n//! 
EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some(\"dev\")`, DB CACHE: `1024`\n\n// Executed Command:\n// target/production/substrate\n// benchmark\n// pallet\n// --steps=50\n// --repeat=20\n// --extrinsic=*\n// --wasm-execution=compiled\n// --heap-pages=4096\n// --json-file=/builds/parity/mirrors/substrate/.git/.artifacts/bench.json\n// --pallet=serai_dex_pallet\n// --chain=dev\n// --header=./HEADER-APACHE2\n// --output=./substrate/dex/pallet/src/weights.rs\n// --template=./.maintain/frame-weight-template.hbs\n\n#![cfg_attr(rustfmt, rustfmt_skip)]\n#![allow(unused_parens)]\n#![allow(unused_imports)]\n#![allow(missing_docs)]\n\nuse frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};\nuse core::marker::PhantomData;\n\n/// Weight functions needed for Dex Pallet.\npub trait WeightInfo {\n\tfn create_pool() -> Weight;\n\tfn add_liquidity() -> Weight;\n\tfn remove_liquidity() -> Weight;\n\tfn swap_exact_tokens_for_tokens() -> Weight;\n\tfn swap_tokens_for_exact_tokens() -> Weight;\n}\n\n/// Weights for Dex Pallet using the Substrate node and recommended hardware.\npub struct SubstrateWeight<T>(PhantomData<T>);\nimpl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {\n\t/// Storage: `DexPallet::Pools` (r:1 w:1)\n\t/// Proof: `DexPallet::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`)\n\t/// Storage: `System::Account` (r:2 w:2)\n\t/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)\n\t/// Storage: `Coins::Account` (r:1 w:1)\n\t/// Proof: `Coins::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)\n\t/// Storage: `Coins::Coin` (r:1 w:1)\n\t/// Proof: `Coins::Coin` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)\n\t/// Storage: `DexPallet::NextPoolCoinId` (r:1 w:1)\n\t/// Proof: `DexPallet::NextPoolCoinId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: 
`MaxEncodedLen`)\n\t/// Storage: `PoolCoins::Coin` (r:1 w:1)\n\t/// Proof: `PoolCoins::Coin` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)\n\t/// Storage: `PoolCoins::Account` (r:1 w:1)\n\t/// Proof: `PoolCoins::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)\n\tfn create_pool() -> Weight {\n\t\t// Proof Size summary in bytes:\n\t\t//  Measured:  `729`\n\t\t//  Estimated: `6196`\n\t\t// Minimum execution time: 131_688_000 picoseconds.\n\t\tWeight::from_parts(134_092_000, 6196)\n\t\t\t.saturating_add(T::DbWeight::get().reads(8_u64))\n\t\t\t.saturating_add(T::DbWeight::get().writes(8_u64))\n\t}\n\t/// Storage: `DexPallet::Pools` (r:1 w:0)\n\t/// Proof: `DexPallet::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`)\n\t/// Storage: `System::Account` (r:1 w:1)\n\t/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)\n\t/// Storage: `Coins::Coin` (r:1 w:1)\n\t/// Proof: `Coins::Coin` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)\n\t/// Storage: `Coins::Account` (r:2 w:2)\n\t/// Proof: `Coins::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)\n\t/// Storage: `PoolCoins::Coin` (r:1 w:1)\n\t/// Proof: `PoolCoins::Coin` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)\n\t/// Storage: `PoolCoins::Account` (r:2 w:2)\n\t/// Proof: `PoolCoins::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)\n\tfn add_liquidity() -> Weight {\n\t\t// Proof Size summary in bytes:\n\t\t//  Measured:  `1382`\n\t\t//  Estimated: `6208`\n\t\t// Minimum execution time: 157_310_000 picoseconds.\n\t\tWeight::from_parts(161_547_000, 6208)\n\t\t\t.saturating_add(T::DbWeight::get().reads(8_u64))\n\t\t\t.saturating_add(T::DbWeight::get().writes(7_u64))\n\t}\n\t/// Storage: `DexPallet::Pools` (r:1 
w:0)\n\t/// Proof: `DexPallet::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`)\n\t/// Storage: `System::Account` (r:1 w:1)\n\t/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)\n\t/// Storage: `Coins::Coin` (r:1 w:1)\n\t/// Proof: `Coins::Coin` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)\n\t/// Storage: `Coins::Account` (r:2 w:2)\n\t/// Proof: `Coins::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)\n\t/// Storage: `PoolCoins::Coin` (r:1 w:1)\n\t/// Proof: `PoolCoins::Coin` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)\n\t/// Storage: `PoolCoins::Account` (r:1 w:1)\n\t/// Proof: `PoolCoins::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)\n\tfn remove_liquidity() -> Weight {\n\t\t// Proof Size summary in bytes:\n\t\t//  Measured:  `1371`\n\t\t//  Estimated: `6208`\n\t\t// Minimum execution time: 142_769_000 picoseconds.\n\t\tWeight::from_parts(145_139_000, 6208)\n\t\t\t.saturating_add(T::DbWeight::get().reads(7_u64))\n\t\t\t.saturating_add(T::DbWeight::get().writes(6_u64))\n\t}\n\t/// Storage: `System::Account` (r:1 w:1)\n\t/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)\n\t/// Storage: `Coins::Coin` (r:3 w:3)\n\t/// Proof: `Coins::Coin` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)\n\t/// Storage: `Coins::Account` (r:6 w:6)\n\t/// Proof: `Coins::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)\n\tfn swap_exact_tokens_for_tokens() -> Weight {\n\t\t// Proof Size summary in bytes:\n\t\t//  Measured:  `1738`\n\t\t//  Estimated: `16644`\n\t\t// Minimum execution time: 213_186_000 picoseconds.\n\t\tWeight::from_parts(217_471_000, 
16644)\n\t\t\t.saturating_add(T::DbWeight::get().reads(10_u64))\n\t\t\t.saturating_add(T::DbWeight::get().writes(10_u64))\n\t}\n\t/// Storage: `Coins::Coin` (r:3 w:3)\n\t/// Proof: `Coins::Coin` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)\n\t/// Storage: `Coins::Account` (r:6 w:6)\n\t/// Proof: `Coins::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)\n\t/// Storage: `System::Account` (r:1 w:1)\n\t/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)\n\tfn swap_tokens_for_exact_tokens() -> Weight {\n\t\t// Proof Size summary in bytes:\n\t\t//  Measured:  `1738`\n\t\t//  Estimated: `16644`\n\t\t// Minimum execution time: 213_793_000 picoseconds.\n\t\tWeight::from_parts(218_584_000, 16644)\n\t\t\t.saturating_add(T::DbWeight::get().reads(10_u64))\n\t\t\t.saturating_add(T::DbWeight::get().writes(10_u64))\n\t}\n}\n\n// For backwards compatibility and tests.\nimpl WeightInfo for () {\n\t/// Storage: `DexPallet::Pools` (r:1 w:1)\n\t/// Proof: `DexPallet::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`)\n\t/// Storage: `System::Account` (r:2 w:2)\n\t/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)\n\t/// Storage: `Coins::Account` (r:1 w:1)\n\t/// Proof: `Coins::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)\n\t/// Storage: `Coins::Coin` (r:1 w:1)\n\t/// Proof: `Coins::Coin` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)\n\t/// Storage: `DexPallet::NextPoolCoinId` (r:1 w:1)\n\t/// Proof: `DexPallet::NextPoolCoinId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)\n\t/// Storage: `PoolCoins::Coin` (r:1 w:1)\n\t/// Proof: `PoolCoins::Coin` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)\n\t/// Storage: 
`PoolCoins::Account` (r:1 w:1)\n\t/// Proof: `PoolCoins::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)\n\tfn create_pool() -> Weight {\n\t\t// Proof Size summary in bytes:\n\t\t//  Measured:  `729`\n\t\t//  Estimated: `6196`\n\t\t// Minimum execution time: 131_688_000 picoseconds.\n\t\tWeight::from_parts(134_092_000, 6196)\n\t\t\t.saturating_add(RocksDbWeight::get().reads(8_u64))\n\t\t\t.saturating_add(RocksDbWeight::get().writes(8_u64))\n\t}\n\t/// Storage: `DexPallet::Pools` (r:1 w:0)\n\t/// Proof: `DexPallet::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`)\n\t/// Storage: `System::Account` (r:1 w:1)\n\t/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)\n\t/// Storage: `Coins::Coin` (r:1 w:1)\n\t/// Proof: `Coins::Coin` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)\n\t/// Storage: `Coins::Account` (r:2 w:2)\n\t/// Proof: `Coins::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)\n\t/// Storage: `PoolCoins::Coin` (r:1 w:1)\n\t/// Proof: `PoolCoins::Coin` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)\n\t/// Storage: `PoolCoins::Account` (r:2 w:2)\n\t/// Proof: `PoolCoins::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)\n\tfn add_liquidity() -> Weight {\n\t\t// Proof Size summary in bytes:\n\t\t//  Measured:  `1382`\n\t\t//  Estimated: `6208`\n\t\t// Minimum execution time: 157_310_000 picoseconds.\n\t\tWeight::from_parts(161_547_000, 6208)\n\t\t\t.saturating_add(RocksDbWeight::get().reads(8_u64))\n\t\t\t.saturating_add(RocksDbWeight::get().writes(7_u64))\n\t}\n\t/// Storage: `DexPallet::Pools` (r:1 w:0)\n\t/// Proof: `DexPallet::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`)\n\t/// Storage: `System::Account` (r:1 w:1)\n\t/// Proof: 
`System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)\n\t/// Storage: `Coins::Coin` (r:1 w:1)\n\t/// Proof: `Coins::Coin` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)\n\t/// Storage: `Coins::Account` (r:2 w:2)\n\t/// Proof: `Coins::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)\n\t/// Storage: `PoolCoins::Coin` (r:1 w:1)\n\t/// Proof: `PoolCoins::Coin` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)\n\t/// Storage: `PoolCoins::Account` (r:1 w:1)\n\t/// Proof: `PoolCoins::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)\n\tfn remove_liquidity() -> Weight {\n\t\t// Proof Size summary in bytes:\n\t\t//  Measured:  `1371`\n\t\t//  Estimated: `6208`\n\t\t// Minimum execution time: 142_769_000 picoseconds.\n\t\tWeight::from_parts(145_139_000, 6208)\n\t\t\t.saturating_add(RocksDbWeight::get().reads(7_u64))\n\t\t\t.saturating_add(RocksDbWeight::get().writes(6_u64))\n\t}\n\t/// Storage: `System::Account` (r:1 w:1)\n\t/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)\n\t/// Storage: `Coins::Coin` (r:3 w:3)\n\t/// Proof: `Coins::Coin` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)\n\t/// Storage: `Coins::Account` (r:6 w:6)\n\t/// Proof: `Coins::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)\n\tfn swap_exact_tokens_for_tokens() -> Weight {\n\t\t// Proof Size summary in bytes:\n\t\t//  Measured:  `1738`\n\t\t//  Estimated: `16644`\n\t\t// Minimum execution time: 213_186_000 picoseconds.\n\t\tWeight::from_parts(217_471_000, 16644)\n\t\t\t.saturating_add(RocksDbWeight::get().reads(10_u64))\n\t\t\t.saturating_add(RocksDbWeight::get().writes(10_u64))\n\t}\n\t/// Storage: `Coins::Coin` (r:3 w:3)\n\t/// Proof: `Coins::Coin` (`max_values`: None, `max_size`: 
Some(210), added: 2685, mode: `MaxEncodedLen`)\n\t/// Storage: `Coins::Account` (r:6 w:6)\n\t/// Proof: `Coins::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)\n\t/// Storage: `System::Account` (r:1 w:1)\n\t/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)\n\tfn swap_tokens_for_exact_tokens() -> Weight {\n\t\t// Proof Size summary in bytes:\n\t\t//  Measured:  `1738`\n\t\t//  Estimated: `16644`\n\t\t// Minimum execution time: 213_793_000 picoseconds.\n\t\tWeight::from_parts(218_584_000, 16644)\n\t\t\t.saturating_add(RocksDbWeight::get().reads(10_u64))\n\t\t\t.saturating_add(RocksDbWeight::get().writes(10_u64))\n\t}\n}\n"
  },
  {
    "path": "substrate/economic-security/pallet/Cargo.toml",
    "content": "[package]\nname = \"serai-economic-security-pallet\"\nversion = \"0.1.0\"\ndescription = \"Economic Security pallet for Serai\"\nlicense = \"AGPL-3.0-only\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/substrate/economic-security/pallet\"\nauthors = [\"Akil Demir <akildemir72@gmail.com>\"]\nedition = \"2021\"\nrust-version = \"1.77\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[package.metadata.cargo-machete]\nignored = [\"scale\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nscale = { package = \"parity-scale-codec\", version = \"3\", default-features = false, features = [\"derive\"] }\n\nframe-system = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nframe-support = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\n\ndex-pallet = { package = \"serai-dex-pallet\", path = \"../../dex/pallet\", default-features = false }\ncoins-pallet = { package = \"serai-coins-pallet\", path = \"../../coins/pallet\", default-features = false }\n\nserai-primitives = { path = \"../../primitives\", default-features = false }\n\n[features]\nstd = [\n  \"scale/std\",\n\n  \"frame-system/std\",\n  \"frame-support/std\",\n\n  \"dex-pallet/std\",\n  \"coins-pallet/std\",\n\n  \"serai-primitives/std\",\n]\ntry-runtime = [] # TODO\n\ndefault = [\"std\"]\n"
  },
  {
    "path": "substrate/economic-security/pallet/LICENSE",
    "content": "AGPL-3.0-only license\n\nCopyright (c) 2024 Luke Parker\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License Version 3 as\npublished by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n"
  },
  {
    "path": "substrate/economic-security/pallet/src/lib.rs",
    "content": "#![cfg_attr(not(feature = \"std\"), no_std)]\n\n#[allow(\n  unreachable_patterns,\n  clippy::cast_possible_truncation,\n  clippy::no_effect_underscore_binding,\n  clippy::empty_docs\n)]\n#[frame_support::pallet]\npub mod pallet {\n  use frame_system::pallet_prelude::*;\n  use frame_support::pallet_prelude::*;\n\n  use dex_pallet::{Config as DexConfig, Pallet as Dex};\n  use coins_pallet::{Config as CoinsConfig, AllowMint};\n\n  use serai_primitives::*;\n\n  #[pallet::config]\n  pub trait Config: frame_system::Config + CoinsConfig + DexConfig {}\n\n  #[pallet::event]\n  #[pallet::generate_deposit(fn deposit_event)]\n  pub enum Event<T: Config> {\n    EconomicSecurityReached { network: ExternalNetworkId },\n  }\n\n  #[pallet::pallet]\n  pub struct Pallet<T>(PhantomData<T>);\n\n  #[pallet::storage]\n  #[pallet::getter(fn economic_security_block)]\n  pub(crate) type EconomicSecurityBlock<T: Config> =\n    StorageMap<_, Identity, ExternalNetworkId, BlockNumberFor<T>, OptionQuery>;\n\n  #[pallet::hooks]\n  impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {\n    fn on_initialize(n: BlockNumberFor<T>) -> Weight {\n      // we accept we reached economic security once we can mint smallest amount of a network's coin\n      for coin in EXTERNAL_COINS {\n        let existing = EconomicSecurityBlock::<T>::get(coin.network());\n        // TODO: we don't need to check for oracle value if is_allowed returns false when there is\n        // no coin value\n        if existing.is_none() &&\n          Dex::<T>::security_oracle_value(coin).is_some() &&\n          <T as CoinsConfig>::AllowMint::is_allowed(&ExternalBalance { coin, amount: Amount(1) })\n        {\n          EconomicSecurityBlock::<T>::set(coin.network(), Some(n));\n          Self::deposit_event(Event::EconomicSecurityReached { network: coin.network() });\n        }\n      }\n\n      Weight::zero() // TODO\n    }\n  }\n}\n\npub use pallet::*;\n"
  },
  {
    "path": "substrate/emissions/pallet/Cargo.toml",
    "content": "[package]\nname = \"serai-emissions-pallet\"\nversion = \"0.1.0\"\ndescription = \"Emissions pallet for Serai\"\nlicense = \"AGPL-3.0-only\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/substrate/emissions/pallet\"\nauthors = [\"Akil Demir <akildemir72@gmail.com>\"]\nedition = \"2021\"\nrust-version = \"1.77\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[package.metadata.cargo-machete]\nignored = [\"scale\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nscale = { package = \"parity-scale-codec\", version = \"3\", default-features = false, features = [\"derive\"] }\n\nframe-system = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nframe-support = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\n\nsp-std = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nsp-runtime = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\n\ncoins-pallet = { package = \"serai-coins-pallet\", path = \"../../coins/pallet\", default-features = false }\nvalidator-sets-pallet = { package = \"serai-validator-sets-pallet\", path = \"../../validator-sets/pallet\", default-features = false }\ndex-pallet = { package = \"serai-dex-pallet\", path = \"../../dex/pallet\", default-features = false }\ngenesis-liquidity-pallet = { package = \"serai-genesis-liquidity-pallet\", path = \"../../genesis-liquidity/pallet\", default-features = false }\n\neconomic-security-pallet = { package = \"serai-economic-security-pallet\", path = \"../../economic-security/pallet\", default-features = false }\n\nserai-primitives = { path = \"../../primitives\", default-features = 
false }\nvalidator-sets-primitives = { package = \"serai-validator-sets-primitives\", path = \"../../validator-sets/primitives\", default-features = false }\nemissions-primitives = { package = \"serai-emissions-primitives\", path = \"../primitives\", default-features = false }\n\n[features]\nstd = [\n  \"scale/std\",\n\n  \"frame-system/std\",\n  \"frame-support/std\",\n\n  \"sp-std/std\",\n  \"sp-runtime/std\",\n\n  \"coins-pallet/std\",\n  \"validator-sets-pallet/std\",\n  \"dex-pallet/std\",\n  \"genesis-liquidity-pallet/std\",\n\n  \"economic-security-pallet/std\",\n\n  \"serai-primitives/std\",\n  \"emissions-primitives/std\",\n]\nfast-epoch = []\ntry-runtime = [] # TODO\ndefault = [\"std\"]\n"
  },
  {
    "path": "substrate/emissions/pallet/LICENSE",
    "content": "AGPL-3.0-only license\n\nCopyright (c) 2024 Luke Parker\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License Version 3 as\npublished by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n"
  },
  {
    "path": "substrate/emissions/pallet/src/lib.rs",
    "content": "#![cfg_attr(not(feature = \"std\"), no_std)]\n\n#[allow(\n  unreachable_patterns,\n  clippy::cast_possible_truncation,\n  clippy::no_effect_underscore_binding,\n  clippy::empty_docs\n)]\n#[frame_support::pallet]\npub mod pallet {\n  use super::*;\n  use frame_system::{pallet_prelude::*, RawOrigin};\n  use frame_support::{pallet_prelude::*, sp_runtime::SaturatedConversion};\n\n  use sp_std::{vec, vec::Vec, ops::Mul, collections::btree_map::BTreeMap};\n\n  use coins_pallet::{Config as CoinsConfig, Pallet as Coins};\n  use dex_pallet::{Config as DexConfig, Pallet as Dex};\n\n  use validator_sets_pallet::{Pallet as ValidatorSets, Config as ValidatorSetsConfig};\n  use genesis_liquidity_pallet::{Pallet as GenesisLiquidity, Config as GenesisLiquidityConfig};\n\n  use economic_security_pallet::{Config as EconomicSecurityConfig, Pallet as EconomicSecurity};\n\n  use serai_primitives::*;\n  use validator_sets_primitives::{MAX_KEY_SHARES_PER_SET, Session};\n  pub use emissions_primitives as primitives;\n  use primitives::*;\n\n  #[pallet::config]\n  pub trait Config:\n    frame_system::Config<AccountId = PublicKey>\n    + ValidatorSetsConfig\n    + CoinsConfig\n    + DexConfig\n    + GenesisLiquidityConfig\n    + EconomicSecurityConfig\n  {\n  }\n\n  #[pallet::genesis_config]\n  #[derive(Clone, Debug)]\n  pub struct GenesisConfig<T: Config> {\n    /// Networks to spawn Serai with.\n    pub networks: Vec<(NetworkId, Amount)>,\n    /// List of participants to place in the initial validator sets.\n    pub participants: Vec<T::AccountId>,\n  }\n\n  impl<T: Config> Default for GenesisConfig<T> {\n    fn default() -> Self {\n      GenesisConfig { networks: Default::default(), participants: Default::default() }\n    }\n  }\n\n  #[pallet::error]\n  pub enum Error<T> {\n    NetworkHasEconomicSecurity,\n    NoValueForCoin,\n    InsufficientAllocation,\n  }\n\n  #[pallet::event]\n  pub enum Event<T: Config> {}\n\n  #[pallet::pallet]\n  pub struct 
Pallet<T>(PhantomData<T>);\n\n  // TODO: Remove this. This should be the sole domain of validator-sets\n  #[pallet::storage]\n  #[pallet::getter(fn participants)]\n  pub(crate) type Participants<T: Config> = StorageMap<\n    _,\n    Identity,\n    NetworkId,\n    BoundedVec<(PublicKey, u64), ConstU32<{ MAX_KEY_SHARES_PER_SET }>>,\n    OptionQuery,\n  >;\n\n  // TODO: Remove this too\n  #[pallet::storage]\n  #[pallet::getter(fn session)]\n  pub type CurrentSession<T: Config> = StorageMap<_, Identity, NetworkId, u32, ValueQuery>;\n\n  #[pallet::storage]\n  pub(crate) type LastSwapVolume<T: Config> =\n    StorageMap<_, Identity, ExternalCoin, u64, OptionQuery>;\n\n  #[pallet::genesis_build]\n  impl<T: Config> BuildGenesisConfig for GenesisConfig<T> {\n    fn build(&self) {\n      for (id, stake) in self.networks.clone() {\n        let mut participants = vec![];\n        for p in self.participants.clone() {\n          participants.push((p, stake.0));\n        }\n        Participants::<T>::set(id, Some(participants.try_into().unwrap()));\n        CurrentSession::<T>::set(id, 0);\n      }\n    }\n  }\n\n  #[pallet::hooks]\n  impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {\n    fn on_initialize(n: BlockNumberFor<T>) -> Weight {\n      let genesis_ended = GenesisLiquidity::<T>::genesis_complete_block().is_some();\n\n      // check if we got a new session\n      let mut session_changed = false;\n      let session = ValidatorSets::<T>::session(NetworkId::Serai).unwrap_or(Session(0));\n      if session.0 > Self::session(NetworkId::Serai) {\n        session_changed = true;\n        CurrentSession::<T>::set(NetworkId::Serai, session.0);\n      }\n\n      // update participants per session before the genesis\n      // after the genesis, we update them after reward distribution.\n      if (!genesis_ended) && session_changed {\n        Self::update_participants();\n      }\n\n      // We only want to distribute emissions if the genesis period is over AND the session has\n 
     // ended\n      if !(genesis_ended && session_changed) {\n        return Weight::zero(); // TODO\n      }\n\n      // figure out the amount of blocks in the last session\n      // Since the session has changed, we're now at least at session 1\n      let block_count = ValidatorSets::<T>::session_begin_block(NetworkId::Serai, session) -\n        ValidatorSets::<T>::session_begin_block(NetworkId::Serai, Session(session.0 - 1));\n\n      // get total reward for this epoch\n      let pre_ec_security = Self::pre_ec_security();\n      let mut distances = BTreeMap::new();\n      let mut total_distance: u64 = 0;\n      let reward_this_epoch = if pre_ec_security {\n        // calculate distance to economic security per network\n        for n in EXTERNAL_NETWORKS {\n          let required = ValidatorSets::<T>::required_stake_for_network(n);\n          let mut current =\n            ValidatorSets::<T>::total_allocated_stake(NetworkId::from(n)).unwrap_or(Amount(0)).0;\n          if current > required {\n            current = required;\n          }\n\n          let distance = required - current;\n          distances.insert(NetworkId::from(n), distance);\n          total_distance = total_distance.saturating_add(distance);\n        }\n\n        // add serai network portion (20%)\n        let new_total_distance =\n          total_distance.saturating_mul(100) / (100 - SERAI_VALIDATORS_DESIRED_PERCENTAGE);\n        distances.insert(NetworkId::Serai, new_total_distance - total_distance);\n        total_distance = new_total_distance;\n\n        if Self::initial_period(n) {\n          // rewards are fixed for initial period\n          block_count * INITIAL_REWARD_PER_BLOCK\n        } else {\n          // rewards for pre-economic security is\n          // (STAKE_REQUIRED - CURRENT_STAKE) / blocks_until(SECURE_BY).\n          let block_reward = total_distance / Self::blocks_until(SECURE_BY);\n          block_count * block_reward\n        }\n      } else {\n        // post ec 
security\n        block_count * REWARD_PER_BLOCK\n      };\n\n      // map epoch ec-security-distance/volume to rewards\n      let (rewards_per_network, volume_per_network, volume_per_coin) = if pre_ec_security {\n        (\n          distances\n            .into_iter()\n            .map(|(n, distance)| {\n              // calculate how much each network gets based on distance to ec-security\n              let reward = u64::try_from(\n                u128::from(reward_this_epoch).saturating_mul(u128::from(distance)) /\n                  u128::from(total_distance),\n              )\n              .unwrap();\n              (n, reward)\n            })\n            .collect::<BTreeMap<NetworkId, u64>>(),\n          None,\n          None,\n        )\n      } else {\n        // get swap volumes\n        let mut volume_per_coin: BTreeMap<ExternalCoin, u64> = BTreeMap::new();\n        for c in EXTERNAL_COINS {\n          let current_volume = Dex::<T>::swap_volume(c).unwrap_or(0);\n          let last_volume = LastSwapVolume::<T>::get(c).unwrap_or(0);\n          let vol_this_epoch = current_volume.saturating_sub(last_volume);\n\n          // update the current volume\n          LastSwapVolume::<T>::set(c, Some(current_volume));\n          volume_per_coin.insert(c, vol_this_epoch);\n        }\n\n        // aggregate per network\n        let mut total_volume = 0u64;\n        let mut volume_per_network: BTreeMap<NetworkId, u64> = BTreeMap::new();\n        for (c, vol) in &volume_per_coin {\n          volume_per_network.insert(\n            c.network().into(),\n            (*volume_per_network.get(&c.network().into()).unwrap_or(&0)).saturating_add(*vol),\n          );\n          total_volume = total_volume.saturating_add(*vol);\n        }\n        // we add the serai network now\n        volume_per_network.insert(NetworkId::Serai, 0);\n\n        (\n          volume_per_network\n            .iter()\n            .map(|(n, vol)| {\n              // 20% of the reward goes to the 
Serai network and rest is distributed among others\n              // based on swap-volume.\n              let reward = if *n == NetworkId::Serai {\n                reward_this_epoch / 5\n              } else {\n                let reward = reward_this_epoch - (reward_this_epoch / 5);\n                // TODO: It is highly unlikely but what to do in case of 0 total volume?\n                if total_volume != 0 {\n                  u64::try_from(\n                    u128::from(reward).saturating_mul(u128::from(*vol)) / u128::from(total_volume),\n                  )\n                  .unwrap()\n                } else {\n                  0\n                }\n              };\n              (*n, reward)\n            })\n            .collect::<BTreeMap<NetworkId, u64>>(),\n          Some(volume_per_network),\n          Some(volume_per_coin),\n        )\n      };\n\n      // distribute the rewards within the network\n      for (n, reward) in rewards_per_network {\n        let validators_reward = if let NetworkId::External(external_network) = n {\n          // calculate pool vs validator share\n          let capacity =\n            ValidatorSets::<T>::total_allocated_stake(NetworkId::from(external_network))\n              .unwrap_or(Amount(0))\n              .0;\n          let required = ValidatorSets::<T>::required_stake_for_network(external_network);\n          let unused_capacity = capacity.saturating_sub(required);\n\n          let distribution = unused_capacity.saturating_mul(ACCURACY_MULTIPLIER) / capacity;\n          let total = DESIRED_DISTRIBUTION.saturating_add(distribution);\n\n          let validators_reward = DESIRED_DISTRIBUTION.saturating_mul(reward) / total;\n          let network_pool_reward = reward.saturating_sub(validators_reward);\n\n          // send the rest to the pool\n          if network_pool_reward != 0 {\n            // these should be available to unwrap if we have a network_pool_reward. 
Because that\n            // means we had an unused capacity hence in a post-ec era.\n            let vpn = volume_per_network.as_ref().unwrap();\n            let vpc = volume_per_coin.as_ref().unwrap();\n            for c in external_network.coins() {\n              let pool_reward = u64::try_from(\n                u128::from(network_pool_reward).saturating_mul(u128::from(vpc[&c])) /\n                  u128::from(vpn[&n]),\n              )\n              .unwrap();\n\n              if Coins::<T>::mint(\n                Dex::<T>::get_pool_account(c),\n                Balance { coin: Coin::Serai, amount: Amount(pool_reward) },\n              )\n              .is_err()\n              {\n                // TODO: log the failure\n                continue;\n              }\n            }\n          }\n\n          validators_reward\n        } else {\n          reward\n        };\n\n        // distribute validators rewards\n        Self::distribute_to_validators(n, validators_reward);\n      }\n\n      // TODO: we have the past session participants here in the emissions pallet so that we can\n      // distribute rewards to them in the next session. 
Ideally we should be able to fetch this\n      // information from validator sets pallet.\n      Self::update_participants();\n      Weight::zero() // TODO\n    }\n  }\n\n  impl<T: Config> Pallet<T> {\n    fn blocks_until(block: u64) -> u64 {\n      let current = <frame_system::Pallet<T>>::block_number().saturated_into::<u64>();\n      block.saturating_sub(current)\n    }\n\n    fn initial_period(n: BlockNumberFor<T>) -> bool {\n      #[cfg(feature = \"fast-epoch\")]\n      let initial_period_duration = FAST_EPOCH_INITIAL_PERIOD;\n\n      #[cfg(not(feature = \"fast-epoch\"))]\n      let initial_period_duration = 2 * MONTHS;\n\n      let genesis_complete_block = GenesisLiquidity::<T>::genesis_complete_block();\n      genesis_complete_block.is_some() &&\n        (n.saturated_into::<u64>() < (genesis_complete_block.unwrap() + initial_period_duration))\n    }\n\n    /// Returns true if any of the external networks haven't reached economic security yet.\n    fn pre_ec_security() -> bool {\n      for n in EXTERNAL_NETWORKS {\n        if EconomicSecurity::<T>::economic_security_block(n).is_none() {\n          return true;\n        }\n      }\n      false\n    }\n\n    // Distribute the reward among network's set based on\n    // -> (key shares * stake per share) + ((stake % stake per share) / 2)\n    fn distribute_to_validators(n: NetworkId, reward: u64) {\n      let stake_per_share = ValidatorSets::<T>::allocation_per_key_share(n).unwrap().0;\n      let mut scores = vec![];\n      let mut total_score = 0u64;\n      for (p, amount) in Self::participants(n).unwrap() {\n        let remainder = amount % stake_per_share;\n        let score = amount - (remainder / 2);\n\n        total_score = total_score.saturating_add(score);\n        scores.push((p, score));\n      }\n\n      // stake the rewards\n      for (p, score) in scores {\n        let p_reward = u64::try_from(\n          u128::from(reward).saturating_mul(u128::from(score)) / u128::from(total_score),\n        )\n      
  .unwrap();\n\n        Coins::<T>::mint(p, Balance { coin: Coin::Serai, amount: Amount(p_reward) }).unwrap();\n        if ValidatorSets::<T>::distribute_block_rewards(n, p, Amount(p_reward)).is_err() {\n          // TODO: log the failure\n          continue;\n        }\n      }\n    }\n\n    pub fn swap_to_staked_sri(\n      to: PublicKey,\n      network: NetworkId,\n      balance: ExternalBalance,\n    ) -> DispatchResult {\n      // check the network didn't reach the economic security yet\n      if let NetworkId::External(n) = network {\n        if EconomicSecurity::<T>::economic_security_block(n).is_some() {\n          Err(Error::<T>::NetworkHasEconomicSecurity)?;\n        }\n      } else {\n        // we target 20% of the network's stake to be behind the Serai network\n        let mut total_stake = 0;\n        for n in NETWORKS {\n          total_stake += ValidatorSets::<T>::total_allocated_stake(n).unwrap_or(Amount(0)).0;\n        }\n\n        let stake = ValidatorSets::<T>::total_allocated_stake(network).unwrap_or(Amount(0)).0;\n        let desired_stake = total_stake / (100 / SERAI_VALIDATORS_DESIRED_PERCENTAGE);\n        if stake >= desired_stake {\n          Err(Error::<T>::NetworkHasEconomicSecurity)?;\n        }\n      }\n\n      // swap half of the liquidity for SRI to form PoL.\n      let half = balance.amount.0 / 2;\n      let path = BoundedVec::try_from(vec![balance.coin.into(), Coin::Serai]).unwrap();\n      let origin = RawOrigin::Signed(POL_ACCOUNT.into());\n      Dex::<T>::swap_exact_tokens_for_tokens(\n        origin.clone().into(),\n        path,\n        half,\n        1, // minimum out, so we accept whatever we get.\n        POL_ACCOUNT.into(),\n      )?;\n\n      // get how much we got for our swap\n      let sri_amount = Coins::<T>::balance(POL_ACCOUNT.into(), Coin::Serai).0;\n\n      // add liquidity\n      Dex::<T>::add_liquidity(\n        origin.clone().into(),\n        balance.coin,\n        half,\n        sri_amount,\n        1,\n     
   1,\n        POL_ACCOUNT.into(),\n      )?;\n\n      // use last block spot price to calculate how much SRI the balance makes.\n      let last_block = <frame_system::Pallet<T>>::block_number() - 1u32.into();\n      let value = Dex::<T>::spot_price_for_block(last_block, balance.coin)\n        .ok_or(Error::<T>::NoValueForCoin)?;\n      // TODO: may panic? It might be best for this math ops to return the result as is instead of\n      // doing an unwrap so that it can be properly dealt with.\n      let sri_amount = balance.amount.mul(value);\n\n      // Mint\n      Coins::<T>::mint(to, Balance { coin: Coin::Serai, amount: sri_amount })?;\n\n      // Stake the SRI for the network.\n      ValidatorSets::<T>::allocate(\n        frame_system::RawOrigin::Signed(to).into(),\n        network,\n        sri_amount,\n      )?;\n      Ok(())\n    }\n\n    fn update_participants() {\n      for n in NETWORKS {\n        let participants = ValidatorSets::<T>::participants_for_latest_decided_set(n)\n          .unwrap()\n          .into_iter()\n          .map(|(key, _)| (key, ValidatorSets::<T>::allocation((n, key)).unwrap_or(Amount(0)).0))\n          .collect::<Vec<_>>();\n\n        Participants::<T>::set(n, Some(participants.try_into().unwrap()));\n      }\n    }\n  }\n}\n\npub use pallet::*;\n"
  },
  {
    "path": "substrate/emissions/primitives/Cargo.toml",
    "content": "[package]\nname = \"serai-emissions-primitives\"\nversion = \"0.1.0\"\ndescription = \"Serai emissions primitives\"\nlicense = \"MIT\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/substrate/emissions/primitives\"\nauthors = [\"Akil Demir <akildemir72@gmail.com>\"]\nedition = \"2021\"\nrust-version = \"1.77\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nserai-primitives = { path = \"../../primitives\", default-features = false }\n\n[features]\nstd = [\"serai-primitives/std\"]\ndefault = [\"std\"]\n"
  },
  {
    "path": "substrate/emissions/primitives/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2024 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "substrate/emissions/primitives/src/lib.rs",
    "content": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![cfg_attr(docsrs, feature(doc_cfg))]\n#![cfg_attr(not(feature = \"std\"), no_std)]\n\nuse serai_primitives::{DAYS, YEARS, SeraiAddress, system_address};\n\n// Protocol owned liquidity account.\npub const POL_ACCOUNT: SeraiAddress = system_address(b\"Serai-protocol_owned_liquidity\");\n\n/// INITIAL_REWARD = 100,000 SRI / BLOCKS_PER_DAY for 60 days\npub const INITIAL_REWARD_PER_BLOCK: u64 = (100_000 * 10u64.pow(8)) / DAYS;\n\n/// REWARD = 20M SRI / BLOCKS_PER_YEAR\npub const REWARD_PER_BLOCK: u64 = (20_000_000 * 10u64.pow(8)) / YEARS;\n\n/// 20% of all stake desired to be for Serai network\npub const SERAI_VALIDATORS_DESIRED_PERCENTAGE: u64 = 20;\n\n/// Desired unused capacity ratio for a network assuming capacity is 10,000.\npub const DESIRED_DISTRIBUTION: u64 = 1_000;\n\n/// Percentage scale for the validator vs. pool reward distribution.\npub const ACCURACY_MULTIPLIER: u64 = 10_000;\n\n/// The block to target for economic security\npub const SECURE_BY: u64 = YEARS;\n"
  },
  {
    "path": "substrate/genesis-liquidity/pallet/Cargo.toml",
    "content": "[package]\nname = \"serai-genesis-liquidity-pallet\"\nversion = \"0.1.0\"\ndescription = \"Genesis liquidity pallet for Serai\"\nlicense = \"AGPL-3.0-only\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/substrate/genesis-liquidity/pallet\"\nauthors = [\"Akil Demir <akildemir72@gmail.com>\"]\nedition = \"2021\"\nrust-version = \"1.77\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[package.metadata.cargo-machete]\nignored = [\"scale\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nscale = { package = \"parity-scale-codec\", version = \"3\", default-features = false, features = [\"derive\"] }\n\nframe-system = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nframe-support = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\n\nsp-std = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nsp-core = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nsp-application-crypto = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\n\ndex-pallet = { package = \"serai-dex-pallet\", path = \"../../dex/pallet\", default-features = false }\ncoins-pallet = { package = \"serai-coins-pallet\", path = \"../../coins/pallet\", default-features = false }\nvalidator-sets-pallet = { package = \"serai-validator-sets-pallet\", path = \"../../validator-sets/pallet\", default-features = false }\n\neconomic-security-pallet = { package = \"serai-economic-security-pallet\", path = \"../../economic-security/pallet\", default-features = false }\n\nserai-primitives = { path = 
\"../../primitives\", default-features = false }\ngenesis-liquidity-primitives = { package = \"serai-genesis-liquidity-primitives\", path = \"../primitives\", default-features = false }\nvalidator-sets-primitives = { package = \"serai-validator-sets-primitives\", path = \"../../validator-sets/primitives\", default-features = false }\n\n[features]\nstd = [\n  \"scale/std\",\n\n  \"frame-system/std\",\n  \"frame-support/std\",\n\n  \"sp-std/std\",\n  \"sp-core/std\",\n  \"sp-application-crypto/std\",\n\n  \"coins-pallet/std\",\n  \"dex-pallet/std\",\n  \"validator-sets-pallet/std\",\n\n  \"economic-security-pallet/std\",\n\n  \"serai-primitives/std\",\n  \"genesis-liquidity-primitives/std\",\n  \"validator-sets-primitives/std\",\n]\ntry-runtime = [] # TODO\nfast-epoch = []\n\ndefault = [\"std\"]\n"
  },
  {
    "path": "substrate/genesis-liquidity/pallet/LICENSE",
    "content": "AGPL-3.0-only license\n\nCopyright (c) 2024 Luke Parker\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License Version 3 as\npublished by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n"
  },
  {
    "path": "substrate/genesis-liquidity/pallet/src/lib.rs",
    "content": "#![cfg_attr(not(feature = \"std\"), no_std)]\n\n#[allow(\n  unreachable_patterns,\n  clippy::cast_possible_truncation,\n  clippy::no_effect_underscore_binding,\n  clippy::empty_docs\n)]\n#[frame_support::pallet]\npub mod pallet {\n  use super::*;\n  use frame_system::{pallet_prelude::*, RawOrigin};\n  use frame_support::{pallet_prelude::*, sp_runtime::SaturatedConversion};\n\n  use sp_std::{vec, vec::Vec};\n  use sp_core::sr25519::Signature;\n  use sp_application_crypto::RuntimePublic;\n\n  use dex_pallet::{Pallet as Dex, Config as DexConfig};\n  use coins_pallet::{Config as CoinsConfig, Pallet as Coins};\n  use validator_sets_pallet::{Config as VsConfig, Pallet as ValidatorSets};\n\n  use economic_security_pallet::{Config as EconomicSecurityConfig, Pallet as EconomicSecurity};\n\n  use serai_primitives::*;\n  use validator_sets_primitives::{ValidatorSet, musig_key};\n  pub use genesis_liquidity_primitives as primitives;\n  use primitives::*;\n\n  // TODO: Have a more robust way of accessing LiquidityTokens pallet.\n  /// LiquidityTokens Pallet as an instance of coins pallet.\n  pub type LiquidityTokens<T> = coins_pallet::Pallet<T, coins_pallet::Instance1>;\n\n  #[pallet::config]\n  pub trait Config:\n    frame_system::Config\n    + VsConfig\n    + DexConfig\n    + EconomicSecurityConfig\n    + CoinsConfig\n    + coins_pallet::Config<coins_pallet::Instance1>\n  {\n  }\n\n  #[pallet::error]\n  pub enum Error<T> {\n    GenesisPeriodEnded,\n    AmountOverflowed,\n    NotEnoughLiquidity,\n    CanOnlyRemoveFullAmount,\n  }\n\n  #[pallet::event]\n  #[pallet::generate_deposit(fn deposit_event)]\n  pub enum Event<T: Config> {\n    GenesisLiquidityAdded { by: SeraiAddress, balance: ExternalBalance },\n    GenesisLiquidityRemoved { by: SeraiAddress, balance: ExternalBalance },\n    GenesisLiquidityAddedToPool { coin: ExternalBalance, sri: Amount },\n  }\n\n  #[pallet::pallet]\n  pub struct Pallet<T>(PhantomData<T>);\n\n  /// Keeps shares and the amount of 
coins per account.\n  #[pallet::storage]\n  pub(crate) type Liquidity<T: Config> = StorageDoubleMap<\n    _,\n    Identity,\n    ExternalCoin,\n    Blake2_128Concat,\n    PublicKey,\n    LiquidityAmount,\n    OptionQuery,\n  >;\n\n  /// Keeps the total shares and the total amount of coins per coin.\n  #[pallet::storage]\n  pub(crate) type Supply<T: Config> =\n    StorageMap<_, Identity, ExternalCoin, LiquidityAmount, OptionQuery>;\n\n  #[pallet::storage]\n  pub(crate) type Oracle<T: Config> = StorageMap<_, Identity, ExternalCoin, u64, OptionQuery>;\n\n  #[pallet::storage]\n  #[pallet::getter(fn genesis_complete_block)]\n  pub(crate) type GenesisCompleteBlock<T: Config> = StorageValue<_, u64, OptionQuery>;\n\n  #[pallet::hooks]\n  impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {\n    fn on_initialize(n: BlockNumberFor<T>) -> Weight {\n      #[cfg(feature = \"fast-epoch\")]\n      let final_block = 10u64;\n\n      #[cfg(not(feature = \"fast-epoch\"))]\n      let final_block = MONTHS;\n\n      // Distribute the genesis sri to pools after a month\n      if (n.saturated_into::<u64>() >= final_block) &&\n        Self::oraclization_is_done() &&\n        GenesisCompleteBlock::<T>::get().is_none()\n      {\n        // mint the SRI\n        Coins::<T>::mint(\n          GENESIS_LIQUIDITY_ACCOUNT.into(),\n          Balance { coin: Coin::Serai, amount: Amount(GENESIS_SRI) },\n        )\n        .unwrap();\n\n        // get pool & total values\n        let mut pool_values = vec![];\n        let mut total_value: u128 = 0;\n        for coin in EXTERNAL_COINS {\n          // initial coin value in terms of btc\n          let Some(value) = Oracle::<T>::get(coin) else {\n            continue;\n          };\n\n          let pool_amount =\n            u128::from(Supply::<T>::get(coin).unwrap_or(LiquidityAmount::zero()).coins);\n          let pool_value = pool_amount\n            .checked_mul(value.into())\n            .unwrap()\n            
.checked_div(10u128.pow(coin.decimals()))\n            .unwrap();\n          total_value = total_value.checked_add(pool_value).unwrap();\n          pool_values.push((coin, pool_amount, pool_value));\n        }\n\n        // add the liquidity per pool\n        let mut total_sri_distributed = 0;\n        let pool_values_len = pool_values.len();\n        for (i, (coin, pool_amount, pool_value)) in pool_values.into_iter().enumerate() {\n          // whatever sri left for the last coin should be ~= it's ratio\n          let sri_amount = if i == (pool_values_len - 1) {\n            GENESIS_SRI.checked_sub(total_sri_distributed).unwrap()\n          } else {\n            u64::try_from(\n              u128::from(GENESIS_SRI)\n                .checked_mul(pool_value)\n                .unwrap()\n                .checked_div(total_value)\n                .unwrap(),\n            )\n            .unwrap()\n          };\n          total_sri_distributed = total_sri_distributed.checked_add(sri_amount).unwrap();\n\n          // actually add the liquidity to dex\n          let origin = RawOrigin::Signed(GENESIS_LIQUIDITY_ACCOUNT.into());\n          let Ok(()) = Dex::<T>::add_liquidity(\n            origin.into(),\n            coin,\n            u64::try_from(pool_amount).unwrap(),\n            sri_amount,\n            u64::try_from(pool_amount).unwrap(),\n            sri_amount,\n            GENESIS_LIQUIDITY_ACCOUNT.into(),\n          ) else {\n            continue;\n          };\n\n          // let everyone know about the event\n          Self::deposit_event(Event::GenesisLiquidityAddedToPool {\n            coin: ExternalBalance { coin, amount: Amount(u64::try_from(pool_amount).unwrap()) },\n            sri: Amount(sri_amount),\n          });\n        }\n        assert_eq!(total_sri_distributed, GENESIS_SRI);\n\n        // we shouldn't have left any coin in genesis account at this moment, including SRI.\n        // All transferred to the pools.\n        for coin in COINS {\n         
 assert_eq!(Coins::<T>::balance(GENESIS_LIQUIDITY_ACCOUNT.into(), coin), Amount(0));\n        }\n\n        GenesisCompleteBlock::<T>::set(Some(n.saturated_into::<u64>()));\n      }\n\n      Weight::zero() // TODO\n    }\n  }\n\n  impl<T: Config> Pallet<T> {\n    /// Add genesis liquidity for the given account. All accounts that provide liquidity\n    /// will receive the genesis SRI according to their liquidity ratio.\n    pub fn add_coin_liquidity(account: PublicKey, balance: ExternalBalance) -> DispatchResult {\n      // check we are still in genesis period\n      if Self::genesis_ended() {\n        Err(Error::<T>::GenesisPeriodEnded)?;\n      }\n\n      // calculate new shares & supply\n      let (new_liquidity, new_supply) = if let Some(supply) = Supply::<T>::get(balance.coin) {\n        // calculate amount of shares for this amount\n        let shares = Self::mul_div(supply.shares, balance.amount.0, supply.coins)?;\n\n        // get new shares for this account\n        let existing =\n          Liquidity::<T>::get(balance.coin, account).unwrap_or(LiquidityAmount::zero());\n        (\n          LiquidityAmount {\n            shares: existing.shares.checked_add(shares).ok_or(Error::<T>::AmountOverflowed)?,\n            coins: existing\n              .coins\n              .checked_add(balance.amount.0)\n              .ok_or(Error::<T>::AmountOverflowed)?,\n          },\n          LiquidityAmount {\n            shares: supply.shares.checked_add(shares).ok_or(Error::<T>::AmountOverflowed)?,\n            coins: supply\n              .coins\n              .checked_add(balance.amount.0)\n              .ok_or(Error::<T>::AmountOverflowed)?,\n          },\n        )\n      } else {\n        let first_amount =\n          LiquidityAmount { shares: INITIAL_GENESIS_LP_SHARES, coins: balance.amount.0 };\n        (first_amount, first_amount)\n      };\n\n      // save\n      Liquidity::<T>::set(balance.coin, account, Some(new_liquidity));\n      Supply::<T>::set(balance.coin, 
Some(new_supply));\n      Self::deposit_event(Event::GenesisLiquidityAdded { by: account.into(), balance });\n      Ok(())\n    }\n\n    /// Returns the number of blocks since the all networks reached economic security first time.\n    /// If networks is yet to be reached that threshold, None is returned.\n    fn blocks_since_ec_security() -> Option<u64> {\n      let mut min = u64::MAX;\n      for n in EXTERNAL_NETWORKS {\n        let ec_security_block =\n          EconomicSecurity::<T>::economic_security_block(n)?.saturated_into::<u64>();\n        let current = <frame_system::Pallet<T>>::block_number().saturated_into::<u64>();\n        let diff = current.saturating_sub(ec_security_block);\n        min = diff.min(min);\n      }\n      Some(min)\n    }\n\n    fn genesis_ended() -> bool {\n      Self::oraclization_is_done() &&\n        <frame_system::Pallet<T>>::block_number().saturated_into::<u64>() >= MONTHS\n    }\n\n    fn oraclization_is_done() -> bool {\n      for c in EXTERNAL_COINS {\n        if Oracle::<T>::get(c).is_none() {\n          return false;\n        }\n      }\n\n      true\n    }\n\n    fn mul_div(a: u64, b: u64, c: u64) -> Result<u64, Error<T>> {\n      let a = u128::from(a);\n      let b = u128::from(b);\n      let c = u128::from(c);\n\n      let result = a\n        .checked_mul(b)\n        .ok_or(Error::<T>::AmountOverflowed)?\n        .checked_div(c)\n        .ok_or(Error::<T>::AmountOverflowed)?;\n\n      result.try_into().map_err(|_| Error::<T>::AmountOverflowed)\n    }\n  }\n\n  #[pallet::call]\n  impl<T: Config> Pallet<T> {\n    /// Remove the provided genesis liquidity for an account.\n    #[pallet::call_index(0)]\n    #[pallet::weight((0, DispatchClass::Operational))] // TODO\n    pub fn remove_coin_liquidity(origin: OriginFor<T>, balance: ExternalBalance) -> DispatchResult {\n      let account = ensure_signed(origin)?;\n      let origin = RawOrigin::Signed(GENESIS_LIQUIDITY_ACCOUNT.into());\n      let supply = 
Supply::<T>::get(balance.coin).ok_or(Error::<T>::NotEnoughLiquidity)?;\n\n      // check we are still in genesis period\n      let (new_liquidity, new_supply) = if Self::genesis_ended() {\n        // see how much liq tokens we have\n        let total_liq_tokens =\n          LiquidityTokens::<T>::balance(GENESIS_LIQUIDITY_ACCOUNT.into(), Coin::Serai).0;\n\n        // get how much user wants to remove\n        let LiquidityAmount { shares, coins } =\n          Liquidity::<T>::get(balance.coin, account).unwrap_or(LiquidityAmount::zero());\n        let total_shares = Supply::<T>::get(balance.coin).unwrap_or(LiquidityAmount::zero()).shares;\n        let user_liq_tokens = Self::mul_div(total_liq_tokens, shares, total_shares)?;\n        let amount_to_remove =\n          Self::mul_div(user_liq_tokens, balance.amount.0, INITIAL_GENESIS_LP_SHARES)?;\n\n        // remove liquidity from pool\n        let prev_sri = Coins::<T>::balance(GENESIS_LIQUIDITY_ACCOUNT.into(), Coin::Serai);\n        let prev_coin = Coins::<T>::balance(GENESIS_LIQUIDITY_ACCOUNT.into(), balance.coin.into());\n        Dex::<T>::remove_liquidity(\n          origin.clone().into(),\n          balance.coin,\n          amount_to_remove,\n          1,\n          1,\n          GENESIS_LIQUIDITY_ACCOUNT.into(),\n        )?;\n        let current_sri = Coins::<T>::balance(GENESIS_LIQUIDITY_ACCOUNT.into(), Coin::Serai);\n        let current_coin =\n          Coins::<T>::balance(GENESIS_LIQUIDITY_ACCOUNT.into(), balance.coin.into());\n\n        // burn the SRI if necessary\n        // TODO: take into consideration movement between pools.\n        let mut sri: u64 = current_sri.0.saturating_sub(prev_sri.0);\n        let distance_to_full_pay =\n          GENESIS_SRI_TRICKLE_FEED.saturating_sub(Self::blocks_since_ec_security().unwrap_or(0));\n        let burn_sri_amount = u64::try_from(\n          u128::from(sri)\n            .checked_mul(u128::from(distance_to_full_pay))\n            
.ok_or(Error::<T>::AmountOverflowed)?\n            .checked_div(u128::from(GENESIS_SRI_TRICKLE_FEED))\n            .ok_or(Error::<T>::AmountOverflowed)?,\n        )\n        .map_err(|_| Error::<T>::AmountOverflowed)?;\n        Coins::<T>::burn(\n          origin.clone().into(),\n          Balance { coin: Coin::Serai, amount: Amount(burn_sri_amount) },\n        )?;\n        sri = sri.checked_sub(burn_sri_amount).ok_or(Error::<T>::AmountOverflowed)?;\n\n        // transfer to owner\n        let coin_out = current_coin.0.saturating_sub(prev_coin.0);\n        Coins::<T>::transfer(\n          origin.clone().into(),\n          account,\n          Balance { coin: balance.coin.into(), amount: Amount(coin_out) },\n        )?;\n        Coins::<T>::transfer(\n          origin.into(),\n          account,\n          Balance { coin: Coin::Serai, amount: Amount(sri) },\n        )?;\n\n        // return new amounts\n        (\n          LiquidityAmount {\n            shares: shares.checked_sub(amount_to_remove).ok_or(Error::<T>::AmountOverflowed)?,\n            coins: coins.checked_sub(coin_out).ok_or(Error::<T>::AmountOverflowed)?,\n          },\n          LiquidityAmount {\n            shares: supply\n              .shares\n              .checked_sub(amount_to_remove)\n              .ok_or(Error::<T>::AmountOverflowed)?,\n            coins: supply.coins.checked_sub(coin_out).ok_or(Error::<T>::AmountOverflowed)?,\n          },\n        )\n      } else {\n        if balance.amount.0 != INITIAL_GENESIS_LP_SHARES {\n          Err(Error::<T>::CanOnlyRemoveFullAmount)?;\n        }\n        let existing =\n          Liquidity::<T>::get(balance.coin, account).ok_or(Error::<T>::NotEnoughLiquidity)?;\n\n        // transfer to the user\n        Coins::<T>::transfer(\n          origin.into(),\n          account,\n          Balance { coin: balance.coin.into(), amount: Amount(existing.coins) },\n        )?;\n\n        (\n          LiquidityAmount::zero(),\n          LiquidityAmount {\n       
     shares: supply\n              .shares\n              .checked_sub(existing.shares)\n              .ok_or(Error::<T>::AmountOverflowed)?,\n            coins: supply.coins.checked_sub(existing.coins).ok_or(Error::<T>::AmountOverflowed)?,\n          },\n        )\n      };\n\n      // save\n      if new_liquidity == LiquidityAmount::zero() {\n        Liquidity::<T>::set(balance.coin, account, None);\n      } else {\n        Liquidity::<T>::set(balance.coin, account, Some(new_liquidity));\n      }\n      Supply::<T>::set(balance.coin, Some(new_supply));\n\n      Self::deposit_event(Event::GenesisLiquidityRemoved { by: account.into(), balance });\n      Ok(())\n    }\n\n    /// A call to submit the initial coin values in terms of BTC.\n    #[pallet::call_index(1)]\n    #[pallet::weight((0, DispatchClass::Operational))] // TODO\n    pub fn oraclize_values(\n      origin: OriginFor<T>,\n      values: Values,\n      _signature: Signature,\n    ) -> DispatchResult {\n      ensure_none(origin)?;\n\n      // set their relative values\n      Oracle::<T>::set(ExternalCoin::Bitcoin, Some(10u64.pow(ExternalCoin::Bitcoin.decimals())));\n      Oracle::<T>::set(ExternalCoin::Monero, Some(values.monero));\n      Oracle::<T>::set(ExternalCoin::Ether, Some(values.ether));\n      Oracle::<T>::set(ExternalCoin::Dai, Some(values.dai));\n      Ok(())\n    }\n  }\n\n  #[pallet::validate_unsigned]\n  impl<T: Config> ValidateUnsigned for Pallet<T> {\n    type Call = Call<T>;\n\n    fn validate_unsigned(_: TransactionSource, call: &Self::Call) -> TransactionValidity {\n      match call {\n        Call::oraclize_values { ref values, ref signature } => {\n          let network = NetworkId::Serai;\n          let Some(session) = ValidatorSets::<T>::session(network) else {\n            return Err(TransactionValidityError::from(InvalidTransaction::Custom(0)));\n          };\n\n          let set = ValidatorSet { network, session };\n          let signers = 
ValidatorSets::<T>::participants_for_latest_decided_set(network)\n            .expect(\"no participant in the current set\")\n            .into_iter()\n            .map(|(p, _)| p)\n            .collect::<Vec<_>>();\n\n          // check this didn't get called before\n          if Self::oraclization_is_done() {\n            Err(InvalidTransaction::Custom(1))?;\n          }\n\n          // make sure signers settings the value at the end of the genesis period.\n          // we don't need this check for tests.\n          #[cfg(not(feature = \"fast-epoch\"))]\n          if <frame_system::Pallet<T>>::block_number().saturated_into::<u64>() < MONTHS {\n            Err(InvalidTransaction::Custom(2))?;\n          }\n\n          if !musig_key(set, &signers).verify(&oraclize_values_message(&set, values), signature) {\n            Err(InvalidTransaction::BadProof)?;\n          }\n\n          ValidTransaction::with_tag_prefix(\"GenesisLiquidity\")\n            .and_provides((0, set))\n            .longevity(u64::MAX)\n            .propagate(true)\n            .build()\n        }\n        Call::remove_coin_liquidity { .. } => Err(InvalidTransaction::Call)?,\n        Call::__Ignore(_, _) => unreachable!(),\n      }\n    }\n  }\n}\n\npub use pallet::*;\n"
  },
  {
    "path": "substrate/genesis-liquidity/primitives/Cargo.toml",
    "content": "[package]\nname = \"serai-genesis-liquidity-primitives\"\nversion = \"0.1.0\"\ndescription = \"Serai genesis liquidity primitives\"\nlicense = \"MIT\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/substrate/genesis-liquidity/primitives\"\nauthors = [\"Akil Demir <akildemir72@gmail.com>\"]\nedition = \"2021\"\nrust-version = \"1.77\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nzeroize = { version = \"^1.5\", features = [\"derive\"], optional = true }\n\nborsh = { version = \"1\", default-features = false, features = [\"derive\", \"de_strict_order\"], optional = true }\nserde = { version = \"1\", default-features = false, features = [\"derive\", \"alloc\"], optional = true }\n\nscale = { package = \"parity-scale-codec\", version = \"3\", default-features = false, features = [\"derive\"] }\n\nsp-std = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\n\nserai-primitives = { path = \"../../primitives\", default-features = false }\nvalidator-sets-primitives = { package = \"serai-validator-sets-primitives\", path = \"../../validator-sets/primitives\", default-features = false }\n\n[features]\nstd = [\n  \"zeroize\",\n  \"scale/std\",\n  \"borsh?/std\",\n  \"serde?/std\",\n\n  \"serai-primitives/std\",\n  \"validator-sets-primitives/std\",\n\n  \"sp-std/std\"\n]\ndefault = [\"std\"]\n"
  },
  {
    "path": "substrate/genesis-liquidity/primitives/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2024 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "substrate/genesis-liquidity/primitives/src/lib.rs",
    "content": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![cfg_attr(docsrs, feature(doc_cfg))]\n#![cfg_attr(not(feature = \"std\"), no_std)]\n\n#[cfg(feature = \"std\")]\nuse zeroize::Zeroize;\n\n#[cfg(feature = \"borsh\")]\nuse borsh::{BorshSerialize, BorshDeserialize};\n#[cfg(feature = \"serde\")]\nuse serde::{Serialize, Deserialize};\n\nuse sp_std::vec::Vec;\n\nuse scale::{Encode, Decode, DecodeWithMemTracking, MaxEncodedLen};\n\nuse serai_primitives::*;\nuse validator_sets_primitives::ValidatorSet;\n\npub const INITIAL_GENESIS_LP_SHARES: u64 = 10_000;\n\n// This is the account to hold and manage the genesis liquidity.\npub const GENESIS_LIQUIDITY_ACCOUNT: SeraiAddress = system_address(b\"GenesisLiquidity-account\");\n\n#[derive(\n  Clone, Copy, PartialEq, Eq, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen,\n)]\n#[cfg_attr(feature = \"std\", derive(Zeroize))]\n#[cfg_attr(feature = \"borsh\", derive(BorshSerialize, BorshDeserialize))]\n#[cfg_attr(feature = \"serde\", derive(Serialize, Deserialize))]\npub struct Values {\n  pub monero: u64,\n  pub ether: u64,\n  pub dai: u64,\n}\n\n#[derive(\n  Clone, Copy, PartialEq, Eq, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen,\n)]\n#[cfg_attr(feature = \"std\", derive(Zeroize))]\n#[cfg_attr(feature = \"borsh\", derive(BorshSerialize, BorshDeserialize))]\n#[cfg_attr(feature = \"serde\", derive(Serialize, Deserialize))]\npub struct LiquidityAmount {\n  pub shares: u64,\n  pub coins: u64,\n}\n\nimpl LiquidityAmount {\n  pub fn zero() -> Self {\n    LiquidityAmount { shares: 0, coins: 0 }\n  }\n}\n\n/// The message for the oraclize_values signature.\npub fn oraclize_values_message(set: &ValidatorSet, values: &Values) -> Vec<u8> {\n  (b\"GenesisLiquidity-oraclize_values\", set, values).encode()\n}\n"
  },
  {
    "path": "substrate/in-instructions/pallet/Cargo.toml",
    "content": "[package]\nname = \"serai-in-instructions-pallet\"\nversion = \"0.1.0\"\ndescription = \"Execute calls via In Instructions from unsigned transactions\"\nlicense = \"AGPL-3.0-only\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nedition = \"2021\"\npublish = false\nrust-version = \"1.74\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[package.metadata.cargo-machete]\nignored = [\"scale\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nscale = { package = \"parity-scale-codec\", version = \"3\", default-features = false, features = [\"derive\", \"max-encoded-len\"] }\n\nsp-std = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nsp-application-crypto = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nsp-io = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nsp-runtime = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nsp-core = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\n\nframe-system = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nframe-support = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\n\nserai-primitives = { path = \"../../primitives\", default-features = false }\nin-instructions-primitives = { package = \"serai-in-instructions-primitives\", path = \"../primitives\", default-features = false }\n\ncoins-pallet = { package = \"serai-coins-pallet\", 
path = \"../../coins/pallet\", default-features = false }\ndex-pallet = { package = \"serai-dex-pallet\", path = \"../../dex/pallet\", default-features = false }\nvalidator-sets-pallet = { package = \"serai-validator-sets-pallet\", path = \"../../validator-sets/pallet\", default-features = false }\ngenesis-liquidity-pallet = { package = \"serai-genesis-liquidity-pallet\", path = \"../../genesis-liquidity/pallet\", default-features = false }\nemissions-pallet = { package = \"serai-emissions-pallet\", path = \"../../emissions/pallet\", default-features = false }\n\n[features]\nstd = [\n  \"scale/std\",\n\n  \"sp-std/std\",\n  \"sp-application-crypto/std\",\n  \"sp-io/std\",\n  \"sp-runtime/std\",\n  \"sp-core/std\",\n\n  \"frame-system/std\",\n  \"frame-support/std\",\n\n  \"serai-primitives/std\",\n  \"in-instructions-primitives/std\",\n\n  \"coins-pallet/std\",\n  \"dex-pallet/std\",\n  \"validator-sets-pallet/std\",\n  \"genesis-liquidity-pallet/std\",\n  \"emissions-pallet/std\",\n]\ndefault = [\"std\"]\n\n# TODO\ntry-runtime = []\n"
  },
  {
    "path": "substrate/in-instructions/pallet/LICENSE",
    "content": "AGPL-3.0-only license\n\nCopyright (c) 2022-2023 Luke Parker\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License Version 3 as\npublished by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n"
  },
  {
    "path": "substrate/in-instructions/pallet/src/lib.rs",
    "content": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![cfg_attr(not(feature = \"std\"), no_std)]\n\nuse sp_io::hashing::blake2_256;\n\nuse serai_primitives::*;\n\npub use in_instructions_primitives as primitives;\nuse primitives::*;\n\n// TODO: Investigate why Substrate generates these\n#[allow(\n  unreachable_patterns,\n  clippy::cast_possible_truncation,\n  clippy::no_effect_underscore_binding,\n  clippy::empty_docs\n)]\n#[frame_support::pallet]\npub mod pallet {\n  use sp_std::vec;\n  use sp_application_crypto::RuntimePublic;\n  use sp_runtime::traits::Zero;\n  use sp_core::sr25519::Public;\n\n  use frame_support::pallet_prelude::*;\n  use frame_system::{pallet_prelude::*, RawOrigin};\n\n  use coins_pallet::{\n    Config as CoinsConfig, Pallet as Coins,\n    primitives::{OutInstruction, OutInstructionWithBalance},\n  };\n  use dex_pallet::{Config as DexConfig, Pallet as Dex};\n  use validator_sets_pallet::{\n    primitives::{Session, ValidatorSet, ExternalValidatorSet},\n    Config as ValidatorSetsConfig, Pallet as ValidatorSets,\n  };\n\n  use genesis_liquidity_pallet::{\n    Pallet as GenesisLiq, Config as GenesisLiqConfig, primitives::GENESIS_LIQUIDITY_ACCOUNT,\n  };\n  use emissions_pallet::{Pallet as Emissions, Config as EmissionsConfig, primitives::POL_ACCOUNT};\n\n  use super::*;\n\n  #[pallet::config]\n  pub trait Config:\n    frame_system::Config\n    + CoinsConfig\n    + DexConfig\n    + ValidatorSetsConfig\n    + GenesisLiqConfig\n    + EmissionsConfig\n  {\n  }\n\n  #[pallet::event]\n  #[pallet::generate_deposit(fn deposit_event)]\n  pub enum Event<T: Config> {\n    Batch { network: ExternalNetworkId, id: u32, block: BlockHash, instructions_hash: [u8; 32] },\n    InstructionFailure { network: ExternalNetworkId, id: u32, index: u32 },\n    Halt { network: ExternalNetworkId },\n  }\n\n  #[pallet::error]\n  pub enum Error<T> {\n    /// Coin and OutAddress types don't match.\n    InvalidAddressForCoin,\n  }\n\n  #[pallet::pallet]\n  pub struct 
Pallet<T>(PhantomData<T>);\n\n  // The ID of the last executed Batch for a network.\n  #[pallet::storage]\n  #[pallet::getter(fn batches)]\n  pub(crate) type LastBatch<T: Config> =\n    StorageMap<_, Identity, ExternalNetworkId, u32, OptionQuery>;\n\n  // The last Serai block in which this validator set included a batch\n  #[pallet::storage]\n  #[pallet::getter(fn last_batch_block)]\n  pub(crate) type LastBatchBlock<T: Config> =\n    StorageMap<_, Identity, ExternalNetworkId, BlockNumberFor<T>, OptionQuery>;\n\n  // Halted networks.\n  #[pallet::storage]\n  pub(crate) type Halted<T: Config> = StorageMap<_, Identity, ExternalNetworkId, (), OptionQuery>;\n\n  // The latest block a network has acknowledged as finalized\n  #[pallet::storage]\n  #[pallet::getter(fn latest_network_block)]\n  pub(crate) type LatestNetworkBlock<T: Config> =\n    StorageMap<_, Identity, ExternalNetworkId, BlockHash, OptionQuery>;\n\n  impl<T: Config> Pallet<T> {\n    // Use a dedicated transaction layer when executing this InInstruction\n    // This lets it individually error without causing any storage modifications\n    #[frame_support::transactional]\n    fn execute(instruction: InInstructionWithBalance) -> Result<(), DispatchError> {\n      match instruction.instruction {\n        InInstruction::Transfer(address) => {\n          Coins::<T>::mint(address.into(), instruction.balance.into())?;\n        }\n        InInstruction::Dex(call) => {\n          // This will only be initiated by external chain transactions. That is why we only need\n          // add liquidity and swaps. 
Other functionalities (such as remove_liq, etc) will be\n          // called directly from Serai with a native transaction.\n          match call {\n            DexCall::SwapAndAddLiquidity(address) => {\n              let origin = RawOrigin::Signed(IN_INSTRUCTION_EXECUTOR.into());\n              let coin = instruction.balance.coin;\n\n              // mint the given coin on the account\n              Coins::<T>::mint(IN_INSTRUCTION_EXECUTOR.into(), instruction.balance.into())?;\n\n              // swap half of it for SRI\n              let half = instruction.balance.amount.0 / 2;\n              let path = BoundedVec::try_from(vec![coin.into(), Coin::Serai]).unwrap();\n              Dex::<T>::swap_exact_tokens_for_tokens(\n                origin.clone().into(),\n                path,\n                half,\n                1, // minimum out, so we accept whatever we get.\n                IN_INSTRUCTION_EXECUTOR.into(),\n              )?;\n\n              // get how much we got for our swap\n              let sri_amount = Coins::<T>::balance(IN_INSTRUCTION_EXECUTOR.into(), Coin::Serai).0;\n\n              // add liquidity\n              Dex::<T>::add_liquidity(\n                origin.clone().into(),\n                coin,\n                half,\n                sri_amount,\n                1,\n                1,\n                address.into(),\n              )?;\n\n              // TODO: minimums are set to 1 above to guarantee successful adding liq call.\n              // Ideally we either get this info from user or send the leftovers back to user.\n              // Let's send the leftovers back to user for now.\n              let coin_balance = Coins::<T>::balance(IN_INSTRUCTION_EXECUTOR.into(), coin.into());\n              let sri_balance = Coins::<T>::balance(IN_INSTRUCTION_EXECUTOR.into(), Coin::Serai);\n              if coin_balance != Amount(0) {\n                Coins::<T>::transfer_internal(\n                  IN_INSTRUCTION_EXECUTOR.into(),\n             
     address.into(),\n                  Balance { coin: coin.into(), amount: coin_balance },\n                )?;\n              }\n              if sri_balance != Amount(0) {\n                Coins::<T>::transfer_internal(\n                  IN_INSTRUCTION_EXECUTOR.into(),\n                  address.into(),\n                  Balance { coin: Coin::Serai, amount: sri_balance },\n                )?;\n              }\n            }\n            DexCall::Swap(out_balance, out_address) => {\n              let send_to_external = !out_address.is_native();\n              let native_coin = out_balance.coin.is_native();\n\n              // we can't send native coin to external chain\n              if native_coin && send_to_external {\n                Err(Error::<T>::InvalidAddressForCoin)?;\n              }\n\n              // mint the given coin on our account\n              Coins::<T>::mint(IN_INSTRUCTION_EXECUTOR.into(), instruction.balance.into())?;\n\n              // get the path\n              let mut path = vec![instruction.balance.coin.into(), Coin::Serai];\n              if !native_coin {\n                path.push(out_balance.coin);\n              }\n\n              // get the swap address\n              // if the address is internal, we can directly swap to it. 
if not, we swap to\n              // ourselves and burn the coins to send them back on the external chain.\n              let send_to = if send_to_external {\n                IN_INSTRUCTION_EXECUTOR\n              } else {\n                out_address.clone().as_native().unwrap()\n              };\n\n              // do the swap\n              let origin = RawOrigin::Signed(IN_INSTRUCTION_EXECUTOR.into());\n              Dex::<T>::swap_exact_tokens_for_tokens(\n                origin.clone().into(),\n                BoundedVec::try_from(path).unwrap(),\n                instruction.balance.amount.0,\n                out_balance.amount.0,\n                send_to.into(),\n              )?;\n\n              // burn the received coins so that they sent back to the user\n              // if it is requested to an external address.\n              if send_to_external {\n                // see how much we got\n                let coin_balance =\n                  Coins::<T>::balance(IN_INSTRUCTION_EXECUTOR.into(), out_balance.coin);\n                let instruction = OutInstructionWithBalance {\n                  instruction: OutInstruction {\n                    address: out_address.as_external().unwrap(),\n                    // TODO: Properly pass data. 
Replace address with an OutInstruction entirely?\n                    data: None,\n                  },\n                  balance: ExternalBalance {\n                    coin: out_balance.coin.try_into().unwrap(),\n                    amount: coin_balance,\n                  },\n                };\n                Coins::<T>::burn_with_instruction(origin.into(), instruction)?;\n              }\n            }\n          }\n        }\n        InInstruction::GenesisLiquidity(address) => {\n          Coins::<T>::mint(GENESIS_LIQUIDITY_ACCOUNT.into(), instruction.balance.into())?;\n          GenesisLiq::<T>::add_coin_liquidity(address.into(), instruction.balance)?;\n        }\n        InInstruction::SwapToStakedSRI(address, network) => {\n          Coins::<T>::mint(POL_ACCOUNT.into(), instruction.balance.into())?;\n          Emissions::<T>::swap_to_staked_sri(address.into(), network, instruction.balance)?;\n        }\n      }\n      Ok(())\n    }\n\n    pub fn halt(network: ExternalNetworkId) -> Result<(), DispatchError> {\n      Halted::<T>::set(network, Some(()));\n      Self::deposit_event(Event::Halt { network });\n      Ok(())\n    }\n  }\n\n  fn keys_for_network<T: Config>(\n    network: ExternalNetworkId,\n  ) -> Result<(Session, Option<Public>, Option<Public>), InvalidTransaction> {\n    // If there's no session set, and therefore no keys set, then this must be an invalid signature\n    let Some(session) = ValidatorSets::<T>::session(NetworkId::from(network)) else {\n      Err(InvalidTransaction::BadProof)?\n    };\n    let mut set = ExternalValidatorSet { network, session };\n    let latest = ValidatorSets::<T>::keys(set).map(|keys| keys.0);\n    let prior = if set.session.0 != 0 {\n      set.session.0 -= 1;\n      ValidatorSets::<T>::keys(set).map(|keys| keys.0)\n    } else {\n      None\n    };\n    if prior.is_none() && latest.is_none() {\n      Err(InvalidTransaction::BadProof)?;\n    }\n    Ok((session, prior, latest))\n  }\n\n  #[pallet::call]\n  impl<T: 
Config> Pallet<T> {\n    #[pallet::call_index(0)]\n    #[pallet::weight((0, DispatchClass::Operational))] // TODO\n    pub fn execute_batch(origin: OriginFor<T>, batch: SignedBatch) -> DispatchResult {\n      ensure_none(origin)?;\n\n      let batch = batch.batch;\n\n      LatestNetworkBlock::<T>::insert(batch.network, batch.block);\n      Self::deposit_event(Event::Batch {\n        network: batch.network,\n        id: batch.id,\n        block: batch.block,\n        instructions_hash: blake2_256(&batch.instructions.encode()),\n      });\n      for (i, instruction) in batch.instructions.into_iter().enumerate() {\n        if Self::execute(instruction).is_err() {\n          Self::deposit_event(Event::InstructionFailure {\n            network: batch.network,\n            id: batch.id,\n            index: u32::try_from(i).unwrap(),\n          });\n        }\n      }\n\n      Ok(())\n    }\n  }\n\n  #[pallet::validate_unsigned]\n  impl<T: Config> ValidateUnsigned for Pallet<T> {\n    type Call = Call<T>;\n\n    fn validate_unsigned(_: TransactionSource, call: &Self::Call) -> TransactionValidity {\n      // Match to be exhaustive\n      let batch = match call {\n        Call::execute_batch { ref batch } => batch,\n        Call::__Ignore(_, _) => unreachable!(),\n      };\n\n      // verify the batch size\n      // TODO: Merge this encode with the one done by batch_message\n      if batch.batch.encode().len() > MAX_BATCH_SIZE {\n        Err(InvalidTransaction::ExhaustsResources)?;\n      }\n      let network = batch.batch.network;\n\n      // verify the signature\n      let (current_session, prior, current) = keys_for_network::<T>(network)?;\n      let batch_message = batch_message(&batch.batch);\n      // Check the prior key first since only a single `Batch` (the last one) will be when prior is\n      // Some yet prior wasn't the signing key\n      let valid_by_prior =\n        if let Some(key) = prior { key.verify(&batch_message, &batch.signature) } else { false };\n     
 let valid = valid_by_prior ||\n        (if let Some(key) = current {\n          key.verify(&batch_message, &batch.signature)\n        } else {\n          false\n        });\n      if !valid {\n        Err(InvalidTransaction::BadProof)?;\n      }\n\n      if Halted::<T>::contains_key(network) {\n        Err(InvalidTransaction::Custom(1))?;\n      }\n\n      // If it wasn't valid by the prior key, meaning it was valid by the current key, the current\n      // key is publishing `Batch`s. This should only happen once the current key has verified all\n      // `Batch`s published by the prior key, meaning they are accepting the hand-over.\n      if prior.is_some() && (!valid_by_prior) {\n        ValidatorSets::<T>::retire_set(ValidatorSet {\n          network: network.into(),\n          session: Session(current_session.0 - 1),\n        });\n      }\n\n      // check that this validator set isn't publishing a batch more than once per block\n      let current_block = <frame_system::Pallet<T>>::block_number();\n      let last_block = LastBatchBlock::<T>::get(network).unwrap_or(Zero::zero());\n      if last_block >= current_block {\n        Err(InvalidTransaction::Future)?;\n      }\n      LastBatchBlock::<T>::insert(batch.batch.network, frame_system::Pallet::<T>::block_number());\n\n      // Verify the batch is sequential\n      // LastBatch has the last ID set. 
The next ID should be it + 1\n      // If there's no ID, the next ID should be 0\n      let expected = LastBatch::<T>::get(network).map_or(0, |prev| prev + 1);\n      if batch.batch.id < expected {\n        Err(InvalidTransaction::Stale)?;\n      }\n      if batch.batch.id > expected {\n        Err(InvalidTransaction::Future)?;\n      }\n      LastBatch::<T>::insert(batch.batch.network, batch.batch.id);\n\n      // Verify all Balances in this Batch are for this network\n      for instruction in &batch.batch.instructions {\n        // Verify this coin is for this network\n        // If this is ever hit, it means the validator set has turned malicious and should be fully\n        // slashed\n        // Because we have an error here, no validator set which turns malicious should execute\n        // this code path\n        // Accordingly, there's no value in writing code to fully slash the network, when such an\n        // even would require a runtime upgrade to fully resolve anyways\n        if instruction.balance.coin.network() != batch.batch.network {\n          Err(InvalidTransaction::Custom(2))?;\n        }\n      }\n\n      ValidTransaction::with_tag_prefix(\"in-instructions\")\n        .and_provides((batch.batch.network, batch.batch.id))\n        // Set a 10 block longevity, though this should be included in the next block\n        .longevity(10)\n        .propagate(true)\n        .build()\n    }\n\n    // Explicitly provide a pre-dispatch which calls validate_unsigned\n    fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> {\n      Self::validate_unsigned(TransactionSource::InBlock, call).map(|_| ()).map_err(Into::into)\n    }\n  }\n}\n\npub use pallet::*;\n"
  },
  {
    "path": "substrate/in-instructions/primitives/Cargo.toml",
    "content": "[package]\nname = \"serai-in-instructions-primitives\"\nversion = \"0.1.0\"\ndescription = \"Serai instructions library, enabling encoding and decoding\"\nlicense = \"MIT\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nedition = \"2021\"\nrust-version = \"1.74\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nzeroize = { version = \"^1.5\", features = [\"derive\"], optional = true }\n\nborsh = { version = \"1\", default-features = false, features = [\"derive\", \"de_strict_order\"], optional = true }\nserde = { version = \"1\", default-features = false, features = [\"derive\", \"alloc\"], optional = true }\n\nscale = { package = \"parity-scale-codec\", version = \"3\", default-features = false, features = [\"derive\"] }\n\nsp-std = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nsp-application-crypto = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\n\nserai-primitives = { path = \"../../primitives\", default-features = false }\ncoins-primitives = { package = \"serai-coins-primitives\", path = \"../../coins/primitives\", default-features = false }\n\n[features]\nstd = [\n  \"zeroize\",\n\n  \"borsh?/std\",\n  \"serde?/std\",\n\n  \"scale/std\",\n\n  \"sp-std/std\",\n  \"sp-application-crypto/std\",\n\n  \"serai-primitives/std\",\n  \"coins-primitives/std\",\n]\nborsh = [\"dep:borsh\", \"serai-primitives/borsh\", \"coins-primitives/borsh\"]\nserde = [\"dep:serde\", \"serai-primitives/serde\", \"coins-primitives/serde\"]\ndefault = [\"std\"]\n"
  },
  {
    "path": "substrate/in-instructions/primitives/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2022-2023 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "substrate/in-instructions/primitives/src/lib.rs",
    "content": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![cfg_attr(not(feature = \"std\"), no_std)]\n#![expect(clippy::cast_possible_truncation)]\n\n#[cfg(feature = \"std\")]\nuse zeroize::Zeroize;\n\n#[cfg(feature = \"borsh\")]\nuse borsh::{BorshSerialize, BorshDeserialize};\n#[cfg(feature = \"serde\")]\nuse serde::{Serialize, Deserialize};\n\nuse scale::{Encode, Decode, DecodeWithMemTracking, MaxEncodedLen};\n\nuse sp_application_crypto::sr25519::Signature;\n\n#[cfg(not(feature = \"std\"))]\nuse sp_std::vec::Vec;\n\n#[rustfmt::skip]\nuse serai_primitives::{BlockHash, Balance, ExternalNetworkId, NetworkId, SeraiAddress, ExternalBalance, ExternalAddress, system_address};\n\nmod shorthand;\npub use shorthand::*;\n\npub const MAX_BATCH_SIZE: usize = 25_000; // ~25kb\n\n// This is the account which will be the origin for any dispatched `InInstruction`s.\npub const IN_INSTRUCTION_EXECUTOR: SeraiAddress = system_address(b\"InInstructions-executor\");\n\n#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen)]\n#[cfg_attr(feature = \"std\", derive(Zeroize))]\n#[cfg_attr(feature = \"borsh\", derive(BorshSerialize, BorshDeserialize))]\n#[cfg_attr(feature = \"serde\", derive(Serialize, Deserialize))]\npub enum OutAddress {\n  Serai(SeraiAddress),\n  External(ExternalAddress),\n}\n\nimpl OutAddress {\n  pub fn is_native(&self) -> bool {\n    matches!(self, Self::Serai(_))\n  }\n\n  pub fn as_native(self) -> Option<SeraiAddress> {\n    match self {\n      Self::Serai(addr) => Some(addr),\n      _ => None,\n    }\n  }\n\n  pub fn as_external(self) -> Option<ExternalAddress> {\n    match self {\n      Self::External(addr) => Some(addr),\n      Self::Serai(_) => None,\n    }\n  }\n}\n\n#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen)]\n#[cfg_attr(feature = \"std\", derive(Zeroize))]\n#[cfg_attr(feature = \"borsh\", derive(BorshSerialize, 
BorshDeserialize))]\n#[cfg_attr(feature = \"serde\", derive(Serialize, Deserialize))]\npub enum DexCall {\n  // address to send the lp tokens to\n  // TODO: Update this per documentation/Shorthand\n  SwapAndAddLiquidity(SeraiAddress),\n  // minimum out balance and out address\n  Swap(Balance, OutAddress),\n}\n\n#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen)]\n#[cfg_attr(feature = \"std\", derive(Zeroize))]\n#[cfg_attr(feature = \"borsh\", derive(BorshSerialize, BorshDeserialize))]\n#[cfg_attr(feature = \"serde\", derive(Serialize, Deserialize))]\npub enum InInstruction {\n  Transfer(SeraiAddress),\n  Dex(DexCall),\n  GenesisLiquidity(SeraiAddress),\n  SwapToStakedSRI(SeraiAddress, NetworkId),\n}\n\n#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen)]\n#[cfg_attr(feature = \"std\", derive(Zeroize))]\n#[cfg_attr(feature = \"borsh\", derive(BorshSerialize, BorshDeserialize))]\n#[cfg_attr(feature = \"serde\", derive(Serialize, Deserialize))]\npub struct RefundableInInstruction {\n  pub origin: Option<ExternalAddress>,\n  pub instruction: InInstruction,\n}\n\n#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen)]\n#[cfg_attr(feature = \"std\", derive(Zeroize))]\n#[cfg_attr(feature = \"borsh\", derive(BorshSerialize, BorshDeserialize))]\n#[cfg_attr(feature = \"serde\", derive(Serialize, Deserialize))]\npub struct InInstructionWithBalance {\n  pub instruction: InInstruction,\n  pub balance: ExternalBalance,\n}\n\n#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, DecodeWithMemTracking)]\n#[cfg_attr(feature = \"std\", derive(Zeroize))]\n#[cfg_attr(feature = \"borsh\", derive(BorshSerialize, BorshDeserialize))]\n#[cfg_attr(feature = \"serde\", derive(Serialize, Deserialize))]\npub struct Batch {\n  pub network: ExternalNetworkId,\n  pub id: u32,\n  pub block: BlockHash,\n  pub instructions: Vec<InInstructionWithBalance>,\n}\n\n#[derive(Clone, 
PartialEq, Eq, Debug, Encode, Decode, DecodeWithMemTracking)]\n#[cfg_attr(feature = \"borsh\", derive(BorshSerialize, BorshDeserialize))]\n#[cfg_attr(feature = \"serde\", derive(Serialize, Deserialize))]\npub struct SignedBatch {\n  pub batch: Batch,\n  #[cfg_attr(\n    feature = \"borsh\",\n    borsh(\n      serialize_with = \"serai_primitives::borsh_serialize_signature\",\n      deserialize_with = \"serai_primitives::borsh_deserialize_signature\"\n    )\n  )]\n  pub signature: Signature,\n}\n\n#[cfg(feature = \"std\")]\nimpl Zeroize for SignedBatch {\n  fn zeroize(&mut self) {\n    self.batch.zeroize();\n    let signature: &mut [u8] = self.signature.as_mut();\n    signature.zeroize();\n  }\n}\n\n// TODO: Make this an associated method?\n/// The message for the batch signature.\npub fn batch_message(batch: &Batch) -> Vec<u8> {\n  [b\"InInstructions-batch\".as_ref(), &batch.encode()].concat()\n}\n"
  },
  {
    "path": "substrate/in-instructions/primitives/src/shorthand.rs",
    "content": "#[cfg(feature = \"std\")]\nuse zeroize::Zeroize;\n\n#[cfg(feature = \"borsh\")]\nuse borsh::{BorshSerialize, BorshDeserialize};\n#[cfg(feature = \"serde\")]\nuse serde::{Serialize, Deserialize};\n\nuse scale::{Encode, Decode, DecodeWithMemTracking, MaxEncodedLen};\n\nuse serai_primitives::{Amount, ExternalAddress, ExternalCoin, SeraiAddress};\n\nuse coins_primitives::OutInstruction;\n\nuse crate::RefundableInInstruction;\n#[cfg(feature = \"std\")]\nuse crate::InInstruction;\n\n#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen)]\n#[cfg_attr(feature = \"std\", derive(Zeroize))]\n#[cfg_attr(feature = \"borsh\", derive(BorshSerialize, BorshDeserialize))]\n#[cfg_attr(feature = \"serde\", derive(Serialize, Deserialize))]\npub enum Shorthand {\n  Raw(RefundableInInstruction),\n  Swap {\n    origin: Option<ExternalAddress>,\n    coin: ExternalCoin,\n    minimum: Amount,\n    out: OutInstruction,\n  },\n  SwapAndAddLiquidity {\n    origin: Option<ExternalAddress>,\n    minimum: Amount,\n    gas: Amount,\n    address: SeraiAddress,\n  },\n}\n\nimpl Shorthand {\n  #[cfg(feature = \"std\")]\n  pub fn transfer(origin: Option<ExternalAddress>, address: SeraiAddress) -> Self {\n    Self::Raw(RefundableInInstruction { origin, instruction: InInstruction::Transfer(address) })\n  }\n}\n\nimpl TryFrom<Shorthand> for RefundableInInstruction {\n  type Error = &'static str;\n  fn try_from(shorthand: Shorthand) -> Result<RefundableInInstruction, &'static str> {\n    Ok(match shorthand {\n      Shorthand::Raw(instruction) => instruction,\n      Shorthand::Swap { .. } => todo!(),\n      Shorthand::SwapAndAddLiquidity { .. } => todo!(),\n    })\n  }\n}\n"
  },
  {
    "path": "substrate/node/Cargo.toml",
    "content": "[package]\nname = \"serai-node\"\nversion = \"0.1.0\"\ndescription = \"Serai network node, built over Substrate\"\nlicense = \"AGPL-3.0-only\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/substrate/node\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nedition = \"2021\"\npublish = false\nrust-version = \"1.74\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[[bin]]\nname = \"serai-node\"\n\n[dependencies]\nrand_core = \"0.6\"\nzeroize = \"1\"\nhex = \"0.4\"\nlog = \"0.4\"\n\nschnorrkel = \"0.11\"\n\nlibp2p = \"0.56\"\n\nsp-core = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev =  \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\" }\nsp-keystore = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev =  \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\" }\nsp-timestamp = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev =  \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\" }\nsp-state-machine = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev =  \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\" }\nsp-io = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev =  \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\" }\nsp-blockchain = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev =  \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\" }\nsp-runtime = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev =  \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\" }\nsp-api = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev =  \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\" }\nsp-block-builder = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev =  \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\" }\nsp-consensus-babe = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev =  \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\" }\n\nframe-benchmarking = { git = 
\"https://github.com/serai-dex/patch-polkadot-sdk\", rev =  \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\" }\n\nserai-runtime = { path = \"../runtime\", features = [\"std\"] }\n\nclap = { version = \"4\", features = [\"derive\"] }\n\nfutures-util = \"0.3\"\ntokio = { version = \"1\", features = [\"sync\", \"rt-multi-thread\"] }\njsonrpsee = { version = \"0.24\", features = [\"server\"] }\n\nsc-transaction-pool = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev =  \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\" }\nsc-transaction-pool-api = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev =  \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\" }\nsc-basic-authorship = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev =  \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\" }\nsc-executor = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev =  \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\" }\nsc-service = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev =  \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\" }\nsc-client-api = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev =  \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\" }\nsc-network = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev =  \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\" }\n\nsc-consensus = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev =  \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\" }\nsc-consensus-babe = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev =  \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\" }\nsc-consensus-grandpa = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev =  \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\" }\nsc-authority-discovery = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev =  \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\" }\n\nsc-telemetry = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev =  
\"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\" }\nsc-chain-spec = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev =  \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\" }\nsc-cli = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev =  \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\" }\n\nframe-system-rpc-runtime-api = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev =  \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\" }\nsubstrate-frame-rpc-system = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev =  \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\" }\npallet-transaction-payment-rpc = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev =  \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\" }\n\nserai-env = { path = \"../../common/env\" }\n\n[build-dependencies]\nsubstrate-build-script-utils = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev =  \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\" }\n\n[features]\ndefault = []\nfast-epoch = [\"serai-runtime/fast-epoch\"]\nruntime-benchmarks = [\n  \"frame-benchmarking/runtime-benchmarks\",\n\n  \"serai-runtime/runtime-benchmarks\",\n]\n"
  },
  {
    "path": "substrate/node/LICENSE",
    "content": "AGPL-3.0-only license\n\nCopyright (c) 2022-2023 Luke Parker\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License Version 3 as\npublished by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n"
  },
  {
    "path": "substrate/node/build.rs",
    "content": "use substrate_build_script_utils::generate_cargo_keys;\n\nfn main() {\n  generate_cargo_keys();\n}\n"
  },
  {
    "path": "substrate/node/src/chain_spec.rs",
    "content": "use core::marker::PhantomData;\nuse std::collections::HashSet;\n\nuse sp_core::{Decode, Pair as PairTrait, sr25519::Public};\n\nuse sc_service::ChainType;\n\nuse serai_runtime::{\n  primitives::*, WASM_BINARY, BABE_GENESIS_EPOCH_CONFIG, RuntimeGenesisConfig, SystemConfig,\n  CoinsConfig, ValidatorSetsConfig, SignalsConfig, BabeConfig, GrandpaConfig, EmissionsConfig,\n};\n\npub type ChainSpec = sc_service::GenericChainSpec;\n\nfn account_from_name(name: &'static str) -> PublicKey {\n  insecure_pair_from_name(name).public()\n}\n\nfn wasm_binary() -> Vec<u8> {\n  // TODO: Accept a config of runtime path\n  const WASM_PATH: &str = \"/runtime/serai.wasm\";\n  if let Ok(binary) = std::fs::read(WASM_PATH) {\n    log::info!(\"using {WASM_PATH}\");\n    return binary;\n  }\n  log::info!(\"using built-in wasm\");\n  WASM_BINARY.ok_or(\"compiled in wasm not available\").unwrap().to_vec()\n}\n\nfn devnet_genesis(\n  validators: &[&'static str],\n  endowed_accounts: Vec<PublicKey>,\n) -> RuntimeGenesisConfig {\n  let validators = validators.iter().map(|name| account_from_name(name)).collect::<Vec<_>>();\n  let key_shares = NETWORKS\n    .iter()\n    .map(|network| match network {\n      NetworkId::Serai => (NetworkId::Serai, Amount(50_000 * 10_u64.pow(8))),\n      NetworkId::External(ExternalNetworkId::Bitcoin) => {\n        (NetworkId::External(ExternalNetworkId::Bitcoin), Amount(1_000_000 * 10_u64.pow(8)))\n      }\n      NetworkId::External(ExternalNetworkId::Ethereum) => {\n        (NetworkId::External(ExternalNetworkId::Ethereum), Amount(1_000_000 * 10_u64.pow(8)))\n      }\n      NetworkId::External(ExternalNetworkId::Monero) => {\n        (NetworkId::External(ExternalNetworkId::Monero), Amount(100_000 * 10_u64.pow(8)))\n      }\n    })\n    .collect::<Vec<_>>();\n\n  RuntimeGenesisConfig {\n    system: SystemConfig { _config: PhantomData },\n\n    transaction_payment: Default::default(),\n\n    coins: CoinsConfig {\n      accounts: endowed_accounts\n      
  .into_iter()\n        .map(|a| (a, Balance { coin: Coin::Serai, amount: Amount(1 << 60) }))\n        .collect(),\n      _ignore: Default::default(),\n    },\n\n    validator_sets: ValidatorSetsConfig {\n      networks: key_shares.clone(),\n      participants: validators.clone(),\n    },\n    emissions: EmissionsConfig { networks: key_shares, participants: validators.clone() },\n    signals: SignalsConfig::default(),\n    babe: BabeConfig {\n      authorities: validators.iter().map(|validator| ((*validator).into(), 1)).collect(),\n      epoch_config: BABE_GENESIS_EPOCH_CONFIG,\n      _config: PhantomData,\n    },\n    grandpa: GrandpaConfig {\n      authorities: validators.into_iter().map(|validator| (validator.into(), 1)).collect(),\n      _config: PhantomData,\n    },\n  }\n}\n\nfn testnet_genesis(validators: Vec<&'static str>) -> RuntimeGenesisConfig {\n  let validators = validators\n    .into_iter()\n    .map(|validator| Public::decode(&mut hex::decode(validator).unwrap().as_slice()).unwrap())\n    .collect::<Vec<_>>();\n  let key_shares = NETWORKS\n    .iter()\n    .map(|network| match network {\n      NetworkId::Serai => (NetworkId::Serai, Amount(50_000 * 10_u64.pow(8))),\n      NetworkId::External(ExternalNetworkId::Bitcoin) => {\n        (NetworkId::External(ExternalNetworkId::Bitcoin), Amount(1_000_000 * 10_u64.pow(8)))\n      }\n      NetworkId::External(ExternalNetworkId::Ethereum) => {\n        (NetworkId::External(ExternalNetworkId::Ethereum), Amount(1_000_000 * 10_u64.pow(8)))\n      }\n      NetworkId::External(ExternalNetworkId::Monero) => {\n        (NetworkId::External(ExternalNetworkId::Monero), Amount(100_000 * 10_u64.pow(8)))\n      }\n    })\n    .collect::<Vec<_>>();\n\n  assert_eq!(validators.iter().collect::<HashSet<_>>().len(), validators.len());\n\n  RuntimeGenesisConfig {\n    system: SystemConfig { _config: PhantomData },\n\n    transaction_payment: Default::default(),\n\n    coins: CoinsConfig {\n      accounts: validators\n        
.iter()\n        .map(|a| (*a, Balance { coin: Coin::Serai, amount: Amount(5_000_000 * 10_u64.pow(8)) }))\n        .collect(),\n      _ignore: Default::default(),\n    },\n\n    validator_sets: ValidatorSetsConfig {\n      networks: key_shares.clone(),\n      participants: validators.clone(),\n    },\n    emissions: EmissionsConfig { networks: key_shares, participants: validators.clone() },\n    signals: SignalsConfig::default(),\n    babe: BabeConfig {\n      authorities: validators.iter().map(|validator| ((*validator).into(), 1)).collect(),\n      epoch_config: BABE_GENESIS_EPOCH_CONFIG,\n      _config: PhantomData,\n    },\n    grandpa: GrandpaConfig {\n      authorities: validators.into_iter().map(|validator| (validator.into(), 1)).collect(),\n      _config: PhantomData,\n    },\n  }\n}\n\nfn genesis(\n  name: &'static str,\n  id: &'static str,\n  chain_type: ChainType,\n  protocol_id: &'static str,\n  config: &RuntimeGenesisConfig,\n) -> ChainSpec {\n  use sp_core::{\n    Encode,\n    traits::{RuntimeCode, WrappedRuntimeCode, CodeExecutor},\n  };\n  use sc_service::ChainSpec as _;\n\n  let bin = wasm_binary();\n  let hash = sp_core::blake2_256(&bin).to_vec();\n\n  let mut chain_spec = sc_chain_spec::ChainSpecBuilder::new(&bin, None)\n    .with_name(name)\n    .with_id(id)\n    .with_chain_type(chain_type)\n    .with_protocol_id(protocol_id)\n    .build();\n\n  let mut ext = sp_state_machine::BasicExternalities::new_empty();\n  let code_fetcher = WrappedRuntimeCode(bin.clone().into());\n  sc_executor::WasmExecutor::<sp_io::SubstrateHostFunctions>::builder()\n    .with_allow_missing_host_functions(true)\n    .build()\n    .call(\n      &mut ext,\n      &RuntimeCode { heap_pages: None, code_fetcher: &code_fetcher, hash },\n      \"GenesisApi_build\",\n      &config.encode(),\n      sp_core::traits::CallContext::Onchain,\n    )\n    .0\n    .unwrap();\n  let mut storage = ext.into_storages();\n  storage.top.insert(sp_core::storage::well_known_keys::CODE.to_vec(), 
bin);\n  chain_spec.set_storage(storage);\n\n  chain_spec\n}\n\npub fn development_config() -> ChainSpec {\n  genesis(\n    \"Development Network\",\n    \"devnet\",\n    ChainType::Development,\n    \"serai-devnet\",\n    &devnet_genesis(\n      &[\"Alice\"],\n      vec![\n        account_from_name(\"Alice\"),\n        account_from_name(\"Bob\"),\n        account_from_name(\"Charlie\"),\n        account_from_name(\"Dave\"),\n        account_from_name(\"Eve\"),\n        account_from_name(\"Ferdie\"),\n      ],\n    ),\n  )\n}\n\npub fn local_config() -> ChainSpec {\n  genesis(\n    \"Local Test Network\",\n    \"local\",\n    ChainType::Local,\n    \"serai-local\",\n    &devnet_genesis(\n      &[\"Alice\", \"Bob\", \"Charlie\", \"Dave\"],\n      vec![\n        account_from_name(\"Alice\"),\n        account_from_name(\"Bob\"),\n        account_from_name(\"Charlie\"),\n        account_from_name(\"Dave\"),\n        account_from_name(\"Eve\"),\n        account_from_name(\"Ferdie\"),\n      ],\n    ),\n  )\n}\n\n#[allow(clippy::redundant_closure_call)]\npub fn testnet_config() -> ChainSpec {\n  genesis(\n    \"Test Network 0\",\n    \"testnet-0\",\n    ChainType::Live,\n    \"serai-testnet-0\",\n    &(move || {\n      let _ = testnet_genesis(vec![]);\n      todo!(\"TODO\")\n    })(),\n  )\n}\n\npub fn bootnode_multiaddrs(id: &str) -> Vec<libp2p::Multiaddr> {\n  match id {\n    \"devnet\" | \"local\" => vec![],\n    \"testnet-0\" => todo!(\"TODO\"),\n    _ => panic!(\"unrecognized network ID\"),\n  }\n}\n"
  },
  {
    "path": "substrate/node/src/cli.rs",
    "content": "use sc_cli::RunCmd;\n\n#[derive(Debug, clap::Parser)]\npub struct Cli {\n  #[clap(subcommand)]\n  pub subcommand: Option<Subcommand>,\n\n  #[clap(flatten)]\n  pub run: RunCmd,\n}\n\n#[allow(clippy::large_enum_variant)]\n#[derive(Debug, clap::Subcommand)]\npub enum Subcommand {\n  // Key management CLI utilities\n  #[clap(subcommand)]\n  Key(sc_cli::KeySubcommand),\n\n  // Build a chain specification\n  BuildSpec(sc_cli::BuildSpecCmd),\n\n  // Validate blocks\n  CheckBlock(sc_cli::CheckBlockCmd),\n\n  // Export blocks\n  ExportBlocks(sc_cli::ExportBlocksCmd),\n\n  // Export the state of a given block into a chain spec\n  ExportState(sc_cli::ExportStateCmd),\n\n  // Import blocks\n  ImportBlocks(sc_cli::ImportBlocksCmd),\n\n  // Remove the entire chain\n  PurgeChain(sc_cli::PurgeChainCmd),\n\n  // Revert the chain to a previous state\n  Revert(sc_cli::RevertCmd),\n\n  // DB meta columns information\n  ChainInfo(sc_cli::ChainInfoCmd),\n}\n"
  },
  {
    "path": "substrate/node/src/command.rs",
    "content": "use std::sync::Arc;\n\nuse serai_runtime::Block;\n\nuse sc_service::{PruningMode, PartialComponents};\n\nuse sc_cli::SubstrateCli;\n\nuse crate::{\n  chain_spec,\n  cli::{Cli, Subcommand},\n  service::{self, FullClient},\n};\n\nimpl SubstrateCli for Cli {\n  fn impl_name() -> String {\n    \"Serai Node\".into()\n  }\n\n  fn impl_version() -> String {\n    env!(\"SUBSTRATE_CLI_IMPL_VERSION\").to_string()\n  }\n\n  fn description() -> String {\n    env!(\"CARGO_PKG_DESCRIPTION\").to_string()\n  }\n\n  fn author() -> String {\n    env!(\"CARGO_PKG_AUTHORS\").to_string()\n  }\n\n  fn support_url() -> String {\n    \"https://github.com/serai-dex/serai/issues/new\".to_string()\n  }\n\n  fn copyright_start_year() -> i32 {\n    2022\n  }\n\n  fn load_spec(&self, id: &str) -> Result<Box<dyn sc_service::ChainSpec>, String> {\n    match id {\n      \"dev\" | \"devnet\" => Ok(Box::new(chain_spec::development_config())),\n      \"local\" => Ok(Box::new(chain_spec::local_config())),\n      \"testnet\" => Ok(Box::new(chain_spec::testnet_config())),\n      _ => panic!(\"Unknown network ID\"),\n    }\n  }\n}\n\npub fn run() -> sc_cli::Result<()> {\n  let mut cli = Cli::from_args();\n\n  match &cli.subcommand {\n    Some(Subcommand::Key(cmd)) => cmd.run(&cli),\n\n    Some(Subcommand::BuildSpec(cmd)) => {\n      cli.create_runner(cmd)?.sync_run(|config| cmd.run(config.chain_spec, config.network))\n    }\n\n    Some(Subcommand::CheckBlock(cmd)) => cli.create_runner(cmd)?.async_run(|config| {\n      let PartialComponents { client, task_manager, import_queue, .. } =\n        service::new_partial(&config)?.0;\n      Ok((cmd.run(client, import_queue), task_manager))\n    }),\n\n    Some(Subcommand::ExportBlocks(cmd)) => cli.create_runner(cmd)?.async_run(|config| {\n      let PartialComponents { client, task_manager, .. 
} = service::new_partial(&config)?.0;\n      Ok((cmd.run(client, config.database), task_manager))\n    }),\n\n    Some(Subcommand::ExportState(cmd)) => cli.create_runner(cmd)?.async_run(|config| {\n      let PartialComponents { client, task_manager, .. } = service::new_partial(&config)?.0;\n      Ok((cmd.run(client, config.chain_spec), task_manager))\n    }),\n\n    Some(Subcommand::ImportBlocks(cmd)) => cli.create_runner(cmd)?.async_run(|config| {\n      let PartialComponents { client, task_manager, import_queue, .. } =\n        service::new_partial(&config)?.0;\n      Ok((cmd.run(client, import_queue), task_manager))\n    }),\n\n    Some(Subcommand::PurgeChain(cmd)) => {\n      cli.create_runner(cmd)?.sync_run(|config| cmd.run(config.database))\n    }\n\n    Some(Subcommand::Revert(cmd)) => cli.create_runner(cmd)?.async_run(|config| {\n      let PartialComponents { client, task_manager, backend, .. } =\n        service::new_partial(&config)?.0;\n      let aux_revert = Box::new(|client: Arc<FullClient>, backend, blocks| {\n        sc_consensus_babe::revert(client.clone(), backend, blocks)?;\n        sc_consensus_grandpa::revert(client, blocks)?;\n        Ok(())\n      });\n      Ok((cmd.run(client, backend, Some(aux_revert)), task_manager))\n    }),\n\n    Some(Subcommand::ChainInfo(cmd)) => {\n      cli.create_runner(cmd)?.sync_run(|config| cmd.run::<Block>(&config))\n    }\n\n    None => {\n      cli.run.network_params.node_key_params = sc_cli::NodeKeyParams {\n        node_key: None,\n        node_key_file: None,\n        node_key_type: sc_cli::arg_enums::NodeKeyType::Ed25519,\n        unsafe_force_node_key_generation: true,\n      };\n\n      cli.create_runner(&cli.run)?.run_node_until_exit(|mut config| async {\n        if config.role.is_authority() {\n          config.state_pruning = Some(PruningMode::ArchiveAll);\n        }\n        service::new_full(config).map_err(sc_cli::Error::Service)\n      })\n    }\n  }\n}\n"
  },
  {
    "path": "substrate/node/src/keystore.rs",
    "content": "use zeroize::Zeroize;\n\nuse sp_core::{crypto::*, sr25519};\nuse sp_keystore::*;\n\npub struct Keystore(sr25519::Pair);\n\nimpl Keystore {\n  pub fn from_env() -> Option<Self> {\n    let mut key_hex = serai_env::var(\"KEY\")?;\n    if key_hex.trim().is_empty() {\n      None?;\n    }\n    let mut key = hex::decode(&key_hex).expect(\"KEY from environment wasn't hex\");\n    key_hex.zeroize();\n\n    assert_eq!(key.len(), 32, \"KEY from environment wasn't 32 bytes\");\n    key.extend(sp_core::blake2_256(&key));\n\n    let res = Self(sr25519::Pair::from(schnorrkel::SecretKey::from_bytes(&key).unwrap()));\n    key.zeroize();\n    Some(res)\n  }\n}\n\nimpl sp_keystore::Keystore for Keystore {\n  fn sr25519_public_keys(&self, _: KeyTypeId) -> Vec<sr25519::Public> {\n    vec![self.0.public()]\n  }\n\n  fn sr25519_generate_new(&self, _: KeyTypeId, _: Option<&str>) -> Result<sr25519::Public, Error> {\n    panic!(\"asked to generate an sr25519 key\");\n  }\n\n  fn sr25519_sign(\n    &self,\n    _: KeyTypeId,\n    public: &sr25519::Public,\n    msg: &[u8],\n  ) -> Result<Option<sr25519::Signature>, Error> {\n    if public == &self.0.public() {\n      Ok(Some(self.0.sign(msg)))\n    } else {\n      Ok(None)\n    }\n  }\n\n  fn sr25519_vrf_sign(\n    &self,\n    _: KeyTypeId,\n    public: &sr25519::Public,\n    data: &sr25519::vrf::VrfSignData,\n  ) -> Result<Option<sr25519::vrf::VrfSignature>, Error> {\n    if public == &self.0.public() {\n      Ok(Some(self.0.vrf_sign(data)))\n    } else {\n      Ok(None)\n    }\n  }\n\n  fn sr25519_vrf_pre_output(\n    &self,\n    _: KeyTypeId,\n    public: &sr25519::Public,\n    input: &sr25519::vrf::VrfInput,\n  ) -> Result<Option<sr25519::vrf::VrfPreOutput>, Error> {\n    if public == &self.0.public() {\n      Ok(Some(self.0.vrf_pre_output(input)))\n    } else {\n      Ok(None)\n    }\n  }\n\n  fn insert(&self, _: KeyTypeId, _: &str, _: &[u8]) -> Result<(), ()> {\n    panic!(\"asked to insert a key\");\n  }\n\n  fn 
keys(&self, _: KeyTypeId) -> Result<Vec<Vec<u8>>, Error> {\n    Ok(vec![self.0.public().0.to_vec()])\n  }\n\n  fn has_keys(&self, public_keys: &[(Vec<u8>, KeyTypeId)]) -> bool {\n    let our_key = self.0.public().0;\n    for (public_key, _) in public_keys {\n      if our_key != public_key.as_slice() {\n        return false;\n      }\n    }\n    true\n  }\n}\n"
  },
  {
    "path": "substrate/node/src/main.rs",
    "content": "mod keystore;\n\nmod chain_spec;\nmod service;\n\nmod command;\n\nmod rpc;\nmod cli;\n\nfn main() -> sc_cli::Result<()> {\n  command::run()\n}\n"
  },
  {
    "path": "substrate/node/src/rpc.rs",
    "content": "use std::{sync::Arc, collections::HashSet};\n\nuse rand_core::{RngCore, OsRng};\n\nuse sp_core::Encode;\nuse sp_blockchain::{Error as BlockchainError, HeaderBackend, HeaderMetadata};\nuse sp_block_builder::BlockBuilder;\nuse sp_api::ProvideRuntimeApi;\n\nuse serai_runtime::{\n  primitives::{NetworkId, SubstrateAmount, PublicKey},\n  Nonce, Block, SeraiRuntimeApi,\n};\n\nuse tokio::sync::RwLock;\n\nuse jsonrpsee::RpcModule;\n\nuse sc_client_api::BlockBackend;\nuse sc_transaction_pool_api::TransactionPool;\n\npub struct FullDeps<C, P> {\n  pub id: String,\n  pub client: Arc<C>,\n  pub pool: Arc<P>,\n  pub authority_discovery: Option<sc_authority_discovery::Service>,\n}\n\npub fn create_full<\n  C: ProvideRuntimeApi<Block>\n    + HeaderBackend<Block>\n    + HeaderMetadata<Block, Error = BlockchainError>\n    + BlockBackend<Block>\n    + Send\n    + Sync\n    + 'static,\n  P: TransactionPool + 'static,\n>(\n  deps: FullDeps<C, P>,\n) -> Result<RpcModule<()>, Box<dyn std::error::Error + Send + Sync>>\nwhere\n  C::Api: frame_system_rpc_runtime_api::AccountNonceApi<Block, PublicKey, Nonce>\n    + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi<Block, SubstrateAmount>\n    + SeraiRuntimeApi<Block>\n    + BlockBuilder<Block>,\n{\n  use substrate_frame_rpc_system::{System, SystemApiServer};\n  use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer};\n\n  let mut module = RpcModule::new(());\n  let FullDeps { id, client, pool, authority_discovery } = deps;\n\n  module.merge(System::new(client.clone(), pool).into_rpc())?;\n  module.merge(TransactionPayment::new(client.clone()).into_rpc())?;\n\n  if let Some(authority_discovery) = authority_discovery {\n    let mut authority_discovery_module =\n      RpcModule::new((id, client.clone(), RwLock::new(authority_discovery)));\n    authority_discovery_module.register_async_method(\n      \"p2p_validators\",\n      |params, context, _ext| async move {\n        let [network]: 
[NetworkId; 1] = params.parse()?;\n        let (id, client, authority_discovery) = &*context;\n        let latest_block = client.info().best_hash;\n\n        let validators = client.runtime_api().validators(latest_block, network).map_err(|_| {\n          jsonrpsee::types::error::ErrorObjectOwned::owned(\n            -1,\n            format!(\n              \"couldn't get validators from the latest block, which is likely a fatal bug. {}\",\n              \"please report this at https://github.com/serai-dex/serai\",\n            ),\n            Option::<()>::None,\n          )\n        });\n        let validators = match validators {\n          Ok(validators) => validators,\n          Err(e) => return Err(e),\n        };\n        // Always return the protocol's bootnodes\n        let mut all_p2p_addresses = crate::chain_spec::bootnode_multiaddrs(id);\n        // Additionally returns validators found over the DHT\n        for validator in validators {\n          let mut returned_addresses = authority_discovery\n            .write()\n            .await\n            .get_addresses_by_authority_id(validator.into())\n            .await\n            .unwrap_or_else(HashSet::new)\n            .into_iter()\n            .collect::<Vec<_>>();\n          // Randomly select an address\n          // There should be one, there may be two if their IP address changed, and more should only\n          // occur if they have multiple proxies/an IP address changing frequently/some issue\n          // preventing consistent self-identification\n          // It isn't beneficial to use multiple addresses for a single peer here\n          if !returned_addresses.is_empty() {\n            all_p2p_addresses.push(\n              returned_addresses\n                .remove(usize::try_from(OsRng.next_u64() >> 32).unwrap() % returned_addresses.len())\n                .into(),\n            );\n          }\n        }\n        Ok(all_p2p_addresses)\n      },\n    )?;\n    
module.merge(authority_discovery_module)?;\n  }\n\n  let mut block_bin_module = RpcModule::new(client);\n  block_bin_module.register_async_method(\n    \"chain_getBlockBin\",\n    |params, client, _ext| async move {\n      let [block_hash]: [String; 1] = params.parse()?;\n      let Some(block_hash) = hex::decode(&block_hash).ok().and_then(|bytes| {\n        <[u8; 32]>::try_from(bytes.as_slice())\n          .map(<Block as sp_runtime::traits::Block>::Hash::from)\n          .ok()\n      }) else {\n        return Err(jsonrpsee::types::error::ErrorObjectOwned::owned(\n          -1,\n          \"requested block hash wasn't a valid hash\",\n          Option::<()>::None,\n        ));\n      };\n      let Some(block) = client.block(block_hash).ok().flatten() else {\n        return Err(jsonrpsee::types::error::ErrorObjectOwned::owned(\n          -1,\n          \"couldn't find requested block\",\n          Option::<()>::None,\n        ));\n      };\n      Ok(hex::encode(block.block.encode()))\n    },\n  )?;\n  module.merge(block_bin_module)?;\n\n  Ok(module)\n}\n"
  },
  {
    "path": "substrate/node/src/service.rs",
    "content": "use std::{boxed::Box, sync::Arc};\n\nuse futures_util::stream::StreamExt;\n\nuse sp_timestamp::InherentDataProvider as TimestampInherent;\nuse sp_consensus_babe::{SlotDuration, inherents::InherentDataProvider as BabeInherent};\n\nuse sp_io::SubstrateHostFunctions;\nuse sc_executor::{sp_wasm_interface::ExtendedHostFunctions, WasmExecutor};\n\nuse sc_network::{Event, NetworkEventStream, NetworkBackend};\nuse sc_service::{error::Error as ServiceError, Configuration, TaskManager, TFullClient};\n\nuse sc_transaction_pool_api::OffchainTransactionPoolFactory;\nuse sc_client_api::BlockBackend;\n\nuse sc_telemetry::{Telemetry, TelemetryWorker};\n\nuse serai_runtime::{Block, RuntimeApi};\n\nuse sc_consensus_babe::{self, SlotProportion};\nuse sc_consensus_grandpa as grandpa;\n\n#[cfg(not(feature = \"runtime-benchmarks\"))]\npub type Executor = WasmExecutor<ExtendedHostFunctions<SubstrateHostFunctions, ()>>;\n#[cfg(feature = \"runtime-benchmarks\")]\npub type Executor = WasmExecutor<\n  ExtendedHostFunctions<SubstrateHostFunctions, frame_benchmarking::benchmarking::HostFunctions>,\n>;\n\ntype FullBackend = sc_service::TFullBackend<Block>;\npub type FullClient = TFullClient<Block, RuntimeApi, Executor>;\n\ntype SelectChain = sc_consensus::LongestChain<FullBackend, Block>;\ntype GrandpaBlockImport = grandpa::GrandpaBlockImport<FullBackend, Block, FullClient, SelectChain>;\ntype BabeBlockImport = sc_consensus_babe::BabeBlockImport<Block, FullClient, GrandpaBlockImport>;\n\ntype PartialComponents = sc_service::PartialComponents<\n  FullClient,\n  FullBackend,\n  SelectChain,\n  sc_consensus::DefaultImportQueue<Block>,\n  sc_transaction_pool::TransactionPoolWrapper<Block, FullClient>,\n  (\n    BabeBlockImport,\n    sc_consensus_babe::BabeLink<Block>,\n    grandpa::LinkHalf<Block, FullClient, SelectChain>,\n    grandpa::SharedVoterState,\n    Option<Telemetry>,\n  ),\n>;\n\nfn create_inherent_data_providers(\n  slot_duration: SlotDuration,\n) -> (BabeInherent, 
TimestampInherent) {\n  let timestamp = TimestampInherent::from_system_time();\n  (BabeInherent::from_timestamp_and_slot_duration(*timestamp, slot_duration), timestamp)\n}\n\npub fn new_partial(\n  config: &Configuration,\n) -> Result<(PartialComponents, Arc<dyn sp_keystore::Keystore>), ServiceError> {\n  let telemetry = config\n    .telemetry_endpoints\n    .clone()\n    .filter(|x| !x.is_empty())\n    .map(|endpoints| -> Result<_, sc_telemetry::Error> {\n      let worker = TelemetryWorker::new(16)?;\n      let telemetry = worker.handle().new_telemetry(endpoints);\n      Ok((worker, telemetry))\n    })\n    .transpose()?;\n\n  #[allow(deprecated)]\n  let executor = Executor::new(\n    config.executor.wasm_method,\n    config.executor.default_heap_pages,\n    config.executor.max_runtime_instances,\n    None,\n    config.executor.runtime_cache_size,\n  );\n\n  let (client, backend, keystore_container, task_manager) =\n    sc_service::new_full_parts::<Block, RuntimeApi, _>(\n      config,\n      telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),\n      executor,\n    )?;\n  let client = Arc::new(client);\n\n  let keystore: Arc<dyn sp_keystore::Keystore> =\n    if let Some(keystore) = crate::keystore::Keystore::from_env() {\n      Arc::new(keystore)\n    } else {\n      keystore_container.keystore()\n    };\n\n  let telemetry = telemetry.map(|(worker, telemetry)| {\n    task_manager.spawn_handle().spawn(\"telemetry\", None, worker.run());\n    telemetry\n  });\n\n  let select_chain = sc_consensus::LongestChain::new(backend.clone());\n\n  let transaction_pool = sc_transaction_pool::Builder::new(\n    task_manager.spawn_essential_handle(),\n    client.clone(),\n    config.role.is_authority().into(),\n  )\n  .with_options(config.transaction_pool.clone())\n  .with_prometheus(config.prometheus_registry())\n  .build();\n  let transaction_pool = Arc::new(transaction_pool);\n\n  let (grandpa_block_import, grandpa_link) = grandpa::block_import(\n    client.clone(),\n 
   u32::MAX,\n    &client,\n    select_chain.clone(),\n    telemetry.as_ref().map(Telemetry::handle),\n  )?;\n  let justification_import = grandpa_block_import.clone();\n\n  let (block_import, babe_link) = sc_consensus_babe::block_import(\n    sc_consensus_babe::configuration(&*client)?,\n    grandpa_block_import,\n    client.clone(),\n  )?;\n\n  let slot_duration = babe_link.config().slot_duration();\n  let (import_queue, babe_handle) =\n    sc_consensus_babe::import_queue(sc_consensus_babe::ImportQueueParams {\n      link: babe_link.clone(),\n      block_import: block_import.clone(),\n      justification_import: Some(Box::new(justification_import)),\n      client: client.clone(),\n      select_chain: select_chain.clone(),\n      create_inherent_data_providers: move |_, ()| async move {\n        Ok(create_inherent_data_providers(slot_duration))\n      },\n      spawner: &task_manager.spawn_essential_handle(),\n      registry: config.prometheus_registry(),\n      telemetry: telemetry.as_ref().map(Telemetry::handle),\n      offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(transaction_pool.clone()),\n    })?;\n  // This can't be dropped, or BABE breaks\n  // We don't have anything to do with it though\n  // This won't grow in size, so forgetting this isn't a disastrous memleak\n  std::mem::forget(babe_handle);\n\n  Ok((\n    sc_service::PartialComponents {\n      client,\n      backend,\n      task_manager,\n      keystore_container,\n      select_chain,\n      import_queue,\n      transaction_pool,\n      other: (block_import, babe_link, grandpa_link, grandpa::SharedVoterState::empty(), telemetry),\n    },\n    keystore,\n  ))\n}\n\npub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError> {\n  let (\n    sc_service::PartialComponents {\n      client,\n      backend,\n      mut task_manager,\n      keystore_container: _,\n      import_queue,\n      select_chain,\n      transaction_pool,\n      other: (block_import, babe_link, 
grandpa_link, shared_voter_state, mut telemetry),\n    },\n    keystore_container,\n  ) = new_partial(&config)?;\n\n  config.network.node_name = \"serai\".to_string();\n  config.network.client_version = \"0.1.0\".to_string();\n  config.network.listen_addresses =\n    vec![\"/ip4/0.0.0.0/tcp/30333\".parse().unwrap(), \"/ip6/::/tcp/30333\".parse().unwrap()];\n\n  type N = sc_network::service::NetworkWorker<Block, <Block as sp_runtime::traits::Block>::Hash>;\n  let mut net_config = sc_network::config::FullNetworkConfiguration::<_, _, N>::new(\n    &config.network,\n    config.prometheus_registry().cloned(),\n  );\n  let metrics = N::register_notification_metrics(config.prometheus_registry());\n\n  let grandpa_protocol_name =\n    grandpa::protocol_standard_name(&client.block_hash(0).unwrap().unwrap(), &config.chain_spec);\n  let (grandpa_protocol_config, grandpa_notification_service) =\n    sc_consensus_grandpa::grandpa_peers_set_config::<Block, N>(\n      grandpa_protocol_name.clone(),\n      metrics.clone(),\n      net_config.peer_store_handle(),\n    );\n  net_config.add_notification_protocol(grandpa_protocol_config);\n\n  let publish_non_global_ips = config.network.allow_non_globals_in_dht;\n\n  let (network, system_rpc_tx, tx_handler_controller, sync_service) =\n    sc_service::build_network(sc_service::BuildNetworkParams {\n      config: &config,\n      net_config,\n      client: client.clone(),\n      transaction_pool: transaction_pool.clone(),\n      spawn_handle: task_manager.spawn_handle(),\n      import_queue,\n      block_announce_validator_builder: None,\n      metrics,\n      block_relay: None,\n      warp_sync_config: None,\n    })?;\n\n  task_manager.spawn_handle().spawn(\"bootnodes\", \"bootnodes\", {\n    let network = network.clone();\n    let id = config.chain_spec.id().to_string();\n\n    async move {\n      // Transforms the above Multiaddrs into MultiaddrWithPeerIds\n      // While the PeerIds *should* be known in advance and hardcoded, that 
data wasn't collected in\n      // time and this fine for a testnet\n      let bootnodes = || async {\n        use libp2p::{\n          core::{\n            Endpoint,\n            transport::{PortUse, DialOpts},\n          },\n          Transport as TransportTrait,\n          tcp::tokio::Transport,\n          noise::Config,\n        };\n\n        let bootnode_multiaddrs = crate::chain_spec::bootnode_multiaddrs(&id);\n\n        let mut tasks = vec![];\n        for multiaddr in bootnode_multiaddrs {\n          tasks.push(tokio::time::timeout(\n            core::time::Duration::from_secs(10),\n            tokio::task::spawn(async move {\n              let Ok(noise) = Config::new(&sc_network::Keypair::generate_ed25519()) else { None? };\n              let mut transport = Transport::default()\n                .upgrade(libp2p::core::upgrade::Version::V1)\n                .authenticate(noise)\n                .multiplex(libp2p::yamux::Config::default());\n              let Ok(transport) = transport.dial(\n                multiaddr.clone(),\n                DialOpts { role: Endpoint::Dialer, port_use: PortUse::Reuse },\n              ) else {\n                None?\n              };\n              let Ok((peer_id, _)) = transport.await else { None? 
};\n              Some(sc_network::config::MultiaddrWithPeerId {\n                multiaddr: multiaddr.into(),\n                peer_id: peer_id.into(),\n              })\n            }),\n          ));\n        }\n\n        let mut res = vec![];\n        for task in tasks {\n          if let Ok(Ok(Some(bootnode))) = task.await {\n            res.push(bootnode);\n          }\n        }\n        res\n      };\n\n      use sc_network::{NetworkStatusProvider, NetworkPeers};\n      loop {\n        if let Ok(status) = network.status().await {\n          if status.num_connected_peers < 3 {\n            for bootnode in bootnodes().await {\n              let _ = network.add_reserved_peer(bootnode);\n            }\n          }\n        }\n        tokio::time::sleep(core::time::Duration::from_secs(60)).await;\n      }\n    }\n  });\n\n  let role = config.role;\n  let keystore = keystore_container;\n  if let Some(seed) = config.dev_key_seed.as_ref() {\n    let _ =\n      keystore.sr25519_generate_new(sp_core::crypto::key_types::AUTHORITY_DISCOVERY, Some(seed));\n  }\n  let prometheus_registry = config.prometheus_registry().cloned();\n\n  // TODO: Ensure we're considered as an authority is a validator of an external network\n  let authority_discovery = if role.is_authority() {\n    let (worker, service) = sc_authority_discovery::new_worker_and_service_with_config(\n      #[allow(clippy::field_reassign_with_default)]\n      {\n        let mut worker = sc_authority_discovery::WorkerConfig::default();\n        worker.publish_non_global_ips = publish_non_global_ips;\n        worker.strict_record_validation = true;\n        worker\n      },\n      client.clone(),\n      Arc::new(network.clone()),\n      Box::pin(network.event_stream(\"authority-discovery\").filter_map(|e| async move {\n        match e {\n          Event::Dht(e) => Some(e),\n          _ => None,\n        }\n      })),\n      sc_authority_discovery::Role::PublishAndDiscover(keystore.clone()),\n      
prometheus_registry.clone(),\n      task_manager.spawn_handle(),\n    );\n    task_manager.spawn_handle().spawn(\n      \"authority-discovery-worker\",\n      Some(\"networking\"),\n      worker.run(),\n    );\n\n    Some(service)\n  } else {\n    None\n  };\n\n  let rpc_builder = {\n    let id = config.chain_spec.id().to_string();\n    let client = client.clone();\n    let pool = transaction_pool.clone();\n\n    Box::new(move |_| {\n      crate::rpc::create_full(crate::rpc::FullDeps {\n        id: id.clone(),\n        client: client.clone(),\n        pool: pool.clone(),\n        authority_discovery: authority_discovery.clone(),\n      })\n      .map_err(Into::into)\n    })\n  };\n\n  let enable_grandpa = !config.disable_grandpa;\n  let force_authoring = config.force_authoring;\n  let name = config.network.node_name.clone();\n\n  sc_service::spawn_tasks(sc_service::SpawnTasksParams {\n    config,\n    backend,\n    client: client.clone(),\n    keystore: keystore.clone(),\n    network: network.clone(),\n    rpc_builder,\n    transaction_pool: transaction_pool.clone(),\n    task_manager: &mut task_manager,\n    system_rpc_tx,\n    tx_handler_controller,\n    sync_service: sync_service.clone(),\n    telemetry: telemetry.as_mut(),\n  })?;\n\n  if let sc_service::config::Role::Authority { .. 
} = &role {\n    let slot_duration = babe_link.config().slot_duration();\n    let babe_config = sc_consensus_babe::BabeParams {\n      keystore: keystore.clone(),\n      client: client.clone(),\n      select_chain,\n      env: sc_basic_authorship::ProposerFactory::new(\n        task_manager.spawn_handle(),\n        client,\n        transaction_pool.clone(),\n        prometheus_registry.as_ref(),\n        telemetry.as_ref().map(Telemetry::handle),\n      ),\n      block_import,\n      sync_oracle: sync_service.clone(),\n      justification_sync_link: sync_service.clone(),\n      create_inherent_data_providers: move |_, ()| async move {\n        Ok(create_inherent_data_providers(slot_duration))\n      },\n      force_authoring,\n      backoff_authoring_blocks: None::<()>,\n      babe_link,\n      block_proposal_slot_portion: SlotProportion::new(0.5),\n      max_block_proposal_slot_portion: None,\n      telemetry: telemetry.as_ref().map(Telemetry::handle),\n    };\n\n    task_manager.spawn_essential_handle().spawn_blocking(\n      \"babe-proposer\",\n      Some(\"block-authoring\"),\n      sc_consensus_babe::start_babe(babe_config)?,\n    );\n  }\n\n  if enable_grandpa {\n    task_manager.spawn_essential_handle().spawn_blocking(\n      \"grandpa-voter\",\n      None,\n      grandpa::run_grandpa_voter(grandpa::GrandpaParams {\n        config: grandpa::Config {\n          gossip_duration: std::time::Duration::from_millis(333),\n          justification_generation_period: 512,\n          name: Some(name),\n          observer_enabled: false,\n          keystore: if role.is_authority() { Some(keystore) } else { None },\n          local_role: role,\n          telemetry: telemetry.as_ref().map(Telemetry::handle),\n          protocol_name: grandpa_protocol_name,\n        },\n        link: grandpa_link,\n        network,\n        sync: Arc::new(sync_service),\n        telemetry: telemetry.as_ref().map(Telemetry::handle),\n        voting_rule: 
grandpa::VotingRulesBuilder::default().build(),\n        prometheus_registry,\n        shared_voter_state,\n        offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(transaction_pool),\n        notification_service: grandpa_notification_service,\n      })?,\n    );\n  }\n\n  Ok(task_manager)\n}\n"
  },
  {
    "path": "substrate/primitives/Cargo.toml",
    "content": "[package]\nname = \"serai-primitives\"\nversion = \"0.1.0\"\ndescription = \"Primitives for the Serai blockchain\"\nlicense = \"MIT\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/substrate/primitives\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nedition = \"2021\"\nrust-version = \"1.74\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nzeroize = { version = \"^1.5\", features = [\"derive\"], optional = true }\n\nscale = { package = \"parity-scale-codec\", version = \"3\", default-features = false, features = [\"derive\"] }\n\nborsh = { version = \"1\", default-features = false, features = [\"derive\", \"de_strict_order\"], optional = true }\nserde = { version = \"1\", default-features = false, features = [\"derive\", \"alloc\"], optional = true }\n\nsp-application-crypto = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nsp-core = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nsp-runtime = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nsp-io = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nsp-std = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\n\nframe-support = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\n\n[dev-dependencies]\nrand_core = { version = \"0.6\", default-features = false, features = [\"getrandom\"] }\n\n[features]\nstd = [\"zeroize\", \"scale/std\", 
\"borsh?/std\", \"serde?/std\", \"sp-core/std\", \"sp-runtime/std\", \"sp-std/std\", \"frame-support/std\"]\nborsh = [\"dep:borsh\"]\nserde = [\"dep:serde\"]\ndefault = [\"std\"]\n"
  },
  {
    "path": "substrate/primitives/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2022-2023 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "substrate/primitives/src/account.rs",
    "content": "#[cfg(feature = \"std\")]\nuse zeroize::Zeroize;\n\n#[cfg(feature = \"borsh\")]\nuse borsh::{BorshSerialize, BorshDeserialize};\n#[cfg(feature = \"serde\")]\nuse serde::{Serialize, Deserialize};\n\nuse scale::{Encode, Decode, DecodeWithMemTracking, MaxEncodedLen};\n\nuse sp_core::sr25519::Public;\npub use sp_core::sr25519::Signature;\n#[cfg(feature = \"std\")]\nuse sp_core::{Pair as PairTrait, sr25519::Pair};\n\nuse sp_runtime::traits::{LookupError, Lookup, StaticLookup};\n\npub type PublicKey = Public;\n\n#[cfg(feature = \"borsh\")]\npub fn borsh_serialize_public<W: borsh::io::Write>(\n  public: &Public,\n  writer: &mut W,\n) -> Result<(), borsh::io::Error> {\n  borsh::BorshSerialize::serialize(&public.0, writer)\n}\n\n#[cfg(feature = \"borsh\")]\npub fn borsh_deserialize_public<R: borsh::io::Read>(\n  reader: &mut R,\n) -> Result<Public, borsh::io::Error> {\n  let public: [u8; 32] = borsh::BorshDeserialize::deserialize_reader(reader)?;\n  Ok(public.into())\n}\n\n#[cfg(feature = \"borsh\")]\npub fn borsh_serialize_signature<W: borsh::io::Write>(\n  signature: &Signature,\n  writer: &mut W,\n) -> Result<(), borsh::io::Error> {\n  borsh::BorshSerialize::serialize(&signature.0, writer)\n}\n\n#[cfg(feature = \"borsh\")]\npub fn borsh_deserialize_signature<R: borsh::io::Read>(\n  reader: &mut R,\n) -> Result<Signature, borsh::io::Error> {\n  let signature: [u8; 64] = borsh::BorshDeserialize::deserialize_reader(reader)?;\n  Ok(signature.into())\n}\n\n// TODO: Remove this for solely Public?\n#[derive(\n  Clone,\n  Copy,\n  PartialEq,\n  Eq,\n  PartialOrd,\n  Ord,\n  Debug,\n  Encode,\n  Decode,\n  DecodeWithMemTracking,\n  MaxEncodedLen,\n)]\n#[cfg_attr(feature = \"std\", derive(Zeroize))]\n#[cfg_attr(feature = \"borsh\", derive(BorshSerialize, BorshDeserialize))]\n#[cfg_attr(feature = \"serde\", derive(Serialize, Deserialize))]\npub struct SeraiAddress(pub [u8; 32]);\nimpl SeraiAddress {\n  pub fn new(key: [u8; 32]) -> SeraiAddress {\n    
SeraiAddress(key)\n  }\n}\n\nimpl From<[u8; 32]> for SeraiAddress {\n  fn from(key: [u8; 32]) -> SeraiAddress {\n    SeraiAddress(key)\n  }\n}\n\nimpl From<PublicKey> for SeraiAddress {\n  fn from(key: PublicKey) -> SeraiAddress {\n    SeraiAddress(key.0)\n  }\n}\n\nimpl From<SeraiAddress> for PublicKey {\n  fn from(address: SeraiAddress) -> PublicKey {\n    PublicKey::from_raw(address.0)\n  }\n}\n\n#[cfg(feature = \"std\")]\nimpl std::fmt::Display for SeraiAddress {\n  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n    // TODO: Bech32\n    write!(f, \"{:?}\", self.0)\n  }\n}\n\n#[cfg(feature = \"std\")]\npub fn insecure_pair_from_name(name: &str) -> Pair {\n  Pair::from_string(&format!(\"//{name}\"), None).unwrap()\n}\n\npub struct AccountLookup;\nimpl Lookup for AccountLookup {\n  type Source = SeraiAddress;\n  type Target = PublicKey;\n  fn lookup(&self, source: SeraiAddress) -> Result<PublicKey, LookupError> {\n    Ok(PublicKey::from_raw(source.0))\n  }\n}\nimpl StaticLookup for AccountLookup {\n  type Source = SeraiAddress;\n  type Target = PublicKey;\n  fn lookup(source: SeraiAddress) -> Result<PublicKey, LookupError> {\n    Ok(source.into())\n  }\n  fn unlookup(source: PublicKey) -> SeraiAddress {\n    source.into()\n  }\n}\n\npub const fn system_address(pallet: &'static [u8]) -> SeraiAddress {\n  let mut address = [0; 32];\n  let mut set = false;\n  // Implement a while loop since we can't use a for loop\n  let mut i = 0;\n  while i < pallet.len() {\n    address[i] = pallet[i];\n    if address[i] != 0 {\n      set = true;\n    }\n    i += 1;\n  }\n  // Make sure this address isn't the identity point\n  // Doesn't do address != [0; 32] since that's not const\n  assert!(set, \"address is the identity point\");\n  SeraiAddress(address)\n}\n"
  },
  {
    "path": "substrate/primitives/src/amount.rs",
    "content": "use core::{\n  ops::{Add, Sub, Mul},\n  fmt::Debug,\n};\n\n#[cfg(feature = \"std\")]\nuse zeroize::Zeroize;\n\n#[cfg(feature = \"borsh\")]\nuse borsh::{BorshSerialize, BorshDeserialize};\n#[cfg(feature = \"serde\")]\nuse serde::{Serialize, Deserialize};\n\nuse scale::{Encode, Decode, DecodeWithMemTracking, MaxEncodedLen};\n\n/// The type used for amounts within Substrate.\n// Distinct from Amount due to Substrate's requirements on this type.\n// While Amount could have all the necessary traits implemented, not only are they many, it'd make\n// Amount a large type with a variety of misc functions.\n// The current type's minimalism sets clear bounds on usage.\npub type SubstrateAmount = u64;\n/// The type used for amounts.\n#[derive(\n  Clone,\n  Copy,\n  PartialEq,\n  Eq,\n  PartialOrd,\n  Debug,\n  Encode,\n  Decode,\n  DecodeWithMemTracking,\n  MaxEncodedLen,\n)]\n#[cfg_attr(feature = \"std\", derive(Zeroize))]\n#[cfg_attr(feature = \"borsh\", derive(BorshSerialize, BorshDeserialize))]\n#[cfg_attr(feature = \"serde\", derive(Serialize, Deserialize))]\npub struct Amount(pub SubstrateAmount);\n\n// TODO: these impl shouldn't panic and return error to be dealt with.\n// Otherwise we might have a panic that stops the network.\nimpl Add for Amount {\n  type Output = Amount;\n  fn add(self, other: Amount) -> Amount {\n    // Explicitly use checked_add so even if range checks are disabled, this is still checked\n    Amount(self.0.checked_add(other.0).unwrap())\n  }\n}\n\nimpl Sub for Amount {\n  type Output = Amount;\n  fn sub(self, other: Amount) -> Amount {\n    Amount(self.0.checked_sub(other.0).unwrap())\n  }\n}\n\nimpl Mul for Amount {\n  type Output = Amount;\n  fn mul(self, other: Amount) -> Amount {\n    Amount(self.0.checked_mul(other.0).unwrap())\n  }\n}\n"
  },
  {
    "path": "substrate/primitives/src/balance.rs",
    "content": "use core::ops::{Add, Sub, Mul};\n\n#[cfg(feature = \"std\")]\nuse zeroize::Zeroize;\n\n#[cfg(feature = \"borsh\")]\nuse borsh::{BorshSerialize, BorshDeserialize};\n#[cfg(feature = \"serde\")]\nuse serde::{Serialize, Deserialize};\n\nuse scale::{Encode, Decode, DecodeWithMemTracking, MaxEncodedLen};\n\nuse crate::{Amount, Coin, ExternalCoin};\n\n/// The type used for balances (a Coin and Balance).\n#[derive(\n  Clone, Copy, PartialEq, Eq, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen,\n)]\n#[cfg_attr(feature = \"std\", derive(Zeroize))]\n#[cfg_attr(feature = \"borsh\", derive(BorshSerialize, BorshDeserialize))]\n#[cfg_attr(feature = \"serde\", derive(Serialize, Deserialize))]\npub struct Balance {\n  pub coin: Coin,\n  pub amount: Amount,\n}\n\n/// The type used for balances (a Coin and Balance).\n#[derive(\n  Clone, Copy, PartialEq, Eq, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen,\n)]\n#[cfg_attr(feature = \"std\", derive(Zeroize))]\n#[cfg_attr(feature = \"borsh\", derive(BorshSerialize, BorshDeserialize))]\n#[cfg_attr(feature = \"serde\", derive(Serialize, Deserialize))]\npub struct ExternalBalance {\n  pub coin: ExternalCoin,\n  pub amount: Amount,\n}\n\nimpl From<ExternalBalance> for Balance {\n  fn from(balance: ExternalBalance) -> Self {\n    Balance { coin: balance.coin.into(), amount: balance.amount }\n  }\n}\n\nimpl TryFrom<Balance> for ExternalBalance {\n  type Error = ();\n\n  fn try_from(balance: Balance) -> Result<Self, Self::Error> {\n    match balance.coin {\n      Coin::Serai => Err(())?,\n      Coin::External(coin) => Ok(ExternalBalance { coin, amount: balance.amount }),\n    }\n  }\n}\n\n// TODO: these impl either should be removed or return errors in case of overflows\nimpl Add<Amount> for Balance {\n  type Output = Balance;\n  fn add(self, other: Amount) -> Balance {\n    Balance { coin: self.coin, amount: self.amount + other }\n  }\n}\n\nimpl Sub<Amount> for Balance {\n  type Output = Balance;\n  fn 
sub(self, other: Amount) -> Balance {\n    Balance { coin: self.coin, amount: self.amount - other }\n  }\n}\n\nimpl Mul<Amount> for Balance {\n  type Output = Balance;\n  fn mul(self, other: Amount) -> Balance {\n    Balance { coin: self.coin, amount: self.amount * other }\n  }\n}\n"
  },
  {
    "path": "substrate/primitives/src/block.rs",
    "content": "#[cfg(feature = \"std\")]\nuse zeroize::Zeroize;\n\n#[cfg(feature = \"borsh\")]\nuse borsh::{BorshSerialize, BorshDeserialize};\n#[cfg(feature = \"serde\")]\nuse serde::{Serialize, Deserialize};\n\nuse scale::{Encode, Decode, DecodeWithMemTracking, MaxEncodedLen};\n\nuse sp_core::H256;\n\n/// The type used to identify block numbers.\n#[derive(\n  Clone,\n  Copy,\n  Default,\n  PartialEq,\n  Eq,\n  Hash,\n  Debug,\n  Encode,\n  Decode,\n  DecodeWithMemTracking,\n  MaxEncodedLen,\n)]\n#[cfg_attr(feature = \"std\", derive(Zeroize))]\n#[cfg_attr(feature = \"borsh\", derive(BorshSerialize, BorshDeserialize))]\n#[cfg_attr(feature = \"serde\", derive(Serialize, Deserialize))]\npub struct BlockNumber(pub u64);\nimpl From<u64> for BlockNumber {\n  fn from(number: u64) -> BlockNumber {\n    BlockNumber(number)\n  }\n}\n\n/// The type used to identify block hashes.\n// This may not be universally compatible\n// If a block exists with a hash which isn't 32-bytes, it can be hashed into a value with 32-bytes\n// This would require the processor to maintain a mapping of 32-byte IDs to actual hashes, which\n// would be fine\n#[derive(\n  Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen,\n)]\n#[cfg_attr(feature = \"std\", derive(Zeroize))]\n#[cfg_attr(feature = \"borsh\", derive(BorshSerialize, BorshDeserialize))]\n#[cfg_attr(feature = \"serde\", derive(Serialize, Deserialize))]\npub struct BlockHash(pub [u8; 32]);\n\nimpl AsRef<[u8]> for BlockHash {\n  fn as_ref(&self) -> &[u8] {\n    self.0.as_ref()\n  }\n}\n\nimpl From<[u8; 32]> for BlockHash {\n  fn from(hash: [u8; 32]) -> BlockHash {\n    BlockHash(hash)\n  }\n}\n\nimpl From<H256> for BlockHash {\n  fn from(hash: H256) -> BlockHash {\n    BlockHash(hash.into())\n  }\n}\n"
  },
  {
    "path": "substrate/primitives/src/constants.rs",
    "content": "use crate::BlockNumber;\n\n// 1 MB\npub const BLOCK_SIZE: u32 = 1024 * 1024;\n// 6 seconds\npub const TARGET_BLOCK_TIME: u64 = 6;\n\n/// Measured in blocks.\npub const MINUTES: BlockNumber = 60 / TARGET_BLOCK_TIME;\npub const HOURS: BlockNumber = 60 * MINUTES;\npub const DAYS: BlockNumber = 24 * HOURS;\npub const WEEKS: BlockNumber = 7 * DAYS;\n// Defines a month as 30 days, which is slightly inaccurate\npub const MONTHS: BlockNumber = 30 * DAYS;\n// Defines a year as 12 inaccurate months, which is 360 days literally (~1.5% off)\npub const YEARS: BlockNumber = 12 * MONTHS;\n\n/// 6 months of blocks\npub const GENESIS_SRI_TRICKLE_FEED: u64 = 6 * MONTHS;\n\n// 100 Million SRI\npub const GENESIS_SRI: u64 = 100_000_000 * 10_u64.pow(8);\n\n/// This needs to be long enough for arbitrage to occur and make holding any fake price up\n/// sufficiently unrealistic.\n#[allow(clippy::cast_possible_truncation)]\npub const ARBITRAGE_TIME: u16 = (2 * HOURS) as u16;\n\n/// Since we use the median price, double the window length.\n///\n/// We additionally +1 so there is a true median.\npub const MEDIAN_PRICE_WINDOW_LENGTH: u16 = (2 * ARBITRAGE_TIME) + 1;\n\n/// Amount of blocks per epoch in the fast-epoch feature that is used in tests.\npub const FAST_EPOCH_DURATION: u64 = 2 * MINUTES;\n\n/// Amount of blocks for the initial period era of the emissions under the fast-epoch feature.\npub const FAST_EPOCH_INITIAL_PERIOD: u64 = 2 * FAST_EPOCH_DURATION;\n"
  },
  {
    "path": "substrate/primitives/src/lib.rs",
    "content": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![cfg_attr(docsrs, feature(doc_cfg))]\n#![cfg_attr(not(feature = \"std\"), no_std)]\n\n#[cfg(feature = \"std\")]\nuse zeroize::Zeroize;\n\n#[cfg(feature = \"borsh\")]\nuse borsh::{BorshSerialize, BorshDeserialize};\n#[cfg(feature = \"serde\")]\nuse serde::{Serialize, Deserialize};\n\nuse scale::{Encode, Decode, DecodeWithMemTracking, MaxEncodedLen};\n\n#[cfg(test)]\nuse sp_io::TestExternalities;\n\n#[cfg(test)]\nuse frame_support::{pallet_prelude::*, Identity, traits::StorageInstance};\n\nuse sp_core::{ConstU32, bounded::BoundedVec};\npub use sp_application_crypto as crypto;\n\nmod amount;\npub use amount::*;\n\nmod block;\npub use block::*;\n\nmod networks;\npub use networks::*;\n\nmod balance;\npub use balance::*;\n\nmod account;\npub use account::*;\n\nmod constants;\npub use constants::*;\n\npub type BlockNumber = u64;\npub type Header = sp_runtime::generic::Header<BlockNumber, sp_runtime::traits::BlakeTwo256>;\n\n#[cfg(feature = \"borsh\")]\npub fn borsh_serialize_bounded_vec<W: borsh::io::Write, T: BorshSerialize, const B: u32>(\n  bounded: &BoundedVec<T, ConstU32<B>>,\n  writer: &mut W,\n) -> Result<(), borsh::io::Error> {\n  borsh::BorshSerialize::serialize(bounded.as_slice(), writer)\n}\n\n#[cfg(feature = \"borsh\")]\npub fn borsh_deserialize_bounded_vec<R: borsh::io::Read, T: BorshDeserialize, const B: u32>(\n  reader: &mut R,\n) -> Result<BoundedVec<T, ConstU32<B>>, borsh::io::Error> {\n  let vec: Vec<T> = borsh::BorshDeserialize::deserialize_reader(reader)?;\n  vec.try_into().map_err(|_| borsh::io::Error::other(\"bound exceeded\"))\n}\n\n// Monero, our current longest address candidate, has a longest address of featured\n// 1 (enum) + 1 (flags) + 64 (two keys) = 66\n// When JAMTIS arrives, it'll become 112 or potentially even 142 bytes\npub const MAX_ADDRESS_LEN: u32 = 196;\n\n#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen)]\n#[cfg_attr(feature = 
\"borsh\", derive(BorshSerialize, BorshDeserialize))]\n#[cfg_attr(feature = \"serde\", derive(Serialize, Deserialize))]\npub struct ExternalAddress(\n  #[cfg_attr(\n    feature = \"borsh\",\n    borsh(\n      serialize_with = \"borsh_serialize_bounded_vec\",\n      deserialize_with = \"borsh_deserialize_bounded_vec\"\n    )\n  )]\n  BoundedVec<u8, ConstU32<{ MAX_ADDRESS_LEN }>>,\n);\n#[cfg(feature = \"std\")]\nimpl Zeroize for ExternalAddress {\n  fn zeroize(&mut self) {\n    self.0.as_mut().zeroize()\n  }\n}\n\nimpl ExternalAddress {\n  #[cfg(feature = \"std\")]\n  pub fn new(address: Vec<u8>) -> Result<ExternalAddress, &'static str> {\n    Ok(ExternalAddress(address.try_into().map_err(|_| \"address length exceeds {MAX_ADDRESS_LEN}\")?))\n  }\n\n  pub fn address(&self) -> &[u8] {\n    self.0.as_ref()\n  }\n\n  #[cfg(feature = \"std\")]\n  pub fn consume(self) -> Vec<u8> {\n    self.0.into_inner()\n  }\n}\n\nimpl AsRef<[u8]> for ExternalAddress {\n  fn as_ref(&self) -> &[u8] {\n    self.0.as_ref()\n  }\n}\n\n// Should be enough for a Uniswap v3 call\npub const MAX_DATA_LEN: u32 = 512;\n#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen)]\n#[cfg_attr(feature = \"borsh\", derive(BorshSerialize, BorshDeserialize))]\n#[cfg_attr(feature = \"serde\", derive(Serialize, Deserialize))]\npub struct Data(\n  #[cfg_attr(\n    feature = \"borsh\",\n    borsh(\n      serialize_with = \"borsh_serialize_bounded_vec\",\n      deserialize_with = \"borsh_deserialize_bounded_vec\"\n    )\n  )]\n  BoundedVec<u8, ConstU32<{ MAX_DATA_LEN }>>,\n);\n\n#[cfg(feature = \"std\")]\nimpl Zeroize for Data {\n  fn zeroize(&mut self) {\n    self.0.as_mut().zeroize()\n  }\n}\n\nimpl Data {\n  #[cfg(feature = \"std\")]\n  pub fn new(data: Vec<u8>) -> Result<Data, &'static str> {\n    Ok(Data(data.try_into().map_err(|_| \"data length exceeds {MAX_DATA_LEN}\")?))\n  }\n\n  pub fn data(&self) -> &[u8] {\n    self.0.as_ref()\n  }\n\n  #[cfg(feature = \"std\")]\n  
pub fn consume(self) -> Vec<u8> {\n    self.0.into_inner()\n  }\n}\n\nimpl AsRef<[u8]> for Data {\n  fn as_ref(&self) -> &[u8] {\n    self.0.as_ref()\n  }\n}\n\n/// Lexicographically reverses a given byte array.\npub fn reverse_lexicographic_order<const N: usize>(bytes: [u8; N]) -> [u8; N] {\n  let mut res = [0u8; N];\n  for (i, byte) in bytes.iter().enumerate() {\n    res[i] = !*byte;\n  }\n  res\n}\n\n#[test]\nfn test_reverse_lexicographic_order() {\n  TestExternalities::default().execute_with(|| {\n    use rand_core::{RngCore, OsRng};\n\n    struct Storage;\n    impl StorageInstance for Storage {\n      fn pallet_prefix() -> &'static str {\n        \"LexicographicOrder\"\n      }\n\n      const STORAGE_PREFIX: &'static str = \"storage\";\n    }\n    type Map = StorageMap<Storage, Identity, [u8; 8], (), OptionQuery>;\n\n    struct StorageReverse;\n    impl StorageInstance for StorageReverse {\n      fn pallet_prefix() -> &'static str {\n        \"LexicographicOrder\"\n      }\n\n      const STORAGE_PREFIX: &'static str = \"storagereverse\";\n    }\n    type MapReverse = StorageMap<StorageReverse, Identity, [u8; 8], (), OptionQuery>;\n\n    // populate the maps\n    let mut amounts = vec![];\n    for _ in 0 .. 100 {\n      amounts.push(OsRng.next_u64());\n    }\n\n    let mut amounts_sorted = amounts.clone();\n    amounts_sorted.sort();\n    for a in amounts {\n      Map::set(a.to_be_bytes(), Some(()));\n      MapReverse::set(reverse_lexicographic_order(a.to_be_bytes()), Some(()));\n    }\n\n    // retrive back and check whether they are sorted as expected\n    let total_size = amounts_sorted.len();\n    let mut map_iter = Map::iter_keys();\n    let mut reverse_map_iter = MapReverse::iter_keys();\n    for i in 0 .. 
amounts_sorted.len() {\n      let first = map_iter.next().unwrap();\n      let second = reverse_map_iter.next().unwrap();\n\n      assert_eq!(u64::from_be_bytes(first), amounts_sorted[i]);\n      assert_eq!(\n        u64::from_be_bytes(reverse_lexicographic_order(second)),\n        amounts_sorted[total_size - (i + 1)]\n      );\n    }\n  });\n}\n"
  },
  {
    "path": "substrate/primitives/src/networks.rs",
    "content": "#[cfg(feature = \"std\")]\nuse zeroize::Zeroize;\n\nuse scale::{Encode, EncodeLike, Decode, DecodeWithMemTracking, MaxEncodedLen};\n\n#[cfg(feature = \"borsh\")]\nuse borsh::{BorshSerialize, BorshDeserialize};\n#[cfg(feature = \"serde\")]\nuse serde::{Serialize, Deserialize};\n\nuse sp_core::{ConstU32, bounded::BoundedVec};\nuse sp_std::{vec, vec::Vec};\n\n#[cfg(feature = \"borsh\")]\nuse crate::{borsh_serialize_bounded_vec, borsh_deserialize_bounded_vec};\n\n/// The type used to identify external networks.\n#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, PartialOrd, Ord)]\n#[cfg_attr(feature = \"std\", derive(Zeroize))]\n#[cfg_attr(feature = \"serde\", derive(Serialize, Deserialize))]\npub enum ExternalNetworkId {\n  Bitcoin,\n  Ethereum,\n  Monero,\n}\n\nimpl Encode for ExternalNetworkId {\n  fn encode(&self) -> Vec<u8> {\n    match self {\n      ExternalNetworkId::Bitcoin => vec![1],\n      ExternalNetworkId::Ethereum => vec![2],\n      ExternalNetworkId::Monero => vec![3],\n    }\n  }\n}\n\nimpl Decode for ExternalNetworkId {\n  fn decode<I: scale::Input>(input: &mut I) -> Result<Self, scale::Error> {\n    let kind = input.read_byte()?;\n    match kind {\n      1 => Ok(Self::Bitcoin),\n      2 => Ok(Self::Ethereum),\n      3 => Ok(Self::Monero),\n      _ => Err(scale::Error::from(\"invalid format\")),\n    }\n  }\n}\n\nimpl DecodeWithMemTracking for ExternalNetworkId {}\n\nimpl MaxEncodedLen for ExternalNetworkId {\n  fn max_encoded_len() -> usize {\n    1\n  }\n}\n\nimpl EncodeLike for ExternalNetworkId {}\n\n#[cfg(feature = \"borsh\")]\nimpl BorshSerialize for ExternalNetworkId {\n  fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {\n    writer.write_all(&self.encode())\n  }\n}\n\n#[cfg(feature = \"borsh\")]\nimpl BorshDeserialize for ExternalNetworkId {\n  fn deserialize_reader<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {\n    let mut kind = [0; 1];\n    reader.read_exact(&mut kind)?;\n    
ExternalNetworkId::decode(&mut kind.as_slice())\n      .map_err(|_| std::io::Error::other(\"invalid format\"))\n  }\n}\n\n/// The type used to identify networks.\n#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, PartialOrd, Ord)]\n#[cfg_attr(feature = \"std\", derive(Zeroize))]\n#[cfg_attr(feature = \"serde\", derive(Serialize, Deserialize))]\npub enum NetworkId {\n  Serai,\n  External(ExternalNetworkId),\n}\n\nimpl Encode for NetworkId {\n  fn encode(&self) -> Vec<u8> {\n    match self {\n      NetworkId::Serai => vec![0],\n      NetworkId::External(network) => network.encode(),\n    }\n  }\n}\n\nimpl Decode for NetworkId {\n  fn decode<I: scale::Input>(input: &mut I) -> Result<Self, scale::Error> {\n    let kind = input.read_byte()?;\n    match kind {\n      0 => Ok(Self::Serai),\n      _ => Ok(ExternalNetworkId::decode(&mut [kind].as_slice())?.into()),\n    }\n  }\n}\n\nimpl DecodeWithMemTracking for NetworkId {}\n\nimpl MaxEncodedLen for NetworkId {\n  fn max_encoded_len() -> usize {\n    1\n  }\n}\n\nimpl EncodeLike for NetworkId {}\n\n#[cfg(feature = \"borsh\")]\nimpl BorshSerialize for NetworkId {\n  fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {\n    writer.write_all(&self.encode())\n  }\n}\n\n#[cfg(feature = \"borsh\")]\nimpl BorshDeserialize for NetworkId {\n  fn deserialize_reader<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {\n    let mut kind = [0; 1];\n    reader.read_exact(&mut kind)?;\n    NetworkId::decode(&mut kind.as_slice()).map_err(|_| std::io::Error::other(\"invalid format\"))\n  }\n}\n\nimpl ExternalNetworkId {\n  pub fn coins(&self) -> Vec<ExternalCoin> {\n    match self {\n      Self::Bitcoin => vec![ExternalCoin::Bitcoin],\n      Self::Ethereum => vec![ExternalCoin::Ether, ExternalCoin::Dai],\n      Self::Monero => vec![ExternalCoin::Monero],\n    }\n  }\n}\n\nimpl NetworkId {\n  pub fn coins(&self) -> Vec<Coin> {\n    match self {\n      Self::Serai => vec![Coin::Serai],\n      
Self::External(network) => {\n        network.coins().into_iter().map(core::convert::Into::into).collect()\n      }\n    }\n  }\n}\n\nimpl From<ExternalNetworkId> for NetworkId {\n  fn from(network: ExternalNetworkId) -> Self {\n    NetworkId::External(network)\n  }\n}\n\nimpl TryFrom<NetworkId> for ExternalNetworkId {\n  type Error = ();\n\n  fn try_from(network: NetworkId) -> Result<Self, Self::Error> {\n    match network {\n      NetworkId::Serai => Err(())?,\n      NetworkId::External(n) => Ok(n),\n    }\n  }\n}\n\npub const EXTERNAL_NETWORKS: [ExternalNetworkId; 3] =\n  [ExternalNetworkId::Bitcoin, ExternalNetworkId::Ethereum, ExternalNetworkId::Monero];\n\npub const NETWORKS: [NetworkId; 4] = [\n  NetworkId::Serai,\n  NetworkId::External(ExternalNetworkId::Bitcoin),\n  NetworkId::External(ExternalNetworkId::Ethereum),\n  NetworkId::External(ExternalNetworkId::Monero),\n];\n\npub const EXTERNAL_COINS: [ExternalCoin; 4] =\n  [ExternalCoin::Bitcoin, ExternalCoin::Ether, ExternalCoin::Dai, ExternalCoin::Monero];\n\npub const COINS: [Coin; 5] = [\n  Coin::Serai,\n  Coin::External(ExternalCoin::Bitcoin),\n  Coin::External(ExternalCoin::Ether),\n  Coin::External(ExternalCoin::Dai),\n  Coin::External(ExternalCoin::Monero),\n];\n\n/// The type used to identify external coins.\n#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]\n#[cfg_attr(feature = \"std\", derive(Zeroize))]\n#[cfg_attr(feature = \"serde\", derive(Serialize, Deserialize))]\npub enum ExternalCoin {\n  Bitcoin,\n  Ether,\n  Dai,\n  Monero,\n}\n\nimpl Encode for ExternalCoin {\n  fn encode(&self) -> Vec<u8> {\n    match self {\n      ExternalCoin::Bitcoin => vec![4],\n      ExternalCoin::Ether => vec![5],\n      ExternalCoin::Dai => vec![6],\n      ExternalCoin::Monero => vec![7],\n    }\n  }\n}\n\nimpl Decode for ExternalCoin {\n  fn decode<I: scale::Input>(input: &mut I) -> Result<Self, scale::Error> {\n    let kind = input.read_byte()?;\n    match kind {\n      4 => 
Ok(Self::Bitcoin),\n      5 => Ok(Self::Ether),\n      6 => Ok(Self::Dai),\n      7 => Ok(Self::Monero),\n      _ => Err(scale::Error::from(\"invalid format\")),\n    }\n  }\n}\n\nimpl DecodeWithMemTracking for ExternalCoin {}\n\nimpl MaxEncodedLen for ExternalCoin {\n  fn max_encoded_len() -> usize {\n    1\n  }\n}\n\nimpl EncodeLike for ExternalCoin {}\n\n#[cfg(feature = \"borsh\")]\nimpl BorshSerialize for ExternalCoin {\n  fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {\n    writer.write_all(&self.encode())\n  }\n}\n\n#[cfg(feature = \"borsh\")]\nimpl BorshDeserialize for ExternalCoin {\n  fn deserialize_reader<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {\n    let mut kind = [0; 1];\n    reader.read_exact(&mut kind)?;\n    ExternalCoin::decode(&mut kind.as_slice()).map_err(|_| std::io::Error::other(\"invalid format\"))\n  }\n}\n\n/// The type used to identify coins.\n#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]\n#[cfg_attr(feature = \"std\", derive(Zeroize))]\n#[cfg_attr(feature = \"serde\", derive(Serialize, Deserialize))]\npub enum Coin {\n  Serai,\n  External(ExternalCoin),\n}\n\nimpl Encode for Coin {\n  fn encode(&self) -> Vec<u8> {\n    match self {\n      Coin::Serai => vec![0],\n      Coin::External(ec) => ec.encode(),\n    }\n  }\n}\n\nimpl Decode for Coin {\n  fn decode<I: scale::Input>(input: &mut I) -> Result<Self, scale::Error> {\n    let kind = input.read_byte()?;\n    match kind {\n      0 => Ok(Self::Serai),\n      _ => Ok(ExternalCoin::decode(&mut [kind].as_slice())?.into()),\n    }\n  }\n}\n\nimpl DecodeWithMemTracking for Coin {}\n\nimpl MaxEncodedLen for Coin {\n  fn max_encoded_len() -> usize {\n    1\n  }\n}\n\nimpl EncodeLike for Coin {}\n\n#[cfg(feature = \"borsh\")]\nimpl BorshSerialize for Coin {\n  fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {\n    writer.write_all(&self.encode())\n  }\n}\n\n#[cfg(feature = \"borsh\")]\nimpl 
BorshDeserialize for Coin {\n  fn deserialize_reader<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {\n    let mut kind = [0; 1];\n    reader.read_exact(&mut kind)?;\n    Coin::decode(&mut kind.as_slice()).map_err(|_| std::io::Error::other(\"invalid format\"))\n  }\n}\n\nimpl From<ExternalCoin> for Coin {\n  fn from(coin: ExternalCoin) -> Self {\n    Coin::External(coin)\n  }\n}\n\nimpl TryFrom<Coin> for ExternalCoin {\n  type Error = ();\n\n  fn try_from(coin: Coin) -> Result<Self, Self::Error> {\n    match coin {\n      Coin::Serai => Err(())?,\n      Coin::External(c) => Ok(c),\n    }\n  }\n}\n\nimpl ExternalCoin {\n  pub fn network(&self) -> ExternalNetworkId {\n    match self {\n      ExternalCoin::Bitcoin => ExternalNetworkId::Bitcoin,\n      ExternalCoin::Ether | ExternalCoin::Dai => ExternalNetworkId::Ethereum,\n      ExternalCoin::Monero => ExternalNetworkId::Monero,\n    }\n  }\n\n  pub fn name(&self) -> &'static str {\n    match self {\n      ExternalCoin::Bitcoin => \"Bitcoin\",\n      ExternalCoin::Ether => \"Ether\",\n      ExternalCoin::Dai => \"Dai Stablecoin\",\n      ExternalCoin::Monero => \"Monero\",\n    }\n  }\n\n  pub fn symbol(&self) -> &'static str {\n    match self {\n      ExternalCoin::Bitcoin => \"BTC\",\n      ExternalCoin::Ether => \"ETH\",\n      ExternalCoin::Dai => \"DAI\",\n      ExternalCoin::Monero => \"XMR\",\n    }\n  }\n\n  pub fn decimals(&self) -> u32 {\n    match self {\n      // Ether and DAI have 18 decimals, yet we only track 8 in order to fit them within u64s\n      ExternalCoin::Bitcoin | ExternalCoin::Ether | ExternalCoin::Dai => 8,\n      ExternalCoin::Monero => 12,\n    }\n  }\n}\n\nimpl Coin {\n  pub fn native() -> Coin {\n    Coin::Serai\n  }\n\n  pub fn network(&self) -> NetworkId {\n    match self {\n      Coin::Serai => NetworkId::Serai,\n      Coin::External(c) => c.network().into(),\n    }\n  }\n\n  pub fn name(&self) -> &'static str {\n    match self {\n      Coin::Serai => \"Serai\",\n      
Coin::External(c) => c.name(),\n    }\n  }\n\n  pub fn symbol(&self) -> &'static str {\n    match self {\n      Coin::Serai => \"SRI\",\n      Coin::External(c) => c.symbol(),\n    }\n  }\n\n  pub fn decimals(&self) -> u32 {\n    match self {\n      Coin::Serai => 8,\n      Coin::External(c) => c.decimals(),\n    }\n  }\n\n  pub fn is_native(&self) -> bool {\n    matches!(self, Coin::Serai)\n  }\n}\n\n// Max of 8 coins per network\n// Since Serai isn't interested in listing tokens, as on-chain DEXs will almost certainly have\n// more liquidity, the only reason we'd have so many coins from a network is if there's no DEX\n// on-chain\n// There's probably no chain with so many *worthwhile* coins and no on-chain DEX\n// This could probably be just 4, yet 8 is a hedge for the unforeseen\n// If necessary, this can be increased with a fork\npub const MAX_COINS_PER_NETWORK: u32 = 8;\n\n/// Network definition.\n#[derive(Clone, PartialEq, Eq, Debug)]\n#[cfg_attr(feature = \"borsh\", derive(BorshSerialize, BorshDeserialize))]\n#[cfg_attr(feature = \"serde\", derive(Serialize, Deserialize))]\npub struct Network {\n  #[cfg_attr(\n    feature = \"borsh\",\n    borsh(\n      serialize_with = \"borsh_serialize_bounded_vec\",\n      deserialize_with = \"borsh_deserialize_bounded_vec\"\n    )\n  )]\n  coins: BoundedVec<Coin, ConstU32<{ MAX_COINS_PER_NETWORK }>>,\n}\n\n#[cfg(feature = \"std\")]\nimpl Zeroize for Network {\n  fn zeroize(&mut self) {\n    for coin in self.coins.as_mut() {\n      coin.zeroize();\n    }\n    self.coins.truncate(0);\n  }\n}\n\nimpl Network {\n  #[cfg(feature = \"std\")]\n  pub fn new(coins: Vec<Coin>) -> Result<Network, &'static str> {\n    if coins.is_empty() {\n      Err(\"no coins provided\")?;\n    }\n\n    let network = coins[0].network();\n    for coin in coins.iter().skip(1) {\n      if coin.network() != network {\n        Err(\"coins have different networks\")?;\n      }\n    }\n\n    Ok(Network {\n      coins: coins.try_into().map_err(|_| \"coins 
length exceeds {MAX_COINS_PER_NETWORK}\")?,\n    })\n  }\n\n  pub fn coins(&self) -> &[Coin] {\n    &self.coins\n  }\n}\n"
  },
  {
    "path": "substrate/runtime/Cargo.toml",
    "content": "[package]\nname = \"serai-runtime\"\nversion = \"0.1.0\"\ndescription = \"Serai network node runtime, built over Substrate\"\nlicense = \"AGPL-3.0-only\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/substrate/runtime\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nedition = \"2021\"\nrust-version = \"1.74\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[package.metadata.cargo-machete]\nignored = [\"scale\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nhashbrown = { version = \"0.14\", default-features = false, features = [\"ahash\", \"inline-more\"] }\n\nscale = { package = \"parity-scale-codec\", version = \"3\", default-features = false, features = [\"derive\"] }\n\nsp-core = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nsp-std = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\n\nsp-offchain = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nsp-version = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nsp-inherents = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\n\nsp-session = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nsp-consensus-babe = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nsp-consensus-grandpa = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", 
default-features = false }\n\nsp-authority-discovery = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\n\nsp-transaction-pool = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nsp-block-builder = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\n\nsp-runtime = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nsp-api = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\n\nframe-system = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nframe-support = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nframe-executive = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nframe-benchmarking = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false, optional = true }\n\nserai-primitives = { path = \"../primitives\", default-features = false }\nserai-abi = { path = \"../abi\", default-features = false, features = [\"serde\"] }\n\npallet-timestamp = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\npallet-authorship = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\n\npallet-transaction-payment = { 
git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\n\ncoins-pallet = { package = \"serai-coins-pallet\", path = \"../coins/pallet\", default-features = false }\ndex-pallet = { package = \"serai-dex-pallet\", path = \"../dex/pallet\", default-features = false }\n\nvalidator-sets-pallet = { package = \"serai-validator-sets-pallet\", path = \"../validator-sets/pallet\", default-features = false }\ngenesis-liquidity-pallet = { package = \"serai-genesis-liquidity-pallet\", path = \"../genesis-liquidity/pallet\", default-features = false }\nemissions-pallet = { package = \"serai-emissions-pallet\", path = \"../emissions/pallet\", default-features = false }\n\neconomic-security-pallet = { package = \"serai-economic-security-pallet\", path = \"../economic-security/pallet\", default-features = false }\n\nin-instructions-pallet = { package = \"serai-in-instructions-pallet\", path = \"../in-instructions/pallet\", default-features = false }\n\nsignals-pallet = { package = \"serai-signals-pallet\", path = \"../signals/pallet\", default-features = false }\n\npallet-session = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\npallet-babe = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\npallet-grandpa = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\n\nframe-system-rpc-runtime-api = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\npallet-transaction-payment-rpc-runtime-api = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false 
}\n\n[build-dependencies]\nsubstrate-wasm-builder = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\" }\n\n[features]\nstd = [\n  \"scale/std\",\n\n  \"sp-core/std\",\n  \"sp-std/std\",\n\n  \"sp-offchain/std\",\n  \"sp-version/std\",\n  \"sp-inherents/std\",\n\n  \"sp-session/std\",\n  \"sp-consensus-babe/std\",\n  \"sp-consensus-grandpa/std\",\n\n  \"sp-authority-discovery/std\",\n\n  \"sp-transaction-pool/std\",\n  \"sp-block-builder/std\",\n\n  \"sp-runtime/std\",\n  \"sp-api/std\",\n\n  \"frame-system/std\",\n  \"frame-support/std\",\n  \"frame-executive/std\",\n\n  \"serai-primitives/std\",\n  \"serai-abi/std\",\n  \"serai-abi/serde\",\n\n  \"pallet-timestamp/std\",\n  \"pallet-authorship/std\",\n\n  \"pallet-transaction-payment/std\",\n\n  \"coins-pallet/std\",\n  \"dex-pallet/std\",\n\n  \"validator-sets-pallet/std\",\n  \"genesis-liquidity-pallet/std\",\n  \"emissions-pallet/std\",\n\n  \"economic-security-pallet/std\",\n\n  \"in-instructions-pallet/std\",\n\n  \"signals-pallet/std\",\n\n  \"pallet-session/std\",\n  \"pallet-babe/std\",\n  \"pallet-grandpa/std\",\n\n  \"frame-system-rpc-runtime-api/std\",\n  \"pallet-transaction-payment-rpc-runtime-api/std\",\n]\n\nfast-epoch = [\n  \"genesis-liquidity-pallet/fast-epoch\",\n  \"emissions-pallet/fast-epoch\",\n]\n\nruntime-benchmarks = [\n  \"sp-runtime/runtime-benchmarks\",\n\n  \"frame-system/runtime-benchmarks\",\n  \"frame-support/runtime-benchmarks\",\n  \"frame-benchmarking/runtime-benchmarks\",\n\n  \"pallet-timestamp/runtime-benchmarks\",\n\n  \"pallet-babe/runtime-benchmarks\",\n  \"pallet-grandpa/runtime-benchmarks\",\n]\n\ndefault = [\"std\"]\n"
  },
  {
    "path": "substrate/runtime/LICENSE",
    "content": "AGPL-3.0-only license\n\nCopyright (c) 2022-2023 Luke Parker\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License Version 3 as\npublished by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n"
  },
  {
    "path": "substrate/runtime/build.rs",
    "content": "fn main() {\n  #[cfg(feature = \"std\")]\n  substrate_wasm_builder::WasmBuilder::build_using_defaults();\n}\n"
  },
  {
    "path": "substrate/runtime/src/abi.rs",
    "content": "use core::marker::PhantomData;\n\nuse scale::{Encode, Decode};\n\nuse serai_abi::Call;\n\nuse crate::{\n  Vec,\n  primitives::{PublicKey, SeraiAddress},\n  timestamp, coins, dex, genesis_liquidity,\n  validator_sets::{self, MembershipProof},\n  in_instructions, signals, babe, grandpa, RuntimeCall,\n};\n\nimpl From<Call> for RuntimeCall {\n  fn from(call: Call) -> RuntimeCall {\n    match call {\n      Call::Timestamp(serai_abi::timestamp::Call::set { now }) => {\n        RuntimeCall::Timestamp(timestamp::Call::set { now })\n      }\n      Call::Coins(coins) => match coins {\n        serai_abi::coins::Call::transfer { to, balance } => {\n          RuntimeCall::Coins(coins::Call::transfer { to: to.into(), balance })\n        }\n        serai_abi::coins::Call::burn { balance } => {\n          RuntimeCall::Coins(coins::Call::burn { balance })\n        }\n        serai_abi::coins::Call::burn_with_instruction { instruction } => {\n          RuntimeCall::Coins(coins::Call::burn_with_instruction { instruction })\n        }\n      },\n      Call::LiquidityTokens(lt) => match lt {\n        serai_abi::liquidity_tokens::Call::transfer { to, balance } => {\n          RuntimeCall::LiquidityTokens(coins::Call::transfer { to: to.into(), balance })\n        }\n        serai_abi::liquidity_tokens::Call::burn { balance } => {\n          RuntimeCall::LiquidityTokens(coins::Call::burn { balance })\n        }\n      },\n      Call::Dex(dex) => match dex {\n        serai_abi::dex::Call::add_liquidity {\n          coin,\n          coin_desired,\n          sri_desired,\n          coin_min,\n          sri_min,\n          mint_to,\n        } => RuntimeCall::Dex(dex::Call::add_liquidity {\n          coin,\n          coin_desired,\n          sri_desired,\n          coin_min,\n          sri_min,\n          mint_to: mint_to.into(),\n        }),\n        serai_abi::dex::Call::remove_liquidity {\n          coin,\n          lp_token_burn,\n          coin_min_receive,\n          
sri_min_receive,\n          withdraw_to,\n        } => RuntimeCall::Dex(dex::Call::remove_liquidity {\n          coin,\n          lp_token_burn,\n          coin_min_receive,\n          sri_min_receive,\n          withdraw_to: withdraw_to.into(),\n        }),\n        serai_abi::dex::Call::swap_exact_tokens_for_tokens {\n          path,\n          amount_in,\n          amount_out_min,\n          send_to,\n        } => RuntimeCall::Dex(dex::Call::swap_exact_tokens_for_tokens {\n          path,\n          amount_in,\n          amount_out_min,\n          send_to: send_to.into(),\n        }),\n        serai_abi::dex::Call::swap_tokens_for_exact_tokens {\n          path,\n          amount_out,\n          amount_in_max,\n          send_to,\n        } => RuntimeCall::Dex(dex::Call::swap_tokens_for_exact_tokens {\n          path,\n          amount_out,\n          amount_in_max,\n          send_to: send_to.into(),\n        }),\n      },\n      Call::ValidatorSets(vs) => match vs {\n        serai_abi::validator_sets::Call::set_keys {\n          network,\n          removed_participants,\n          key_pair,\n          signature,\n        } => RuntimeCall::ValidatorSets(validator_sets::Call::set_keys {\n          network,\n          removed_participants: <_>::try_from(\n            removed_participants.into_iter().map(PublicKey::from).collect::<Vec<_>>(),\n          )\n          .unwrap(),\n          key_pair,\n          signature,\n        }),\n        serai_abi::validator_sets::Call::report_slashes { network, slashes, signature } => {\n          RuntimeCall::ValidatorSets(validator_sets::Call::report_slashes {\n            network,\n            slashes: <_>::try_from(\n              slashes\n                .into_iter()\n                .map(|(addr, slash)| (PublicKey::from(addr), slash))\n                .collect::<Vec<_>>(),\n            )\n            .unwrap(),\n            signature,\n          })\n        }\n        serai_abi::validator_sets::Call::allocate { network, 
amount } => {\n          RuntimeCall::ValidatorSets(validator_sets::Call::allocate { network, amount })\n        }\n        serai_abi::validator_sets::Call::deallocate { network, amount } => {\n          RuntimeCall::ValidatorSets(validator_sets::Call::deallocate { network, amount })\n        }\n        serai_abi::validator_sets::Call::claim_deallocation { network, session } => {\n          RuntimeCall::ValidatorSets(validator_sets::Call::claim_deallocation { network, session })\n        }\n      },\n      Call::GenesisLiquidity(gl) => match gl {\n        serai_abi::genesis_liquidity::Call::remove_coin_liquidity { balance } => {\n          RuntimeCall::GenesisLiquidity(genesis_liquidity::Call::remove_coin_liquidity { balance })\n        }\n        serai_abi::genesis_liquidity::Call::oraclize_values { values, signature } => {\n          RuntimeCall::GenesisLiquidity(genesis_liquidity::Call::oraclize_values {\n            values,\n            signature,\n          })\n        }\n      },\n      Call::InInstructions(ii) => match ii {\n        serai_abi::in_instructions::Call::execute_batch { batch } => {\n          RuntimeCall::InInstructions(in_instructions::Call::execute_batch { batch })\n        }\n      },\n      Call::Signals(signals) => match signals {\n        serai_abi::signals::Call::register_retirement_signal { in_favor_of } => {\n          RuntimeCall::Signals(signals::Call::register_retirement_signal { in_favor_of })\n        }\n        serai_abi::signals::Call::revoke_retirement_signal { retirement_signal_id } => {\n          RuntimeCall::Signals(signals::Call::revoke_retirement_signal { retirement_signal_id })\n        }\n        serai_abi::signals::Call::favor { signal_id, for_network } => {\n          RuntimeCall::Signals(signals::Call::favor { signal_id, for_network })\n        }\n        serai_abi::signals::Call::revoke_favor { signal_id, for_network } => {\n          RuntimeCall::Signals(signals::Call::revoke_favor { signal_id, for_network })\n      
  }\n        serai_abi::signals::Call::stand_against { signal_id, for_network } => {\n          RuntimeCall::Signals(signals::Call::stand_against { signal_id, for_network })\n        }\n      },\n      Call::Babe(babe) => match babe {\n        serai_abi::babe::Call::report_equivocation(report) => {\n          RuntimeCall::Babe(babe::Call::report_equivocation {\n            // TODO: Find a better way to go from Proof<[u8; 32]> to Proof<H256>\n            equivocation_proof: <_>::decode(&mut report.equivocation_proof.encode().as_slice())\n              .unwrap(),\n            key_owner_proof: MembershipProof(report.key_owner_proof.into(), PhantomData),\n          })\n        }\n        serai_abi::babe::Call::report_equivocation_unsigned(report) => {\n          RuntimeCall::Babe(babe::Call::report_equivocation_unsigned {\n            // TODO: Find a better way to go from Proof<[u8; 32]> to Proof<H256>\n            equivocation_proof: <_>::decode(&mut report.equivocation_proof.encode().as_slice())\n              .unwrap(),\n            key_owner_proof: MembershipProof(report.key_owner_proof.into(), PhantomData),\n          })\n        }\n      },\n      Call::Grandpa(grandpa) => match grandpa {\n        serai_abi::grandpa::Call::report_equivocation(report) => {\n          RuntimeCall::Grandpa(grandpa::Call::report_equivocation {\n            // TODO: Find a better way to go from Proof<[u8; 32]> to Proof<H256>\n            equivocation_proof: <_>::decode(&mut report.equivocation_proof.encode().as_slice())\n              .unwrap(),\n            key_owner_proof: MembershipProof(report.key_owner_proof.into(), PhantomData),\n          })\n        }\n        serai_abi::grandpa::Call::report_equivocation_unsigned(report) => {\n          RuntimeCall::Grandpa(grandpa::Call::report_equivocation_unsigned {\n            // TODO: Find a better way to go from Proof<[u8; 32]> to Proof<H256>\n            equivocation_proof: <_>::decode(&mut 
report.equivocation_proof.encode().as_slice())\n              .unwrap(),\n            key_owner_proof: MembershipProof(report.key_owner_proof.into(), PhantomData),\n          })\n        }\n      },\n    }\n  }\n}\n\nimpl TryInto<Call> for RuntimeCall {\n  type Error = ();\n\n  fn try_into(self) -> Result<Call, ()> {\n    Ok(match self {\n      RuntimeCall::System(_) => Err(())?,\n      RuntimeCall::Timestamp(timestamp::Call::set { now }) => {\n        Call::Timestamp(serai_abi::timestamp::Call::set { now })\n      }\n      RuntimeCall::Coins(call) => Call::Coins(match call {\n        coins::Call::transfer { to, balance } => {\n          serai_abi::coins::Call::transfer { to: to.into(), balance }\n        }\n        coins::Call::burn { balance } => serai_abi::coins::Call::burn { balance },\n        coins::Call::burn_with_instruction { instruction } => {\n          serai_abi::coins::Call::burn_with_instruction { instruction }\n        }\n      }),\n      RuntimeCall::LiquidityTokens(call) => Call::LiquidityTokens(match call {\n        coins::Call::transfer { to, balance } => {\n          serai_abi::liquidity_tokens::Call::transfer { to: to.into(), balance }\n        }\n        coins::Call::burn { balance } => serai_abi::liquidity_tokens::Call::burn { balance },\n        _ => Err(())?,\n      }),\n      RuntimeCall::Dex(call) => Call::Dex(match call {\n        dex::Call::add_liquidity {\n          coin,\n          coin_desired,\n          sri_desired,\n          coin_min,\n          sri_min,\n          mint_to,\n        } => serai_abi::dex::Call::add_liquidity {\n          coin,\n          coin_desired,\n          sri_desired,\n          coin_min,\n          sri_min,\n          mint_to: mint_to.into(),\n        },\n        dex::Call::remove_liquidity {\n          coin,\n          lp_token_burn,\n          coin_min_receive,\n          sri_min_receive,\n          withdraw_to,\n        } => serai_abi::dex::Call::remove_liquidity {\n          coin,\n          
lp_token_burn,\n          coin_min_receive,\n          sri_min_receive,\n          withdraw_to: withdraw_to.into(),\n        },\n        dex::Call::swap_exact_tokens_for_tokens { path, amount_in, amount_out_min, send_to } => {\n          serai_abi::dex::Call::swap_exact_tokens_for_tokens {\n            path,\n            amount_in,\n            amount_out_min,\n            send_to: send_to.into(),\n          }\n        }\n        dex::Call::swap_tokens_for_exact_tokens { path, amount_out, amount_in_max, send_to } => {\n          serai_abi::dex::Call::swap_tokens_for_exact_tokens {\n            path,\n            amount_out,\n            amount_in_max,\n            send_to: send_to.into(),\n          }\n        }\n      }),\n      RuntimeCall::GenesisLiquidity(call) => Call::GenesisLiquidity(match call {\n        genesis_liquidity::Call::remove_coin_liquidity { balance } => {\n          serai_abi::genesis_liquidity::Call::remove_coin_liquidity { balance }\n        }\n        genesis_liquidity::Call::oraclize_values { values, signature } => {\n          serai_abi::genesis_liquidity::Call::oraclize_values { values, signature }\n        }\n      }),\n      RuntimeCall::ValidatorSets(call) => Call::ValidatorSets(match call {\n        validator_sets::Call::set_keys { network, removed_participants, key_pair, signature } => {\n          serai_abi::validator_sets::Call::set_keys {\n            network,\n            removed_participants: <_>::try_from(\n              removed_participants.into_iter().map(SeraiAddress::from).collect::<Vec<_>>(),\n            )\n            .unwrap(),\n            key_pair,\n            signature,\n          }\n        }\n        validator_sets::Call::report_slashes { network, slashes, signature } => {\n          serai_abi::validator_sets::Call::report_slashes {\n            network,\n            slashes: <_>::try_from(\n              slashes\n                .into_iter()\n                .map(|(addr, slash)| (SeraiAddress::from(addr), 
slash))\n                .collect::<Vec<_>>(),\n            )\n            .unwrap(),\n            signature,\n          }\n        }\n        validator_sets::Call::allocate { network, amount } => {\n          serai_abi::validator_sets::Call::allocate { network, amount }\n        }\n        validator_sets::Call::deallocate { network, amount } => {\n          serai_abi::validator_sets::Call::deallocate { network, amount }\n        }\n        validator_sets::Call::claim_deallocation { network, session } => {\n          serai_abi::validator_sets::Call::claim_deallocation { network, session }\n        }\n      }),\n      RuntimeCall::InInstructions(call) => Call::InInstructions(match call {\n        in_instructions::Call::execute_batch { batch } => {\n          serai_abi::in_instructions::Call::execute_batch { batch }\n        }\n      }),\n      RuntimeCall::Signals(call) => Call::Signals(match call {\n        signals::Call::register_retirement_signal { in_favor_of } => {\n          serai_abi::signals::Call::register_retirement_signal { in_favor_of }\n        }\n        signals::Call::revoke_retirement_signal { retirement_signal_id } => {\n          serai_abi::signals::Call::revoke_retirement_signal { retirement_signal_id }\n        }\n        signals::Call::favor { signal_id, for_network } => {\n          serai_abi::signals::Call::favor { signal_id, for_network }\n        }\n        signals::Call::revoke_favor { signal_id, for_network } => {\n          serai_abi::signals::Call::revoke_favor { signal_id, for_network }\n        }\n        signals::Call::stand_against { signal_id, for_network } => {\n          serai_abi::signals::Call::stand_against { signal_id, for_network }\n        }\n      }),\n      RuntimeCall::Babe(call) => Call::Babe(match call {\n        babe::Call::report_equivocation { equivocation_proof, key_owner_proof } => {\n          serai_abi::babe::Call::report_equivocation(serai_abi::babe::ReportEquivocation {\n            // TODO: Find a better way 
to go from Proof<H256> to Proof<[u8; 32]>\n            equivocation_proof: <_>::decode(&mut equivocation_proof.encode().as_slice()).unwrap(),\n            key_owner_proof: key_owner_proof.0.into(),\n          })\n        }\n        babe::Call::report_equivocation_unsigned { equivocation_proof, key_owner_proof } => {\n          serai_abi::babe::Call::report_equivocation_unsigned(serai_abi::babe::ReportEquivocation {\n            // TODO: Find a better way to go from Proof<H256> to Proof<[u8; 32]>\n            equivocation_proof: <_>::decode(&mut equivocation_proof.encode().as_slice()).unwrap(),\n            key_owner_proof: key_owner_proof.0.into(),\n          })\n        }\n        _ => Err(())?,\n      }),\n      RuntimeCall::Grandpa(call) => Call::Grandpa(match call {\n        grandpa::Call::report_equivocation { equivocation_proof, key_owner_proof } => {\n          serai_abi::grandpa::Call::report_equivocation(serai_abi::grandpa::ReportEquivocation {\n            // TODO: Find a better way to go from Proof<H256> to Proof<[u8; 32]>\n            equivocation_proof: <_>::decode(&mut equivocation_proof.encode().as_slice()).unwrap(),\n            key_owner_proof: key_owner_proof.0.into(),\n          })\n        }\n        grandpa::Call::report_equivocation_unsigned { equivocation_proof, key_owner_proof } => {\n          serai_abi::grandpa::Call::report_equivocation_unsigned(\n            serai_abi::grandpa::ReportEquivocation {\n              // TODO: Find a better way to go from Proof<H256> to Proof<[u8; 32]>\n              equivocation_proof: <_>::decode(&mut equivocation_proof.encode().as_slice()).unwrap(),\n              key_owner_proof: key_owner_proof.0.into(),\n            },\n          )\n        }\n        _ => Err(())?,\n      }),\n    })\n  }\n}\n"
  },
  {
    "path": "substrate/runtime/src/lib.rs",
    "content": "#![allow(deprecated)]\n#![cfg_attr(docsrs, feature(doc_cfg))]\n#![cfg_attr(docsrs, feature(doc_cfg))]\n#![cfg_attr(not(feature = \"std\"), no_std)]\n#![recursion_limit = \"256\"]\n\n#[cfg(feature = \"std\")]\ninclude!(concat!(env!(\"OUT_DIR\"), \"/wasm_binary.rs\"));\n\nuse core::marker::PhantomData;\n\n// Re-export all components\npub use serai_primitives as primitives;\npub use primitives::{BlockNumber, Header};\n\npub use frame_system as system;\npub use frame_support as support;\n\npub use pallet_timestamp as timestamp;\n\npub use pallet_transaction_payment as transaction_payment;\n\npub use coins_pallet as coins;\npub use dex_pallet as dex;\n\npub use validator_sets_pallet as validator_sets;\n\npub use in_instructions_pallet as in_instructions;\n\npub use signals_pallet as signals;\n\npub use pallet_babe as babe;\npub use pallet_grandpa as grandpa;\n\npub use genesis_liquidity_pallet as genesis_liquidity;\npub use emissions_pallet as emissions;\n\npub use economic_security_pallet as economic_security;\n\nuse sp_std::prelude::*;\n\nuse sp_version::RuntimeVersion;\n\nuse sp_runtime::{\n  create_runtime_str, generic, impl_opaque_keys, KeyTypeId,\n  traits::{Convert, BlakeTwo256, Block as BlockT},\n  transaction_validity::{TransactionSource, TransactionValidity},\n  BoundedVec, Perbill, ApplyExtrinsicResult,\n};\n\n#[allow(unused_imports)]\nuse primitives::{\n  NetworkId, PublicKey, AccountLookup, SubstrateAmount, Coin, EXTERNAL_NETWORKS,\n  MEDIAN_PRICE_WINDOW_LENGTH, HOURS, DAYS, MINUTES, TARGET_BLOCK_TIME, BLOCK_SIZE,\n  FAST_EPOCH_DURATION,\n};\n\nuse support::{\n  traits::{ConstU8, ConstU16, ConstU32, ConstU64, Contains},\n  weights::{\n    constants::{RocksDbWeight, WEIGHT_REF_TIME_PER_SECOND},\n    IdentityFee, Weight,\n  },\n  parameter_types, construct_runtime,\n};\n\nuse validator_sets::MembershipProof;\n\nuse sp_authority_discovery::AuthorityId as AuthorityDiscoveryId;\nuse babe::AuthorityId as BabeId;\nuse grandpa::AuthorityId as 
GrandpaId;\n\nmod abi;\n\n/// Nonce of a transaction in the chain, for a given account.\npub type Nonce = u32;\n\n/// A hash of some data used by the chain.\npub type Hash = sp_core::H256;\n\npub type SignedExtra = (\n  system::CheckNonZeroSender<Runtime>,\n  system::CheckSpecVersion<Runtime>,\n  system::CheckTxVersion<Runtime>,\n  system::CheckGenesis<Runtime>,\n  system::CheckEra<Runtime>,\n  system::CheckNonce<Runtime>,\n  system::CheckWeight<Runtime>,\n  transaction_payment::ChargeTransactionPayment<Runtime>,\n);\n\npub type Transaction = serai_abi::tx::Transaction<RuntimeCall, SignedExtra>;\npub type Block = generic::Block<Header, Transaction>;\npub type BlockId = generic::BlockId<Block>;\n\npub mod opaque {\n  use super::*;\n\n  impl_opaque_keys! {\n    pub struct SessionKeys {\n      pub babe: Babe,\n      pub grandpa: Grandpa,\n    }\n  }\n}\n\n#[sp_version::runtime_version]\npub const VERSION: RuntimeVersion = RuntimeVersion {\n  spec_name: create_runtime_str!(\"serai\"),\n  impl_name: create_runtime_str!(\"core\"),\n  spec_version: 1,\n  impl_version: 1,\n  authoring_version: 1,\n  apis: RUNTIME_API_VERSIONS,\n  transaction_version: 1,\n  system_version: 1,\n};\n\npub const PRIMARY_PROBABILITY: (u64, u64) = (1, 4);\npub const BABE_GENESIS_EPOCH_CONFIG: sp_consensus_babe::BabeEpochConfiguration =\n  sp_consensus_babe::BabeEpochConfiguration {\n    c: PRIMARY_PROBABILITY,\n    allowed_slots: sp_consensus_babe::AllowedSlots::PrimaryAndSecondaryPlainSlots,\n  };\n\nconst NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75);\n\nparameter_types! 
{\n  pub const BlockHashCount: BlockNumber = 2400;\n  pub const Version: RuntimeVersion = VERSION;\n\n  // 1 MB block size limit\n  pub BlockLength: system::limits::BlockLength =\n    system::limits::BlockLength::max_with_normal_ratio(BLOCK_SIZE, NORMAL_DISPATCH_RATIO);\n  pub BlockWeights: system::limits::BlockWeights =\n    system::limits::BlockWeights::with_sensible_defaults(\n      Weight::from_parts(2u64 * WEIGHT_REF_TIME_PER_SECOND, u64::MAX),\n      NORMAL_DISPATCH_RATIO,\n    );\n}\n\npub struct CallFilter;\nimpl Contains<RuntimeCall> for CallFilter {\n  fn contains(call: &RuntimeCall) -> bool {\n    // If the call is defined in our ABI, it's allowed\n    let call: Result<serai_abi::Call, ()> = call.clone().try_into();\n    call.is_ok()\n  }\n}\n\nimpl system::Config for Runtime {\n  type BaseCallFilter = CallFilter;\n  type BlockWeights = BlockWeights;\n  type BlockLength = BlockLength;\n  type AccountId = PublicKey;\n  type RuntimeCall = RuntimeCall;\n  type Lookup = AccountLookup;\n  type Hash = Hash;\n  type Hashing = BlakeTwo256;\n  type Nonce = Nonce;\n  type Block = Block;\n  type RuntimeOrigin = RuntimeOrigin;\n  type RuntimeEvent = RuntimeEvent;\n  type BlockHashCount = BlockHashCount;\n  type DbWeight = RocksDbWeight;\n  type Version = Version;\n  type PalletInfo = PalletInfo;\n\n  type OnNewAccount = ();\n  type OnKilledAccount = ();\n  type OnSetCode = ();\n\n  type AccountData = ();\n  type SystemWeightInfo = ();\n\n  type MaxConsumers = support::traits::ConstU32<16>;\n\n  type RuntimeTask = ();\n  type ExtensionsWeightInfo = (); // TODO\n  type SingleBlockMigrations = ();\n  type MultiBlockMigrator = ();\n  type PreInherents = ();\n  type PostInherents = ();\n  type PostTransactions = ();\n}\n\nimpl timestamp::Config for Runtime {\n  type Moment = u64;\n  type OnTimestampSet = Babe;\n  type MinimumPeriod = ConstU64<{ (TARGET_BLOCK_TIME * 1000) / 2 }>;\n  type WeightInfo = ();\n}\n\nimpl transaction_payment::Config for Runtime {\n  type 
RuntimeEvent = RuntimeEvent;\n  type OnChargeTransaction = Coins;\n  type OperationalFeeMultiplier = ConstU8<5>;\n  type WeightToFee = IdentityFee<SubstrateAmount>;\n  type LengthToFee = IdentityFee<SubstrateAmount>;\n  type FeeMultiplierUpdate = ();\n  type WeightInfo = ();\n}\n\nimpl coins::Config for Runtime {\n  type AllowMint = ValidatorSets;\n}\n\nimpl coins::Config<coins::Instance1> for Runtime {\n  type AllowMint = ();\n}\n\nimpl dex::Config for Runtime {\n  type LPFee = ConstU32<3>; // 0.3%\n  type MintMinLiquidity = ConstU64<10000>;\n\n  type MaxSwapPathLength = ConstU32<3>; // coin1 -> SRI -> coin2\n\n  type MedianPriceWindowLength = ConstU16<{ MEDIAN_PRICE_WINDOW_LENGTH }>;\n\n  type WeightInfo = dex::weights::SubstrateWeight<Runtime>;\n}\n\nimpl validator_sets::Config for Runtime {\n  type ShouldEndSession = Babe;\n}\n\npub struct IdentityValidatorIdOf;\nimpl Convert<PublicKey, Option<PublicKey>> for IdentityValidatorIdOf {\n  fn convert(key: PublicKey) -> Option<PublicKey> {\n    Some(key)\n  }\n}\n\nimpl signals::Config for Runtime {\n  // 1 week\n  #[allow(clippy::cast_possible_truncation)]\n  type RetirementValidityDuration = ConstU32<{ (7 * 24 * 60 * 60) / (TARGET_BLOCK_TIME as u32) }>;\n  // 2 weeks\n  #[allow(clippy::cast_possible_truncation)]\n  type RetirementLockInDuration = ConstU32<{ (2 * 7 * 24 * 60 * 60) / (TARGET_BLOCK_TIME as u32) }>;\n}\n\nimpl in_instructions::Config for Runtime {}\n\nimpl genesis_liquidity::Config for Runtime {}\n\nimpl emissions::Config for Runtime {}\n\nimpl economic_security::Config for Runtime {}\n\n// for validating equivocation evidences.\n// The following runtime construction doesn't actually implement the pallet as doing so is\n// unnecessary\n// TODO: Replace the requirement on Config for a requirement on FindAuthor directly\nimpl pallet_authorship::Config for Runtime {\n  type FindAuthor = ValidatorSets;\n  type EventHandler = ();\n}\n\n// Maximum number of authorities per session.\npub type MaxAuthorities 
= ConstU32<{ validator_sets::primitives::MAX_KEY_SHARES_PER_SET }>;\n\n/// Longevity of an offence report.\npub type ReportLongevity = <Runtime as pallet_babe::Config>::EpochDuration;\n\nimpl babe::Config for Runtime {\n  #[cfg(feature = \"fast-epoch\")]\n  type EpochDuration = ConstU64<{ FAST_EPOCH_DURATION }>;\n\n  #[cfg(not(feature = \"fast-epoch\"))]\n  type EpochDuration = ConstU64<{ 4 * 7 * DAYS }>;\n\n  type ExpectedBlockTime = ConstU64<{ TARGET_BLOCK_TIME * 1000 }>;\n  type EpochChangeTrigger = babe::ExternalTrigger;\n  type DisabledValidators = ValidatorSets;\n\n  type WeightInfo = ();\n  type MaxAuthorities = MaxAuthorities;\n  type MaxNominators = ConstU32<1>;\n\n  type KeyOwnerProof = MembershipProof<Self>;\n  type EquivocationReportSystem =\n    babe::EquivocationReportSystem<Self, ValidatorSets, ValidatorSets, ReportLongevity>;\n}\n\nimpl grandpa::Config for Runtime {\n  type RuntimeEvent = RuntimeEvent;\n\n  type WeightInfo = ();\n  type MaxAuthorities = MaxAuthorities;\n  type MaxNominators = ConstU32<1>;\n\n  type MaxSetIdSessionEntries = ConstU64<0>;\n  type KeyOwnerProof = MembershipProof<Self>;\n  type EquivocationReportSystem =\n    grandpa::EquivocationReportSystem<Self, ValidatorSets, ValidatorSets, ReportLongevity>;\n}\n\n#[doc(hidden)]\npub struct GetCurrentSessionForSubstrate;\nimpl pallet_session::GetCurrentSessionForSubstrate for GetCurrentSessionForSubstrate {\n  fn get() -> u32 {\n    validator_sets::Pallet::<Runtime>::latest_decided_session(NetworkId::Serai).unwrap().0 - 1\n  }\n}\nimpl pallet_session::Config for Runtime {\n  type Session = GetCurrentSessionForSubstrate;\n}\n\npub type Executive = frame_executive::Executive<\n  Runtime,\n  Block,\n  system::ChainContext<Runtime>,\n  Runtime,\n  AllPalletsWithSystem,\n>;\n\nconstruct_runtime!(\n  pub enum Runtime {\n    System: system,\n\n    Timestamp: timestamp,\n\n    TransactionPayment: transaction_payment,\n\n    Coins: coins,\n    LiquidityTokens: coins::<Instance1>::{Pallet, 
Call, Storage, Event<T>},\n    Dex: dex,\n\n    ValidatorSets: validator_sets,\n    GenesisLiquidity: genesis_liquidity,\n    Emissions: emissions,\n\n    EconomicSecurity: economic_security,\n\n    InInstructions: in_instructions,\n\n    Signals: signals,\n\n    Babe: babe,\n    Grandpa: grandpa,\n  }\n);\n\n#[cfg(feature = \"runtime-benchmarks\")]\n#[macro_use]\nextern crate frame_benchmarking;\n\n#[cfg(feature = \"runtime-benchmarks\")]\nmod benches {\n  define_benchmarks!(\n    [frame_benchmarking, BaselineBench::<Runtime>]\n\n    [system, SystemBench::<Runtime>]\n\n    [pallet_timestamp, Timestamp]\n\n    [balances, Balances]\n\n    [babe, Babe]\n    [grandpa, Grandpa]\n  );\n}\n\nsp_api::decl_runtime_apis! {\n  #[api_version(1)]\n  pub trait SeraiRuntimeApi {\n    fn validators(network_id: NetworkId) -> Vec<PublicKey>;\n  }\n\n  #[api_version(1)]\n  pub trait GenesisApi {\n    fn build(genesis: RuntimeGenesisConfig);\n  }\n}\n\nsp_api::impl_runtime_apis! {\n  impl sp_api::Core<Block> for Runtime {\n    fn version() -> RuntimeVersion {\n      VERSION\n    }\n\n    fn execute_block(block: Block) {\n      for tx in &block.extrinsics {\n        if let Some(signer) = tx.signer() {\n          let signer = PublicKey::from(signer.0);\n          let mut info = frame_system::Account::<Runtime>::get(signer);\n          if info.providers == 0 {\n            info.providers = 1;\n            frame_system::Account::<Runtime>::set(signer, info);\n          }\n        }\n      }\n      Executive::execute_block(block);\n    }\n\n    fn initialize_block(header: &Header) -> sp_runtime::ExtrinsicInclusionMode {\n      Executive::initialize_block(header);\n      sp_runtime::ExtrinsicInclusionMode::AllExtrinsics\n    }\n  }\n\n  impl sp_block_builder::BlockBuilder<Block> for Runtime {\n    fn apply_extrinsic(extrinsic: <Block as BlockT>::Extrinsic) -> ApplyExtrinsicResult {\n      if let Some(signer) = extrinsic.signer() {\n        let signer = PublicKey::from(signer.0);\n        
let mut info = frame_system::Account::<Runtime>::get(signer);\n        if info.providers == 0 {\n          info.providers = 1;\n          frame_system::Account::<Runtime>::set(signer, info);\n        }\n      }\n      Executive::apply_extrinsic(extrinsic)\n    }\n\n    fn finalize_block() -> Header {\n      Executive::finalize_block()\n    }\n\n    fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<<Block as BlockT>::Extrinsic> {\n      data.create_extrinsics()\n    }\n\n    fn check_inherents(\n      block: Block,\n      data: sp_inherents::InherentData,\n    ) -> sp_inherents::CheckInherentsResult {\n      data.check_extrinsics(&block)\n    }\n  }\n\n  impl sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block> for Runtime {\n    fn validate_transaction(\n      source: TransactionSource,\n      tx: <Block as BlockT>::Extrinsic,\n      block_hash: <Block as BlockT>::Hash,\n    ) -> TransactionValidity {\n      if let Some(signer) = tx.signer() {\n        let signer = PublicKey::from(signer.0);\n        let mut info = frame_system::Account::<Runtime>::get(signer);\n        if info.providers == 0 {\n          info.providers = 1;\n          frame_system::Account::<Runtime>::set(signer, info);\n        }\n      }\n      Executive::validate_transaction(source, tx, block_hash)\n    }\n  }\n\n  impl sp_offchain::OffchainWorkerApi<Block> for Runtime {\n    fn offchain_worker(header: &Header) {\n      Executive::offchain_worker(header)\n    }\n  }\n\n  impl sp_session::SessionKeys<Block> for Runtime {\n    fn generate_session_keys(seed: Option<Vec<u8>>) -> Vec<u8> {\n      opaque::SessionKeys::generate(seed)\n    }\n\n    fn decode_session_keys(\n      encoded: Vec<u8>,\n    ) -> Option<Vec<(Vec<u8>, KeyTypeId)>> {\n      opaque::SessionKeys::decode_into_raw_public_keys(&encoded)\n    }\n  }\n\n  impl sp_consensus_babe::BabeApi<Block> for Runtime {\n    fn configuration() -> sp_consensus_babe::BabeConfiguration {\n      use support::traits::Get;\n\n   
   let epoch_config = Babe::epoch_config().unwrap_or(BABE_GENESIS_EPOCH_CONFIG);\n      sp_consensus_babe::BabeConfiguration {\n        slot_duration: Babe::slot_duration(),\n        epoch_length: <Runtime as babe::Config>::EpochDuration::get(),\n        c: epoch_config.c,\n        authorities: Babe::authorities().to_vec(),\n        randomness: Babe::randomness(),\n        allowed_slots: epoch_config.allowed_slots,\n      }\n    }\n\n    fn current_epoch_start() -> sp_consensus_babe::Slot {\n      Babe::current_epoch_start()\n    }\n\n    fn current_epoch() -> sp_consensus_babe::Epoch {\n      Babe::current_epoch()\n    }\n\n    fn next_epoch() -> sp_consensus_babe::Epoch {\n      Babe::next_epoch()\n    }\n\n    // This refers to a key being 'owned' by an authority in a system with multiple keys per\n    // validator\n    // Since we do not have such an infrastructure, we do not need this\n    fn generate_key_ownership_proof(\n      _slot: sp_consensus_babe::Slot,\n      _authority_id: BabeId,\n    ) -> Option<sp_consensus_babe::OpaqueKeyOwnershipProof> {\n      Some(sp_consensus_babe::OpaqueKeyOwnershipProof::new(vec![]))\n    }\n\n    fn submit_report_equivocation_unsigned_extrinsic(\n      equivocation_proof: sp_consensus_babe::EquivocationProof<Header>,\n      _: sp_consensus_babe::OpaqueKeyOwnershipProof,\n    ) -> Option<()> {\n      let proof = MembershipProof(equivocation_proof.offender.clone().into(), PhantomData);\n      Babe::submit_unsigned_equivocation_report(equivocation_proof, proof)\n    }\n  }\n\n  impl sp_consensus_grandpa::GrandpaApi<Block> for Runtime {\n    fn grandpa_authorities() -> sp_consensus_grandpa::AuthorityList {\n      Grandpa::grandpa_authorities()\n    }\n\n    fn current_set_id() -> sp_consensus_grandpa::SetId {\n      Grandpa::current_set_id()\n    }\n\n    fn generate_key_ownership_proof(\n      _set_id: sp_consensus_grandpa::SetId,\n      _authority_id: GrandpaId,\n    ) -> Option<sp_consensus_grandpa::OpaqueKeyOwnershipProof> 
{\n      Some(sp_consensus_grandpa::OpaqueKeyOwnershipProof::new(vec![]))\n    }\n\n    fn submit_report_equivocation_unsigned_extrinsic(\n      equivocation_proof: sp_consensus_grandpa::EquivocationProof<<Block as BlockT>::Hash, u64>,\n      _: sp_consensus_grandpa::OpaqueKeyOwnershipProof,\n    ) -> Option<()> {\n      let proof = MembershipProof(equivocation_proof.offender().clone().into(), PhantomData);\n      Grandpa::submit_unsigned_equivocation_report(equivocation_proof, proof)\n    }\n  }\n\n  impl frame_system_rpc_runtime_api::AccountNonceApi<Block, PublicKey, Nonce> for Runtime {\n    fn account_nonce(account: PublicKey) -> Nonce {\n      System::account_nonce(account)\n    }\n  }\n\n  impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi<\n    Block,\n    SubstrateAmount\n  > for Runtime {\n    fn query_info(\n      uxt: <Block as BlockT>::Extrinsic,\n      len: u32,\n    ) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo<SubstrateAmount> {\n      TransactionPayment::query_info(uxt, len)\n    }\n\n    fn query_fee_details(\n      uxt: <Block as BlockT>::Extrinsic,\n      len: u32,\n    ) -> transaction_payment::FeeDetails<SubstrateAmount> {\n      TransactionPayment::query_fee_details(uxt, len)\n    }\n\n    fn query_weight_to_fee(weight: Weight) -> SubstrateAmount {\n      TransactionPayment::weight_to_fee(weight)\n    }\n\n    fn query_length_to_fee(length: u32) -> SubstrateAmount {\n      TransactionPayment::length_to_fee(length)\n    }\n  }\n\n  impl sp_authority_discovery::AuthorityDiscoveryApi<Block> for Runtime {\n    fn authorities() -> Vec<AuthorityDiscoveryId> {\n      // Converts to `[u8; 32]` so it can be hashed\n      let serai_validators = Babe::authorities()\n        .into_iter()\n        .map(|(id, _)| id.into_inner().0)\n        .collect::<hashbrown::HashSet<_>>();\n      let mut all = serai_validators;\n      for network in EXTERNAL_NETWORKS {\n        // Returning the latest-decided, not latest and 
active, means the active set\n        // may fail to peer find if there isn't sufficient overlap. If a large amount reboot,\n        // forcing some validators to successfully peer find in order for the threshold to become\n        // online again, this may cause a liveness failure.\n        //\n        // This is assumed not to matter in real life, yet an interesting note.\n        let participants =\n          ValidatorSets::participants_for_latest_decided_set(NetworkId::from(network))\n            .map_or(vec![], BoundedVec::into_inner);\n        for (participant, _) in participants {\n          all.insert(participant.0);\n        }\n      }\n      all.into_iter().map(|id| AuthorityDiscoveryId::from(PublicKey::from_raw(id))).collect()\n    }\n  }\n\n  impl crate::SeraiRuntimeApi<Block> for Runtime {\n    fn validators(network_id: NetworkId) -> Vec<PublicKey> {\n      if network_id == NetworkId::Serai {\n        Babe::authorities()\n          .into_iter()\n          .map(|(id, _)| id.into_inner())\n          .collect()\n      } else {\n        ValidatorSets::participants_for_latest_decided_set(network_id)\n          .map_or(\n            vec![],\n            |vec| vec.into_inner().into_iter().map(|(validator, _)| validator).collect()\n          )\n      }\n    }\n  }\n\n  impl crate::GenesisApi<Block> for Runtime {\n    fn build(genesis: RuntimeGenesisConfig) {\n      <RuntimeGenesisConfig as frame_support::traits::BuildGenesisConfig>::build(&genesis)\n    }\n  }\n\n  impl dex::DexApi<Block> for Runtime {\n    fn quote_price_exact_tokens_for_tokens(\n      coin1: Coin,\n      coin2: Coin,\n      amount: SubstrateAmount,\n      include_fee: bool\n    ) -> Option<SubstrateAmount> {\n      Dex::quote_price_exact_tokens_for_tokens(coin1, coin2, amount, include_fee)\n    }\n\n    fn quote_price_tokens_for_exact_tokens(\n      coin1: Coin,\n      coin2: Coin,\n      amount: SubstrateAmount,\n      include_fee: bool\n    ) -> Option<SubstrateAmount> {\n      
Dex::quote_price_tokens_for_exact_tokens(coin1, coin2, amount, include_fee)\n    }\n\n    fn get_reserves(coin1: Coin, coin2: Coin) -> Option<(SubstrateAmount, SubstrateAmount)> {\n      Dex::get_reserves(&coin1, &coin2).ok()\n    }\n  }\n}\n\nimpl<LocalCall> frame_system::offchain::CreateTransactionBase<LocalCall> for Runtime\nwhere\n  RuntimeCall: From<LocalCall>,\n{\n  type Extrinsic = <Block as BlockT>::Extrinsic;\n  type RuntimeCall = RuntimeCall;\n}\n\nimpl<LocalCall> frame_system::offchain::CreateBare<LocalCall> for Runtime\nwhere\n  RuntimeCall: From<LocalCall>,\n{\n  fn create_bare(call: RuntimeCall) -> <Block as BlockT>::Extrinsic {\n    <<Block as BlockT>::Extrinsic as frame_support::traits::InherentBuilder>::new_inherent(call)\n  }\n}\n"
  },
  {
    "path": "substrate/signals/pallet/Cargo.toml",
    "content": "[package]\nname = \"serai-signals-pallet\"\nversion = \"0.1.0\"\ndescription = \"Signals pallet\"\nlicense = \"AGPL-3.0-only\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/substrate/signals/pallet\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nedition = \"2021\"\nrust-version = \"1.74\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[package.metadata.cargo-machete]\nignored = [\"scale\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nscale = { package = \"parity-scale-codec\", version = \"3\", default-features = false, features = [\"derive\"] }\n\nsp-core = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nsp-io = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\n\nframe-system = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nframe-support = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\n\nserai-primitives = { path = \"../../primitives\", default-features = false }\nserai-signals-primitives = { path = \"../primitives\", default-features = false }\n\nvalidator-sets-pallet = { package = \"serai-validator-sets-pallet\", path = \"../../validator-sets/pallet\", default-features = false }\nin-instructions-pallet = { package = \"serai-in-instructions-pallet\", path = \"../../in-instructions/pallet\", default-features = false }\n\n[features]\nstd = [\n  \"scale/std\",\n\n  \"sp-core/std\",\n  \"sp-io/std\",\n\n  \"frame-system/std\",\n  \"frame-support/std\",\n\n  \"serai-primitives/std\",\n  \"serai-signals-primitives/std\",\n\n  \"validator-sets-pallet/std\",\n  
\"in-instructions-pallet/std\",\n]\n\nruntime-benchmarks = [\n  \"frame-system/runtime-benchmarks\",\n  \"frame-support/runtime-benchmarks\",\n]\n\n# TODO\ntry-runtime = []\n\ndefault = [\"std\"]\n"
  },
  {
    "path": "substrate/signals/pallet/LICENSE",
    "content": "AGPL-3.0-only license\n\nCopyright (c) 2023 Luke Parker\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License Version 3 as\npublished by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n"
  },
  {
    "path": "substrate/signals/pallet/src/lib.rs",
    "content": "#![cfg_attr(not(feature = \"std\"), no_std)]\n\n#[allow(\n  deprecated,\n  unreachable_patterns,\n  clippy::let_unit_value,\n  clippy::cast_possible_truncation,\n  clippy::ignored_unit_patterns\n)] // TODO\n#[frame_support::pallet]\npub mod pallet {\n  use sp_core::sr25519::Public;\n  use sp_io::hashing::blake2_256;\n\n  use frame_system::pallet_prelude::*;\n  // False positive\n  #[allow(unused)]\n  use frame_support::{pallet_prelude::*, sp_runtime};\n\n  use serai_primitives::*;\n  use serai_signals_primitives::SignalId;\n  use validator_sets_pallet::{primitives::ValidatorSet, Config as VsConfig, Pallet as VsPallet};\n  use in_instructions_pallet::{Config as IiConfig, Pallet as InInstructions};\n\n  #[pallet::config]\n  pub trait Config: frame_system::Config<AccountId = Public> + VsConfig + IiConfig {\n    type RetirementValidityDuration: Get<u32>;\n    type RetirementLockInDuration: Get<u32>;\n  }\n\n  #[pallet::genesis_config]\n  #[derive(Debug)]\n  pub struct GenesisConfig<T: Config> {\n    _config: PhantomData<T>,\n  }\n  impl<T: Config> Default for GenesisConfig<T> {\n    fn default() -> Self {\n      GenesisConfig { _config: PhantomData }\n    }\n  }\n  #[pallet::genesis_build]\n  impl<T: Config> BuildGenesisConfig for GenesisConfig<T> {\n    fn build(&self) {\n      // Assert the validity duration is less than the lock-in duration so lock-in periods\n      // automatically invalidate other retirement signals\n      assert!(T::RetirementValidityDuration::get() < T::RetirementLockInDuration::get());\n    }\n  }\n\n  #[pallet::pallet]\n  pub struct Pallet<T>(PhantomData<T>);\n\n  #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen)]\n  pub struct RegisteredRetirementSignal<T: Config> {\n    in_favor_of: [u8; 32],\n    registrant: T::AccountId,\n    registered_at: BlockNumberFor<T>,\n  }\n\n  impl<T: Config> RegisteredRetirementSignal<T> {\n    fn id(&self) -> [u8; 32] {\n      let mut preimage = 
b\"Signal\".to_vec();\n      preimage.extend(&self.encode());\n      blake2_256(&preimage)\n    }\n  }\n\n  #[pallet::storage]\n  type RegisteredRetirementSignals<T: Config> =\n    StorageMap<_, Blake2_128Concat, [u8; 32], RegisteredRetirementSignal<T>, OptionQuery>;\n\n  #[pallet::storage]\n  pub type Favors<T: Config> = StorageDoubleMap<\n    _,\n    Blake2_128Concat,\n    (SignalId, NetworkId),\n    Blake2_128Concat,\n    T::AccountId,\n    (),\n    OptionQuery,\n  >;\n\n  #[pallet::storage]\n  pub type SetsInFavor<T: Config> =\n    StorageMap<_, Blake2_128Concat, (SignalId, ValidatorSet), (), OptionQuery>;\n\n  #[pallet::storage]\n  pub type LockedInRetirement<T: Config> =\n    StorageValue<_, ([u8; 32], BlockNumberFor<T>), OptionQuery>;\n\n  #[pallet::event]\n  #[pallet::generate_deposit(pub(super) fn deposit_event)]\n  pub enum Event<T: Config> {\n    RetirementSignalRegistered {\n      signal_id: [u8; 32],\n      in_favor_of: [u8; 32],\n      registrant: T::AccountId,\n    },\n    RetirementSignalRevoked {\n      signal_id: [u8; 32],\n    },\n    SignalFavored {\n      signal_id: SignalId,\n      by: T::AccountId,\n      for_network: NetworkId,\n    },\n    SetInFavor {\n      signal_id: SignalId,\n      set: ValidatorSet,\n    },\n    RetirementSignalLockedIn {\n      signal_id: [u8; 32],\n    },\n    SetNoLongerInFavor {\n      signal_id: SignalId,\n      set: ValidatorSet,\n    },\n    FavorRevoked {\n      signal_id: SignalId,\n      by: T::AccountId,\n      for_network: NetworkId,\n    },\n    AgainstSignal {\n      signal_id: SignalId,\n      who: T::AccountId,\n      for_network: NetworkId,\n    },\n  }\n\n  #[pallet::error]\n  pub enum Error<T> {\n    RetirementSignalLockedIn,\n    RetirementSignalAlreadyRegistered,\n    NotRetirementSignalRegistrant,\n    NonExistentRetirementSignal,\n    ExpiredRetirementSignal,\n    NotValidator,\n    RevokingNonExistentFavor,\n  }\n\n  // 80% threshold\n  // TODO: Use 34% for halting a set (not 80%)\n  const 
REQUIREMENT_NUMERATOR: u64 = 4;\n  const REQUIREMENT_DIVISOR: u64 = 5;\n\n  impl<T: Config> Pallet<T> {\n    // Returns true if this network's current set is in favor of the signal.\n    //\n    // Must only be called for networks which have a set decided.\n    fn tally_for_network(signal_id: SignalId, network: NetworkId) -> bool {\n      let this_network_session = VsPallet::<T>::latest_decided_session(network).unwrap();\n      let this_set = ValidatorSet { network, session: this_network_session };\n\n      // This is a bounded O(n) (which is still acceptable) due to the infeasibility of caching\n      // here\n      // TODO: Make caching feasible? Do a first-pass with cache then actual pass before\n      // execution?\n      let mut iter = Favors::<T>::iter_prefix_values((signal_id, network));\n      let mut needed_favor = (VsPallet::<T>::total_allocated_stake(network).unwrap().0 *\n        REQUIREMENT_NUMERATOR)\n        .div_ceil(REQUIREMENT_DIVISOR);\n      while iter.next().is_some() && (needed_favor != 0) {\n        let item_key = iter.last_raw_key();\n        // `.len() - 32` is safe because AccountId is bound to being Public, which is 32 bytes\n        let account = T::AccountId::decode(&mut &item_key[(item_key.len() - 32) ..]).unwrap();\n        if VsPallet::<T>::in_latest_decided_set(network, account) {\n          // This call uses the current allocation, not the allocation at the time of set\n          // decision\n          // This is deemed safe due to the validator-set pallet's deallocation scheduling\n          // unwrap is safe due to being in the latest decided set\n          needed_favor =\n            needed_favor.saturating_sub(VsPallet::<T>::allocation((network, account)).unwrap().0);\n        }\n      }\n\n      if needed_favor == 0 {\n        // Set the set as in favor until someone triggers a re-tally\n        //\n        // Since a re-tally is an extra step we can't assume will occur, this effectively means a\n        // network in favor 
across any point in its Session is in favor for its entire Session\n        // While a malicious actor could increase their stake, favor a signal, then deallocate,\n        // this is largely prevented by deallocation scheduling\n        //\n        // At any given point, only just under 50% of a set can be immediately deallocated\n        // (if each validator has just under two key shares, they can deallocate the entire amount\n        // above a single key share)\n        //\n        // This means that if a signal has a 67% adoption threshold, and someone executes this\n        // attack, they still have a majority of the allocated stake (though less of a majority\n        // than desired)\n        //\n        // With the 80% threshold, removing 39.9% creates a 40.1% to 20% ratio, which is still\n        // the BFT threshold of 67%\n        if !SetsInFavor::<T>::contains_key((signal_id, this_set)) {\n          SetsInFavor::<T>::set((signal_id, this_set), Some(()));\n          Self::deposit_event(Event::SetInFavor { signal_id, set: this_set });\n        }\n        true\n      } else {\n        if SetsInFavor::<T>::contains_key((signal_id, this_set)) {\n          // This should no longer be under the current tally\n          SetsInFavor::<T>::remove((signal_id, this_set));\n          Self::deposit_event(Event::SetNoLongerInFavor { signal_id, set: this_set });\n        }\n        false\n      }\n    }\n\n    fn tally_for_all_networks(signal_id: SignalId) -> bool {\n      let mut total_in_favor_stake = 0;\n      let mut total_allocated_stake = 0;\n      for network in serai_primitives::NETWORKS {\n        let Some(latest_decided_session) = VsPallet::<T>::latest_decided_session(network) else {\n          continue;\n        };\n        // If it has a session, it should have a total allocated stake value\n        let network_stake = VsPallet::<T>::total_allocated_stake(network).unwrap();\n        if SetsInFavor::<T>::contains_key((\n          signal_id,\n          
ValidatorSet { network, session: latest_decided_session },\n        )) {\n          total_in_favor_stake += network_stake.0;\n        }\n        total_allocated_stake += network_stake.0;\n      }\n\n      total_in_favor_stake >=\n        (total_allocated_stake * REQUIREMENT_NUMERATOR).div_ceil(REQUIREMENT_DIVISOR)\n    }\n\n    fn revoke_favor_internal(\n      account: T::AccountId,\n      signal_id: SignalId,\n      for_network: NetworkId,\n    ) -> DispatchResult {\n      if !Favors::<T>::contains_key((signal_id, for_network), account) {\n        Err::<(), _>(Error::<T>::RevokingNonExistentFavor)?;\n      }\n      Favors::<T>::remove((signal_id, for_network), account);\n      Self::deposit_event(Event::<T>::FavorRevoked { signal_id, by: account, for_network });\n      // tally_for_network assumes the network is active, which is implied by having prior set a\n      // favor for it\n      // Technically, this tally may make the network in favor and justify re-tallying for all\n      // networks\n      // Its assumed not to\n      Self::tally_for_network(signal_id, for_network);\n      Ok(())\n    }\n  }\n\n  #[pallet::call]\n  impl<T: Config> Pallet<T> {\n    /// Register a retirement signal, declaring the consensus protocol this signal is in favor of.\n    ///\n    /// Retirement signals are registered so that the proposer, presumably a developer, can revoke\n    /// the signal if there's a fault discovered.\n    #[pallet::call_index(0)]\n    #[pallet::weight(0)] // TODO\n    pub fn register_retirement_signal(\n      origin: OriginFor<T>,\n      in_favor_of: [u8; 32],\n    ) -> DispatchResult {\n      // Don't allow retirement signals to be registered once a retirement has been locked in\n      if LockedInRetirement::<T>::exists() {\n        Err::<(), _>(Error::<T>::RetirementSignalLockedIn)?;\n      }\n\n      let account = ensure_signed(origin)?;\n\n      // Bind the signal ID to the proposer\n      // This prevents a malicious actor from frontrunning a 
proposal, causing them to be the\n      // registrant, just to cancel it later\n      let signal = RegisteredRetirementSignal {\n        in_favor_of,\n        registrant: account,\n        registered_at: frame_system::Pallet::<T>::block_number(),\n      };\n      let signal_id = signal.id();\n\n      if RegisteredRetirementSignals::<T>::get(signal_id).is_some() {\n        Err::<(), _>(Error::<T>::RetirementSignalAlreadyRegistered)?;\n      }\n\n      Self::deposit_event(Event::<T>::RetirementSignalRegistered {\n        signal_id,\n        in_favor_of,\n        registrant: account,\n      });\n      RegisteredRetirementSignals::<T>::set(signal_id, Some(signal));\n      Ok(())\n    }\n\n    #[pallet::call_index(1)]\n    #[pallet::weight(0)] // TODO\n    pub fn revoke_retirement_signal(\n      origin: OriginFor<T>,\n      retirement_signal_id: [u8; 32],\n    ) -> DispatchResult {\n      let account = ensure_signed(origin)?;\n      let Some(registered_signal) = RegisteredRetirementSignals::<T>::get(retirement_signal_id)\n      else {\n        return Err::<(), _>(Error::<T>::NonExistentRetirementSignal.into());\n      };\n      if account != registered_signal.registrant {\n        Err::<(), _>(Error::<T>::NotRetirementSignalRegistrant)?;\n      }\n      RegisteredRetirementSignals::<T>::remove(retirement_signal_id);\n\n      // If this signal was locked in, remove it\n      // This lets a post-lock-in discovered fault be prevented from going live without\n      // intervention by all validators\n      if LockedInRetirement::<T>::get().map(|(signal_id, _block_number)| signal_id) ==\n        Some(retirement_signal_id)\n      {\n        LockedInRetirement::<T>::kill();\n      }\n\n      Self::deposit_event(Event::<T>::RetirementSignalRevoked { signal_id: retirement_signal_id });\n      Ok(())\n    }\n\n    #[pallet::call_index(2)]\n    #[pallet::weight(0)] // TODO\n    pub fn favor(\n      origin: OriginFor<T>,\n      signal_id: SignalId,\n      for_network: NetworkId,\n   
 ) -> DispatchResult {\n      let account = ensure_signed(origin)?;\n\n      // If this is a retirement signal, perform the relevant checks\n      if let SignalId::Retirement(signal_id) = signal_id {\n        // Make sure a retirement hasn't already been locked in\n        if LockedInRetirement::<T>::exists() {\n          Err::<(), _>(Error::<T>::RetirementSignalLockedIn)?;\n        }\n\n        // Make sure this is a registered retirement\n        // We don't have to do this for a `Halt` signal as `Halt` doesn't have the registration\n        // process\n        let Some(registered_signal) = RegisteredRetirementSignals::<T>::get(signal_id) else {\n          return Err::<(), _>(Error::<T>::NonExistentRetirementSignal.into());\n        };\n\n        // Check the signal isn't out of date\n        // This isn't truly necessary since we only track votes from the most recent validator\n        // sets, ensuring modern relevancy\n        // The reason to still have it is because locking in a dated runtime may cause a corrupt\n        // blockchain and lead to a failure in system integrity\n        // `Halt`, which doesn't have this check, at worst causes temporary downtime\n        if (registered_signal.registered_at + T::RetirementValidityDuration::get().into()) <\n          frame_system::Pallet::<T>::block_number()\n        {\n          Err::<(), _>(Error::<T>::ExpiredRetirementSignal)?;\n        }\n      }\n\n      // Check the signer is a validator\n      // Technically, in the case of Serai, this will check they're planned to be in the next set,\n      // not that they are in the current set\n      // This is a practical requirement due to the lack of tracking historical allocations, and\n      // fine for the purposes here\n      if !VsPallet::<T>::in_latest_decided_set(for_network, account) {\n        Err::<(), _>(Error::<T>::NotValidator)?;\n      }\n\n      // Set them as in-favor\n      // Doesn't error if they already voted in order to let any validator 
trigger a re-tally\n      if !Favors::<T>::contains_key((signal_id, for_network), account) {\n        Favors::<T>::set((signal_id, for_network), account, Some(()));\n        Self::deposit_event(Event::SignalFavored { signal_id, by: account, for_network });\n      }\n\n      // Check if the network is in favor\n      // tally_for_network expects the network to be active, which is implied by being in the\n      // latest decided set\n      let network_in_favor = Self::tally_for_network(signal_id, for_network);\n\n      // If this network is in favor, check if enough networks are\n      // We could optimize this by only running the following code when the network is *newly* in\n      // favor\n      // Re-running the following code ensures that if networks' allocated stakes change relative\n      // to each other, any new votes will cause a re-tally\n      if network_in_favor {\n        // If enough are, lock in the signal\n        if Self::tally_for_all_networks(signal_id) {\n          match signal_id {\n            SignalId::Retirement(signal_id) => {\n              LockedInRetirement::<T>::set(Some((\n                signal_id,\n                frame_system::Pallet::<T>::block_number() +\n                  T::RetirementLockInDuration::get().into(),\n              )));\n              Self::deposit_event(Event::RetirementSignalLockedIn { signal_id });\n            }\n            SignalId::Halt(network) => {\n              InInstructions::<T>::halt(network)?;\n            }\n          }\n        }\n      }\n\n      Ok(())\n    }\n\n    /// Revoke favor into an abstaining position.\n    #[pallet::call_index(3)]\n    #[pallet::weight(0)] // TODO\n    pub fn revoke_favor(\n      origin: OriginFor<T>,\n      signal_id: SignalId,\n      for_network: NetworkId,\n    ) -> DispatchResult {\n      if matches!(&signal_id, SignalId::Retirement(_)) && LockedInRetirement::<T>::exists() {\n        Err::<(), _>(Error::<T>::RetirementSignalLockedIn)?;\n      }\n\n      // Doesn't 
check the signal exists due to later checking the favor exists\n      // While the signal may have been revoked, making this pointless, it's not worth the storage\n      // read on every call to check\n      // Since revoke will re-tally, this does technically mean a network will become in-favor of a\n      // revoked signal. Since revoke won't re-tally for all networks/lock-in, this is also fine\n\n      Self::revoke_favor_internal(ensure_signed(origin)?, signal_id, for_network)\n    }\n\n    /// Emit an event standing against the signal.\n    ///\n    /// If the origin is currently in favor of the signal, their favor will be revoked.\n    #[pallet::call_index(4)]\n    #[pallet::weight(0)] // TODO\n    pub fn stand_against(\n      origin: OriginFor<T>,\n      signal_id: SignalId,\n      for_network: NetworkId,\n    ) -> DispatchResult {\n      if LockedInRetirement::<T>::exists() {\n        Err::<(), _>(Error::<T>::RetirementSignalLockedIn)?;\n      }\n\n      let account = ensure_signed(origin)?;\n      // If currently in favor, revoke the favor\n      if Favors::<T>::contains_key((signal_id, for_network), account) {\n        Self::revoke_favor_internal(account, signal_id, for_network)?;\n      } else {\n        // Check this Signal exists (which would've been implied by Favors for it existing)\n        if let SignalId::Retirement(signal_id) = signal_id {\n          if RegisteredRetirementSignals::<T>::get(signal_id).is_none() {\n            Err::<(), _>(Error::<T>::NonExistentRetirementSignal)?;\n          }\n        }\n      }\n\n      // Emit an event that we're against the signal\n      // No actual effects happen besides this\n      Self::deposit_event(Event::<T>::AgainstSignal { signal_id, who: account, for_network });\n      Ok(())\n    }\n  }\n\n  #[pallet::hooks]\n  impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {\n    fn on_initialize(current_number: BlockNumberFor<T>) -> Weight {\n      // If this is the block at which a locked-in signal has 
been set for long enough, panic\n      // This will prevent this block from executing and halt the chain\n      if let Some((signal, block_number)) = LockedInRetirement::<T>::get() {\n        if block_number == current_number {\n          panic!(\n            \"locked-in signal {} has been set for too long\",\n            sp_core::hexdisplay::HexDisplay::from(&signal),\n          );\n        }\n      }\n      Weight::zero() // TODO\n    }\n  }\n}\n\npub use pallet::*;\n"
  },
  {
    "path": "substrate/signals/primitives/Cargo.toml",
    "content": "[package]\nname = \"serai-signals-primitives\"\nversion = \"0.1.0\"\ndescription = \"Signals primitives\"\nlicense = \"MIT\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/substrate/signals/primitives\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nedition = \"2021\"\nrust-version = \"1.74\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nzeroize = { version = \"^1.5\", features = [\"derive\"], optional = true }\n\nscale = { package = \"parity-scale-codec\", version = \"3\", default-features = false, features = [\"derive\"] }\n\nborsh = { version = \"1\", default-features = false, features = [\"derive\", \"de_strict_order\"], optional = true }\nserde = { version = \"1\", default-features = false, features = [\"derive\", \"alloc\"], optional = true }\n\nserai-primitives = { path = \"../../primitives\", version = \"0.1\", default-features = false }\n\n[features]\nstd = [\n  \"zeroize\",\n\n  \"scale/std\",\n\n  \"borsh?/std\",\n  \"serde?/std\",\n\n  \"serai-primitives/std\",\n]\n\nborsh = [\"dep:borsh\"]\nserde = [\"dep:serde\"]\n\ndefault = [\"std\"]\n"
  },
  {
    "path": "substrate/signals/primitives/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2023 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "substrate/signals/primitives/src/lib.rs",
    "content": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![cfg_attr(docsrs, feature(doc_cfg))]\n#![cfg_attr(not(feature = \"std\"), no_std)]\n#![expect(clippy::cast_possible_truncation)]\n\nuse scale::{Encode, Decode, DecodeWithMemTracking, MaxEncodedLen};\n\nuse serai_primitives::ExternalNetworkId;\n\n#[derive(\n  Clone, Copy, PartialEq, Eq, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen,\n)]\n#[cfg_attr(feature = \"std\", derive(zeroize::Zeroize))]\n#[cfg_attr(feature = \"borsh\", derive(borsh::BorshSerialize, borsh::BorshDeserialize))]\n#[cfg_attr(feature = \"serde\", derive(serde::Serialize, serde::Deserialize))]\npub enum SignalId {\n  Retirement([u8; 32]),\n  Halt(ExternalNetworkId),\n}\n"
  },
  {
    "path": "substrate/validator-sets/pallet/Cargo.toml",
    "content": "[package]\nname = \"serai-validator-sets-pallet\"\nversion = \"0.1.0\"\ndescription = \"Validator sets pallet\"\nlicense = \"AGPL-3.0-only\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/substrate/validator-sets/pallet\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nedition = \"2021\"\nrust-version = \"1.74\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[package.metadata.cargo-machete]\nignored = [\"scale\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nhashbrown = { version = \"0.14\", default-features = false, features = [\"ahash\", \"inline-more\"] }\n\nscale = { package = \"parity-scale-codec\", version = \"3\", default-features = false, features = [\"derive\"] }\n\nsp-core = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nsp-io = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nsp-std = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nsp-application-crypto = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nsp-runtime = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nsp-session = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nsp-staking = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\n\nframe-system = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features 
= false }\nframe-support = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\n\npallet-session = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\npallet-babe = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\npallet-grandpa = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\n\nserai-primitives = { path = \"../../primitives\", default-features = false }\nvalidator-sets-primitives = { package = \"serai-validator-sets-primitives\", path = \"../primitives\", default-features = false }\n\ncoins-pallet = { package = \"serai-coins-pallet\", path = \"../../coins/pallet\", default-features = false }\ndex-pallet = { package = \"serai-dex-pallet\", path = \"../../dex/pallet\", default-features = false }\n\n[features]\nstd = [\n  \"scale/std\",\n\n  \"sp-core/std\",\n  \"sp-io/std\",\n  \"sp-std/std\",\n  \"sp-application-crypto/std\",\n  \"sp-session/std\",\n  \"sp-runtime/std\",\n  \"sp-staking/std\",\n\n  \"frame-system/std\",\n  \"frame-support/std\",\n\n  \"pallet-session/std\",\n  \"pallet-babe/std\",\n  \"pallet-grandpa/std\",\n\n  \"serai-primitives/std\",\n  \"validator-sets-primitives/std\",\n\n  \"coins-pallet/std\",\n  \"dex-pallet/std\",\n]\n\n# TODO\ntry-runtime = []\n\nruntime-benchmarks = [\n  \"frame-system/runtime-benchmarks\",\n  \"frame-support/runtime-benchmarks\",\n]\n\ndefault = [\"std\"]\n"
  },
  {
    "path": "substrate/validator-sets/pallet/LICENSE",
    "content": "AGPL-3.0-only license\n\nCopyright (c) 2022-2023 Luke Parker\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License Version 3 as\npublished by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n"
  },
  {
    "path": "substrate/validator-sets/pallet/src/lib.rs",
    "content": "#![cfg_attr(not(feature = \"std\"), no_std)]\n\nuse core::marker::PhantomData;\n\nuse scale::{Encode, Decode, DecodeWithMemTracking};\n\nuse sp_std::{vec, vec::Vec};\nuse sp_core::sr25519::{Public, Signature};\nuse sp_application_crypto::RuntimePublic;\nuse sp_session::{GetSessionNumber, GetValidatorCount};\nuse pallet_session::ShouldEndSession;\nuse sp_runtime::{KeyTypeId, ConsensusEngineId, traits::IsMember};\nuse sp_staking::offence::{ReportOffence, Offence, OffenceError};\n\nuse frame_system::{pallet_prelude::*, RawOrigin};\nuse frame_support::{\n  pallet_prelude::*,\n  sp_runtime::SaturatedConversion,\n  traits::{DisabledValidators, KeyOwnerProofSystem, FindAuthor, OneSessionHandler},\n  BoundedVec, WeakBoundedVec, StoragePrefixedMap,\n};\n\nuse serai_primitives::*;\npub use validator_sets_primitives as primitives;\nuse primitives::*;\n\nuse coins_pallet::{Pallet as Coins, AllowMint};\nuse dex_pallet::Pallet as Dex;\n\nuse pallet_babe::{\n  Pallet as Babe, AuthorityId as BabeAuthorityId, EquivocationOffence as BabeEquivocationOffence,\n};\nuse pallet_grandpa::{\n  Pallet as Grandpa, AuthorityId as GrandpaAuthorityId,\n  EquivocationOffence as GrandpaEquivocationOffence,\n};\n\n#[derive(Debug, Encode, Decode, DecodeWithMemTracking, PartialEq, Eq, Clone)]\npub struct MembershipProof<T: pallet::Config>(pub Public, pub PhantomData<T>);\nimpl<T: pallet::Config> GetSessionNumber for MembershipProof<T> {\n  fn session(&self) -> u32 {\n    let current = Pallet::<T>::session(NetworkId::Serai).unwrap().0;\n    if Babe::<T>::is_member(&BabeAuthorityId::from(self.0)) {\n      current\n    } else {\n      // if it isn't in the current session, it should have been in the previous one.\n      current - 1\n    }\n  }\n}\nimpl<T: pallet::Config> GetValidatorCount for MembershipProof<T> {\n  // We only implement and this interface to satisfy trait requirements\n  // Although this might return the wrong count if the offender was in the previous set, we don't\n  
// rely on it and Substrate only relies on it to offer economic calculations we also don't rely\n  // on\n  fn validator_count(&self) -> u32 {\n    u32::try_from(Babe::<T>::authorities().len()).unwrap()\n  }\n}\n\n#[allow(\n  deprecated,\n  unreachable_patterns,\n  clippy::let_unit_value,\n  clippy::cast_possible_truncation,\n  clippy::ignored_unit_patterns\n)] // TODO\n#[frame_support::pallet]\npub mod pallet {\n  use super::*;\n\n  #[pallet::config]\n  pub trait Config:\n    frame_system::Config<AccountId = Public>\n    + coins_pallet::Config\n    + dex_pallet::Config\n    + pallet_session::Config\n    + pallet_babe::Config\n    + pallet_grandpa::Config\n  {\n    type ShouldEndSession: ShouldEndSession<BlockNumberFor<Self>>;\n  }\n\n  #[pallet::genesis_config]\n  #[derive(Clone, Debug)]\n  pub struct GenesisConfig<T: Config> {\n    /// Networks to spawn Serai with, and the stake requirement per key share.\n    ///\n    /// Every participant at genesis will automatically be assumed to have this much stake.\n    /// This stake cannot be withdrawn however as there's no actual stake behind it.\n    pub networks: Vec<(NetworkId, Amount)>,\n    /// List of participants to place in the initial validator sets.\n    pub participants: Vec<T::AccountId>,\n  }\n\n  impl<T: Config> Default for GenesisConfig<T> {\n    fn default() -> Self {\n      GenesisConfig { networks: Default::default(), participants: Default::default() }\n    }\n  }\n\n  #[pallet::pallet]\n  pub struct Pallet<T>(PhantomData<T>);\n\n  /// The current session for a network.\n  // Uses Identity for the lookup to avoid a hash of a severely limited fixed key-space.\n  #[pallet::storage]\n  #[pallet::getter(fn session)]\n  pub type CurrentSession<T: Config> = StorageMap<_, Identity, NetworkId, Session, OptionQuery>;\n  impl<T: Config> Pallet<T> {\n    pub fn latest_decided_session(network: NetworkId) -> Option<Session> {\n      let session = Self::session(network);\n      // we already decided about the next 
session for serai.\n      if network == NetworkId::Serai {\n        return session.map(|s| Session(s.0 + 1));\n      }\n      session\n    }\n  }\n\n  /// The allocation required per key share.\n  // Uses Identity for the lookup to avoid a hash of a severely limited fixed key-space.\n  #[pallet::storage]\n  #[pallet::getter(fn allocation_per_key_share)]\n  pub type AllocationPerKeyShare<T: Config> =\n    StorageMap<_, Identity, NetworkId, Amount, OptionQuery>;\n  /// The validators selected to be in-set (and their key shares), regardless of if removed.\n  ///\n  /// This method allows iterating over all validators and their stake.\n  #[pallet::storage]\n  #[pallet::getter(fn participants_for_latest_decided_set)]\n  pub(crate) type Participants<T: Config> = StorageMap<\n    _,\n    Identity,\n    NetworkId,\n    BoundedVec<(Public, u64), ConstU32<{ MAX_KEY_SHARES_PER_SET }>>,\n    OptionQuery,\n  >;\n  /// The validators selected to be in-set, regardless of if removed.\n  ///\n  /// This method allows quickly checking for presence in-set and looking up a validator's key\n  /// shares.\n  // Uses Identity for NetworkId to avoid a hash of a severely limited fixed key-space.\n  #[pallet::storage]\n  pub(crate) type InSet<T: Config> =\n    StorageDoubleMap<_, Identity, NetworkId, Blake2_128Concat, Public, u64, OptionQuery>;\n\n  impl<T: Config> Pallet<T> {\n    // This exists as InSet, for Serai, is the validators set for the next session, *not* the\n    // current set's validators\n    #[inline]\n    fn in_active_serai_set(account: Public) -> bool {\n      // TODO: is_member is internally O(n). 
Update Babe to use an O(1) storage lookup?\n      Babe::<T>::is_member(&BabeAuthorityId::from(account))\n    }\n\n    /// Returns true if the account is included in an active set.\n    ///\n    /// This will still include participants which were removed from the DKG.\n    pub fn in_active_set(network: NetworkId, account: Public) -> bool {\n      if network == NetworkId::Serai {\n        Self::in_active_serai_set(account)\n      } else {\n        InSet::<T>::contains_key(network, account)\n      }\n    }\n\n    /// Returns true if the account has been definitively included in an active or upcoming set.\n    ///\n    /// This will still include participants which were removed from the DKG.\n    pub fn in_set(network: NetworkId, account: Public) -> bool {\n      if InSet::<T>::contains_key(network, account) {\n        return true;\n      }\n\n      if network == NetworkId::Serai {\n        return Self::in_active_serai_set(account);\n      }\n\n      false\n    }\n\n    /// Returns true if the account is present in the latest decided set.\n    ///\n    /// This is useful when working with `allocation` and `total_allocated_stake`, which return the\n    /// latest information.\n    pub fn in_latest_decided_set(network: NetworkId, account: Public) -> bool {\n      InSet::<T>::contains_key(network, account)\n    }\n  }\n\n  /// The total stake allocated to this network by the active set of validators.\n  #[pallet::storage]\n  #[pallet::getter(fn total_allocated_stake)]\n  pub type TotalAllocatedStake<T: Config> = StorageMap<_, Identity, NetworkId, Amount, OptionQuery>;\n\n  /// The current amount allocated to a validator set by a validator.\n  #[pallet::storage]\n  #[pallet::getter(fn allocation)]\n  pub type Allocations<T: Config> =\n    StorageMap<_, Blake2_128Concat, (NetworkId, Public), Amount, OptionQuery>;\n  /// A sorted view of the current allocations premised on the underlying DB itself being sorted.\n  /*\n    This uses Identity so we can take advantage of the 
DB's lexicographic ordering to iterate over\n    the key space from highest-to-lowest allocated.\n\n    This does remove the protection using a hash algorithm here offers against spam attacks (by\n    flooding the DB with layers, increasing lookup time and merkle proof sizes, not that we use\n    merkle proofs as Polkadot does).\n\n    Since amounts are represented with just 8 bytes, only 16 nibbles are presents. This caps the\n    potential depth caused by spam at 16 layers (as the underlying DB operates on nibbles).\n\n    While there is an entire 32-byte public key after this, a Blake hash of the key is inserted\n    after the amount to prevent the key from also being used to cause layer spam.\n\n    There's also a minimum stake requirement, which further reduces the potential for spam.\n  */\n  #[pallet::storage]\n  type SortedAllocations<T: Config> =\n    StorageMap<_, Identity, (NetworkId, [u8; 8], [u8; 16], Public), (), OptionQuery>;\n  impl<T: Config> Pallet<T> {\n    #[inline]\n    fn sorted_allocation_key(\n      network: NetworkId,\n      key: Public,\n      amount: Amount,\n    ) -> (NetworkId, [u8; 8], [u8; 16], Public) {\n      let amount = reverse_lexicographic_order(amount.0.to_be_bytes());\n      let hash = sp_io::hashing::blake2_128(&(network, amount, key).encode());\n      (network, amount, hash, key)\n    }\n    fn recover_amount_from_sorted_allocation_key(key: &[u8]) -> Amount {\n      let distance_from_end = 8 + 16 + 32;\n      let start_pos = key.len() - distance_from_end;\n      let mut raw: [u8; 8] = key[start_pos .. 
(start_pos + 8)].try_into().unwrap();\n      for byte in &mut raw {\n        *byte = !*byte;\n      }\n      Amount(u64::from_be_bytes(raw))\n    }\n    fn recover_key_from_sorted_allocation_key(key: &[u8]) -> Public {\n      let key: [u8; 32] = key[(key.len() - 32) ..].try_into().unwrap();\n      key.into()\n    }\n    // Returns if this validator already had an allocation set.\n    fn set_allocation(network: NetworkId, key: Public, amount: Amount) -> bool {\n      let prior = Allocations::<T>::take((network, key));\n      if let Some(amount) = prior {\n        SortedAllocations::<T>::remove(Self::sorted_allocation_key(network, key, amount));\n      }\n      if amount.0 != 0 {\n        Allocations::<T>::set((network, key), Some(amount));\n        SortedAllocations::<T>::set(Self::sorted_allocation_key(network, key, amount), Some(()));\n      }\n      prior.is_some()\n    }\n  }\n\n  // Doesn't use PrefixIterator as we need to yield the keys *and* values\n  // PrefixIterator only yields the values\n  struct SortedAllocationsIter<T: Config> {\n    _t: PhantomData<T>,\n    prefix: Vec<u8>,\n    last: Vec<u8>,\n    allocation_per_key_share: Amount,\n  }\n  impl<T: Config> SortedAllocationsIter<T> {\n    fn new(network: NetworkId) -> Self {\n      let mut prefix = SortedAllocations::<T>::final_prefix().to_vec();\n      prefix.extend(&network.encode());\n      Self {\n        _t: PhantomData,\n        prefix: prefix.clone(),\n        last: prefix,\n        allocation_per_key_share: Pallet::<T>::allocation_per_key_share(network).expect(\n          \"SortedAllocationsIter iterating over a network without a set allocation per key share\",\n        ),\n      }\n    }\n  }\n  impl<T: Config> Iterator for SortedAllocationsIter<T> {\n    type Item = (Public, Amount);\n    fn next(&mut self) -> Option<Self::Item> {\n      let next = sp_io::storage::next_key(&self.last)?;\n      if !next.starts_with(&self.prefix) {\n        None?;\n      }\n      let key = 
Pallet::<T>::recover_key_from_sorted_allocation_key(&next);\n      let amount = Pallet::<T>::recover_amount_from_sorted_allocation_key(&next);\n\n      // We may have validators present, with less than the minimum allocation, due to block\n      // rewards\n      if amount.0 < self.allocation_per_key_share.0 {\n        None?;\n      }\n\n      self.last = next;\n      Some((key, amount))\n    }\n  }\n\n  /// Pending deallocations, keyed by the Session they become unlocked on.\n  #[pallet::storage]\n  type PendingDeallocations<T: Config> = StorageDoubleMap<\n    _,\n    Blake2_128Concat,\n    (NetworkId, Public),\n    Identity,\n    Session,\n    Amount,\n    OptionQuery,\n  >;\n\n  /// The generated key pair for a given validator set instance.\n  #[pallet::storage]\n  #[pallet::getter(fn keys)]\n  pub type Keys<T: Config> =\n    StorageMap<_, Twox64Concat, ExternalValidatorSet, KeyPair, OptionQuery>;\n\n  /// The key for validator sets which can (and still need to) publish their slash reports.\n  #[pallet::storage]\n  pub type PendingSlashReport<T: Config> =\n    StorageMap<_, Identity, ExternalNetworkId, Public, OptionQuery>;\n\n  /// Disabled validators.\n  #[pallet::storage]\n  pub type SeraiDisabledIndices<T: Config> = StorageMap<_, Identity, u32, Public, OptionQuery>;\n\n  /// Mapping from session to its starting block number.\n  #[pallet::storage]\n  #[pallet::getter(fn session_begin_block)]\n  pub type SessionBeginBlock<T: Config> =\n    StorageDoubleMap<_, Identity, NetworkId, Identity, Session, u64, ValueQuery>;\n\n  #[pallet::event]\n  #[pallet::generate_deposit(pub(super) fn deposit_event)]\n  pub enum Event<T: Config> {\n    NewSet {\n      set: ValidatorSet,\n    },\n    ParticipantRemoved {\n      set: ValidatorSet,\n      removed: T::AccountId,\n    },\n    KeyGen {\n      set: ExternalValidatorSet,\n      key_pair: KeyPair,\n    },\n    AcceptedHandover {\n      set: ValidatorSet,\n    },\n    SetRetired {\n      set: ValidatorSet,\n    },\n    
AllocationIncreased {\n      validator: T::AccountId,\n      network: NetworkId,\n      amount: Amount,\n    },\n    AllocationDecreased {\n      validator: T::AccountId,\n      network: NetworkId,\n      amount: Amount,\n      delayed_until: Option<Session>,\n    },\n    DeallocationClaimed {\n      validator: T::AccountId,\n      network: NetworkId,\n      session: Session,\n    },\n  }\n\n  impl<T: Config> Pallet<T> {\n    fn new_set(network: NetworkId) {\n      // TODO: prevent new set if it doesn't have enough stake for economic security.\n\n      // Update CurrentSession\n      let session = {\n        let new_session =\n          CurrentSession::<T>::get(network).map_or(Session(0), |session| Session(session.0 + 1));\n        CurrentSession::<T>::set(network, Some(new_session));\n        new_session\n      };\n\n      // Clear the current InSet\n      assert_eq!(\n        InSet::<T>::clear_prefix(network, MAX_KEY_SHARES_PER_SET, None).maybe_cursor,\n        None\n      );\n\n      let allocation_per_key_share = Self::allocation_per_key_share(network).unwrap().0;\n\n      let mut participants = vec![];\n      {\n        let mut iter = SortedAllocationsIter::<T>::new(network);\n        let mut key_shares = 0;\n        while key_shares < u64::from(MAX_KEY_SHARES_PER_SET) {\n          let Some((key, amount)) = iter.next() else { break };\n\n          let these_key_shares =\n            (amount.0 / allocation_per_key_share).min(u64::from(MAX_KEY_SHARES_PER_SET));\n          participants.push((key, these_key_shares));\n\n          key_shares += these_key_shares;\n        }\n        amortize_excess_key_shares(&mut participants);\n      }\n\n      for (key, shares) in &participants {\n        InSet::<T>::set(network, key, Some(*shares));\n      }\n\n      let set = ValidatorSet { network, session };\n      Pallet::<T>::deposit_event(Event::NewSet { set });\n\n      Participants::<T>::set(network, Some(participants.try_into().unwrap()));\n      
SessionBeginBlock::<T>::set(\n        network,\n        session,\n        <frame_system::Pallet<T>>::block_number().saturated_into::<u64>(),\n      );\n    }\n  }\n\n  #[pallet::error]\n  pub enum Error<T> {\n    /// Validator Set doesn't exist.\n    NonExistentValidatorSet,\n    /// Not enough allocation to obtain a key share in the set.\n    InsufficientAllocation,\n    /// Trying to deallocate more than allocated.\n    NotEnoughAllocated,\n    /// Allocation would cause the validator set to no longer achieve fault tolerance.\n    AllocationWouldRemoveFaultTolerance,\n    /// Allocation would cause the validator set to never be able to achieve fault tolerance.\n    AllocationWouldPreventFaultTolerance,\n    /// Deallocation would remove the participant from the set, despite the validator not\n    /// specifying so.\n    DeallocationWouldRemoveParticipant,\n    /// Deallocation would cause the validator set to no longer achieve fault tolerance.\n    DeallocationWouldRemoveFaultTolerance,\n    /// Deallocation to be claimed doesn't exist.\n    NonExistentDeallocation,\n    /// Validator Set already generated keys.\n    AlreadyGeneratedKeys,\n    /// An invalid MuSig signature was provided.\n    BadSignature,\n    /// Validator wasn't registered or active.\n    NonExistentValidator,\n    /// Deallocation would take the stake below what is required.\n    DeallocationWouldRemoveEconomicSecurity,\n  }\n\n  #[pallet::hooks]\n  impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {\n    fn on_initialize(n: BlockNumberFor<T>) -> Weight {\n      if T::ShouldEndSession::should_end_session(n) {\n        Self::rotate_session();\n        // TODO: set the proper weights\n        T::BlockWeights::get().max_block\n      } else {\n        Weight::zero()\n      }\n    }\n  }\n\n  #[pallet::genesis_build]\n  impl<T: Config> BuildGenesisConfig for GenesisConfig<T> {\n    fn build(&self) {\n      for (id, stake) in self.networks.clone() {\n        AllocationPerKeyShare::<T>::set(id, 
Some(stake));\n        for participant in self.participants.clone() {\n          if Pallet::<T>::set_allocation(id, participant, stake) {\n            panic!(\"participants contained duplicates\");\n          }\n        }\n        Pallet::<T>::new_set(id);\n      }\n    }\n  }\n\n  impl<T: Config> Pallet<T> {\n    fn account() -> T::AccountId {\n      system_address(b\"ValidatorSets\").into()\n    }\n\n    // is_bft returns if the network is able to survive any single node becoming byzantine.\n    fn is_bft(network: NetworkId) -> bool {\n      let allocation_per_key_share = AllocationPerKeyShare::<T>::get(network).unwrap().0;\n\n      let mut validators_len = 0;\n      let mut top = None;\n      let mut key_shares = 0;\n      for (_, amount) in SortedAllocationsIter::<T>::new(network) {\n        validators_len += 1;\n\n        key_shares += amount.0 / allocation_per_key_share;\n        if top.is_none() {\n          top = Some(key_shares);\n        }\n\n        if key_shares > u64::from(MAX_KEY_SHARES_PER_SET) {\n          break;\n        }\n      }\n\n      let Some(top) = top else { return false };\n\n      // key_shares may be over MAX_KEY_SHARES_PER_SET, which will cause a round robin reduction of\n      // each validator's key shares until their sum is MAX_KEY_SHARES_PER_SET\n      // post_amortization_key_shares_for_top_validator yields what the top validator's key shares\n      // would be after such a reduction, letting us evaluate this correctly\n      let top = post_amortization_key_shares_for_top_validator(validators_len, top, key_shares);\n      (top * 3) < key_shares.min(MAX_KEY_SHARES_PER_SET.into())\n    }\n\n    fn increase_allocation(\n      network: NetworkId,\n      account: T::AccountId,\n      amount: Amount,\n      block_reward: bool,\n    ) -> DispatchResult {\n      let old_allocation = Self::allocation((network, account)).unwrap_or(Amount(0)).0;\n      let new_allocation = old_allocation + amount.0;\n      let allocation_per_key_share = 
Self::allocation_per_key_share(network).unwrap().0;\n      // If this is a block reward, we always allow it to be allocated\n      if (new_allocation < allocation_per_key_share) && (!block_reward) {\n        Err(Error::<T>::InsufficientAllocation)?;\n      }\n\n      let increased_key_shares =\n        (old_allocation / allocation_per_key_share) < (new_allocation / allocation_per_key_share);\n\n      // Check if the net exhibited the ability to handle any single node becoming byzantine\n      let mut was_bft = None;\n      if increased_key_shares {\n        was_bft = Some(Self::is_bft(network));\n      }\n\n      // Increase the allocation now\n      Self::set_allocation(network, account, Amount(new_allocation));\n      Self::deposit_event(Event::AllocationIncreased { validator: account, network, amount });\n\n      // Error if the net no longer can handle any single node becoming byzantine\n      if let Some(was_bft) = was_bft {\n        if was_bft && (!Self::is_bft(network)) {\n          Err(Error::<T>::AllocationWouldRemoveFaultTolerance)?;\n        }\n      }\n\n      // The above is_bft calls are only used to check a BFT net doesn't become non-BFT\n      // Check here if this call would prevent a non-BFT net from *ever* becoming BFT\n      if (new_allocation / allocation_per_key_share) >= (MAX_KEY_SHARES_PER_SET / 3).into() {\n        Err(Error::<T>::AllocationWouldPreventFaultTolerance)?;\n      }\n\n      // If they're in the current set, and the current set has completed its handover (so its\n      // currently being tracked by TotalAllocatedStake), update the TotalAllocatedStake\n      if let Some(session) = Self::session(network) {\n        if InSet::<T>::contains_key(network, account) && Self::handover_completed(network, session)\n        {\n          TotalAllocatedStake::<T>::set(\n            network,\n            Some(Amount(TotalAllocatedStake::<T>::get(network).unwrap_or(Amount(0)).0 + amount.0)),\n          );\n        }\n      }\n\n      Ok(())\n  
  }\n\n    fn session_to_unlock_on_for_current_set(network: NetworkId) -> Option<Session> {\n      let mut to_unlock_on = Self::session(network)?;\n      // Move to the next session, as deallocating currently in-use stake is obviously invalid\n      to_unlock_on.0 += 1;\n      if network == NetworkId::Serai {\n        // Since the next Serai set will already have been decided, we can only deallocate one\n        // session later\n        to_unlock_on.0 += 1;\n      }\n      // Increase the session by one, creating a cooldown period\n      to_unlock_on.0 += 1;\n      Some(to_unlock_on)\n    }\n\n    /// Decreases a validator's allocation to a set.\n    ///\n    /// Errors if the capacity provided by this allocation is in use.\n    ///\n    /// Errors if a partial decrease of allocation which puts the remaining allocation below the\n    /// minimum requirement.\n    ///\n    /// The capacity prior provided by the allocation is immediately removed, in order to ensure it\n    /// doesn't become used (preventing deallocation).\n    ///\n    /// Returns if the amount is immediately eligible for deallocation.\n    fn decrease_allocation(\n      network: NetworkId,\n      account: T::AccountId,\n      amount: Amount,\n    ) -> Result<bool, DispatchError> {\n      // Check it's safe to decrease this set's stake by this amount\n      if let NetworkId::External(n) = network {\n        let new_total_staked = Self::total_allocated_stake(NetworkId::from(n))\n          .unwrap()\n          .0\n          .checked_sub(amount.0)\n          .ok_or(Error::<T>::NotEnoughAllocated)?;\n        let required_stake = Self::required_stake_for_network(n);\n        if new_total_staked < required_stake {\n          Err(Error::<T>::DeallocationWouldRemoveEconomicSecurity)?;\n        }\n      }\n\n      let old_allocation =\n        Self::allocation((network, account)).ok_or(Error::<T>::NonExistentValidator)?.0;\n      let new_allocation =\n        
old_allocation.checked_sub(amount.0).ok_or(Error::<T>::NotEnoughAllocated)?;\n\n      // If we're not removing the entire allocation, yet the allocation is no longer at or above\n      // the threshold for a key share, error\n      let allocation_per_key_share = Self::allocation_per_key_share(network).unwrap().0;\n      if (new_allocation != 0) && (new_allocation < allocation_per_key_share) {\n        Err(Error::<T>::DeallocationWouldRemoveParticipant)?;\n      }\n\n      let decreased_key_shares =\n        (old_allocation / allocation_per_key_share) > (new_allocation / allocation_per_key_share);\n\n      // If this decreases the validator's key shares, error if the new set is unable to handle\n      // byzantine faults\n      let mut was_bft = None;\n      if decreased_key_shares {\n        was_bft = Some(Self::is_bft(network));\n      }\n\n      // Decrease the allocation now\n      // Since we don't also update TotalAllocatedStake here, TotalAllocatedStake may be greater\n      // than the sum of all allocations, according to the Allocations StorageMap\n      // This is intentional as this allocation has only been queued for deallocation at this time\n      Self::set_allocation(network, account, Amount(new_allocation));\n\n      if let Some(was_bft) = was_bft {\n        if was_bft && (!Self::is_bft(network)) {\n          Err(Error::<T>::DeallocationWouldRemoveFaultTolerance)?;\n        }\n      }\n\n      // If we're not in-set, allow immediate deallocation\n      if !Self::in_set(network, account) {\n        Self::deposit_event(Event::AllocationDecreased {\n          validator: account,\n          network,\n          amount,\n          delayed_until: None,\n        });\n        return Ok(true);\n      }\n\n      // Set it to PendingDeallocations, letting it be released upon a future session\n      // This unwrap should be fine as this account is active, meaning a session has occurred\n      let to_unlock_on = 
Self::session_to_unlock_on_for_current_set(network).unwrap();\n      let existing =\n        PendingDeallocations::<T>::get((network, account), to_unlock_on).unwrap_or(Amount(0));\n      PendingDeallocations::<T>::set(\n        (network, account),\n        to_unlock_on,\n        Some(Amount(existing.0 + amount.0)),\n      );\n\n      Self::deposit_event(Event::AllocationDecreased {\n        validator: account,\n        network,\n        amount,\n        delayed_until: Some(to_unlock_on),\n      });\n\n      Ok(false)\n    }\n\n    // Checks if this session has completed the handover from the prior session.\n    fn handover_completed(network: NetworkId, session: Session) -> bool {\n      let Some(current_session) = Self::session(network) else { return false };\n\n      // If the session we've been queried about is old, it must have completed its handover\n      if current_session.0 > session.0 {\n        return true;\n      }\n      // If the session we've been queried about has yet to start, it can't have completed its\n      // handover\n      if current_session.0 < session.0 {\n        return false;\n      }\n\n      let NetworkId::External(n) = network else {\n        // Handover is automatically complete for Serai as it doesn't have a handover protocol\n        return true;\n      };\n\n      // The current session must have set keys for its handover to be completed\n      if !Keys::<T>::contains_key(ExternalValidatorSet { network: n, session }) {\n        return false;\n      }\n\n      // This must be the first session (which has set keys) OR the prior session must have been\n      // retired (signified by its keys no longer being present)\n      (session.0 == 0) ||\n        (!Keys::<T>::contains_key(ExternalValidatorSet {\n          network: n,\n          session: Session(session.0 - 1),\n        }))\n    }\n\n    fn new_session() {\n      for network in serai_primitives::NETWORKS {\n        // If this network hasn't started sessions yet, don't start one 
now\n        let Some(current_session) = Self::session(network) else { continue };\n        // Only spawn a new set if:\n        // - This is Serai, as we need to rotate Serai upon a new session (per Babe)\n        // - The current set was actually established with a completed handover protocol\n        if (network == NetworkId::Serai) || Self::handover_completed(network, current_session) {\n          Pallet::<T>::new_set(network);\n          // let the Dex know session is rotated.\n          Dex::<T>::on_new_session(network);\n        }\n      }\n    }\n\n    fn set_total_allocated_stake(network: NetworkId) {\n      let participants = Participants::<T>::get(network)\n        .expect(\"setting TotalAllocatedStake for a network without participants\");\n      let total_stake = participants.iter().fold(0, |acc, (addr, _)| {\n        acc + Allocations::<T>::get((network, addr)).unwrap_or(Amount(0)).0\n      });\n      TotalAllocatedStake::<T>::set(network, Some(Amount(total_stake)));\n    }\n\n    // TODO: This is called retire_set, yet just starts retiring the set\n    // Update the nomenclature within this function\n    pub fn retire_set(set: ValidatorSet) {\n      // Serai doesn't set keys and network slashes are handled by BABE/GRANDPA\n      if let NetworkId::External(n) = set.network {\n        // If the prior prior set didn't report, emit they're retired now\n        if PendingSlashReport::<T>::get(n).is_some() {\n          Self::deposit_event(Event::SetRetired {\n            set: ValidatorSet { network: set.network, session: Session(set.session.0 - 1) },\n          });\n        }\n\n        // This overwrites the prior value as the prior to-report set's stake presumably just\n        // unlocked, making their report unenforceable\n        let keys =\n          Keys::<T>::take(ExternalValidatorSet { network: n, session: set.session }).unwrap();\n        PendingSlashReport::<T>::set(n, Some(keys.0));\n      } else {\n        // emit the event for serai network\n 
       Self::deposit_event(Event::SetRetired { set });\n      }\n\n      // We're retiring this set because the set after it accepted the handover\n      Self::deposit_event(Event::AcceptedHandover {\n        set: ValidatorSet { network: set.network, session: Session(set.session.0 + 1) },\n      });\n\n      // Update the total allocated stake to be for the current set\n      Self::set_total_allocated_stake(set.network);\n    }\n\n    /// Take the amount deallocatable.\n    ///\n    /// `session` refers to the Session the stake becomes deallocatable on.\n    fn take_deallocatable_amount(\n      network: NetworkId,\n      session: Session,\n      key: Public,\n    ) -> Option<Amount> {\n      // Check this Session has properly started, completing the handover from the prior session.\n      if !Self::handover_completed(network, session) {\n        return None;\n      }\n      PendingDeallocations::<T>::take((network, key), session)\n    }\n\n    fn rotate_session() {\n      // next serai validators that is in the queue.\n      let now_validators = Participants::<T>::get(NetworkId::Serai)\n        .expect(\"no Serai participants upon rotate_session\");\n      let prior_serai_session = Self::session(NetworkId::Serai).unwrap();\n\n      // TODO: T::SessionHandler::on_before_session_ending() was here.\n      // end the current serai session.\n      Self::retire_set(ValidatorSet { network: NetworkId::Serai, session: prior_serai_session });\n\n      // make a new session and get the next validator set.\n      Self::new_session();\n\n      // Update Babe and Grandpa\n      let session = prior_serai_session.0 + 1;\n      let next_validators = Participants::<T>::get(NetworkId::Serai).unwrap();\n      Babe::<T>::enact_epoch_change(\n        WeakBoundedVec::force_from(\n          now_validators.iter().copied().map(|(id, w)| (BabeAuthorityId::from(id), w)).collect(),\n          None,\n        ),\n        WeakBoundedVec::force_from(\n          
next_validators.iter().copied().map(|(id, w)| (BabeAuthorityId::from(id), w)).collect(),\n          None,\n        ),\n        Some(session),\n      );\n      fn grandpa_map(i: &(Public, u64)) -> (&Public, GrandpaAuthorityId) {\n        (&i.0, i.0.into())\n      }\n      Grandpa::<T>::on_new_session(\n        true,\n        now_validators.iter().map(grandpa_map),\n        next_validators.iter().map(grandpa_map),\n      );\n\n      // Clear SeraiDisabledIndices, only preserving keys still present in the new session\n      // First drain so we don't mutate as we iterate\n      let mut disabled = vec![];\n      for (_, validator) in SeraiDisabledIndices::<T>::drain() {\n        disabled.push(validator);\n      }\n      for disabled in disabled {\n        Self::disable_serai_validator(disabled);\n      }\n    }\n\n    /// Returns the required stake in terms of SRI for a given `Balance`.\n    pub fn required_stake(balance: &ExternalBalance) -> SubstrateAmount {\n      use dex_pallet::HigherPrecisionBalance;\n\n      // This is inclusive to an increase in accuracy\n      let sri_per_coin = Dex::<T>::security_oracle_value(balance.coin).unwrap_or(Amount(0));\n\n      // See dex-pallet for the reasoning on these\n      let coin_decimals = balance.coin.decimals().max(5);\n      let accuracy_increase = HigherPrecisionBalance::from(SubstrateAmount::pow(10, coin_decimals));\n\n      let total_coin_value = u64::try_from(\n        HigherPrecisionBalance::from(balance.amount.0) *\n          HigherPrecisionBalance::from(sri_per_coin.0) /\n          accuracy_increase,\n      )\n      .unwrap_or(u64::MAX);\n\n      // required stake formula (COIN_VALUE * 1.5) + margin(20%)\n      let required_stake = total_coin_value.saturating_mul(3).saturating_div(2);\n      required_stake.saturating_add(total_coin_value.saturating_div(5))\n    }\n\n    /// Returns the current total required stake for a given `network`.\n    pub fn required_stake_for_network(network: ExternalNetworkId) -> 
SubstrateAmount {\n      let mut total_required = 0;\n      for coin in network.coins() {\n        let supply = Coins::<T>::supply(Coin::from(coin));\n        total_required += Self::required_stake(&ExternalBalance { coin, amount: Amount(supply) });\n      }\n      total_required\n    }\n\n    pub fn distribute_block_rewards(\n      network: NetworkId,\n      account: T::AccountId,\n      amount: Amount,\n    ) -> DispatchResult {\n      // TODO: Should this call be part of the `increase_allocation` since we have to have it\n      // before each call to it?\n      Coins::<T>::transfer_internal(\n        account,\n        Self::account(),\n        Balance { coin: Coin::Serai, amount },\n      )?;\n      Self::increase_allocation(network, account, amount, true)\n    }\n\n    fn can_slash_serai_validator(validator: Public) -> bool {\n      // Checks if they're active or actively deallocating (letting us still slash them)\n      // We could check if they're upcoming/still allocating, yet that'd mean the equivocation is\n      // invalid (as they aren't actively signing anything) or severely dated\n      // It's not an edge case worth being comprehensive to due to the complexity of being so\n      Babe::<T>::is_member(&BabeAuthorityId::from(validator)) ||\n        PendingDeallocations::<T>::iter_prefix((NetworkId::Serai, validator)).next().is_some()\n    }\n\n    fn slash_serai_validator(validator: Public) {\n      let network = NetworkId::Serai;\n\n      let mut allocation = Self::allocation((network, validator)).unwrap_or(Amount(0));\n      // reduce the current allocation to 0.\n      Self::set_allocation(network, validator, Amount(0));\n\n      // Take the pending deallocation from the current session\n      allocation.0 += PendingDeallocations::<T>::take(\n        (network, validator),\n        Self::session_to_unlock_on_for_current_set(network).unwrap(),\n      )\n      .unwrap_or(Amount(0))\n      .0;\n\n      // Reduce the TotalAllocatedStake for the network, if 
in set\n      // TotalAllocatedStake is the sum of allocations and pending deallocations from the current\n      // session, since pending deallocations can still be slashed and therefore still contribute\n      // to economic security, hence the allocation calculations above being above and the ones\n      // below being below\n      if InSet::<T>::contains_key(NetworkId::Serai, validator) {\n        let current_staked = Self::total_allocated_stake(network).unwrap();\n        TotalAllocatedStake::<T>::set(network, Some(current_staked - allocation));\n      }\n\n      // Clear any other pending deallocations.\n      for (_, pending) in PendingDeallocations::<T>::drain_prefix((network, validator)) {\n        allocation.0 += pending.0;\n      }\n\n      // burn the allocation from the stake account\n      Coins::<T>::burn(\n        RawOrigin::Signed(Self::account()).into(),\n        Balance { coin: Coin::Serai, amount: allocation },\n      )\n      .unwrap();\n    }\n\n    /// Disable a Serai validator, preventing them from further authoring blocks.\n    ///\n    /// Returns true if the validator-to-disable was actually a validator.\n    /// Returns false if they weren't.\n    fn disable_serai_validator(validator: Public) -> bool {\n      if let Some(index) =\n        Babe::<T>::authorities().into_iter().position(|(id, _)| id.into_inner() == validator)\n      {\n        SeraiDisabledIndices::<T>::set(u32::try_from(index).unwrap(), Some(validator));\n\n        let session = Self::session(NetworkId::Serai).unwrap();\n        Self::deposit_event(Event::ParticipantRemoved {\n          set: ValidatorSet { network: NetworkId::Serai, session },\n          removed: validator,\n        });\n\n        true\n      } else {\n        false\n      }\n    }\n  }\n\n  #[pallet::call]\n  impl<T: Config> Pallet<T> {\n    #[pallet::call_index(0)]\n    #[pallet::weight(0)] // TODO\n    pub fn set_keys(\n      origin: OriginFor<T>,\n      network: ExternalNetworkId,\n      
removed_participants: BoundedVec<Public, ConstU32<{ MAX_KEY_SHARES_PER_SET / 3 }>>,\n      key_pair: KeyPair,\n      signature: Signature,\n    ) -> DispatchResult {\n      ensure_none(origin)?;\n\n      // signature isn't checked as this is an unsigned transaction, and validate_unsigned\n      // (called by pre_dispatch) checks it\n      let _ = signature;\n\n      let session = Self::session(NetworkId::from(network)).unwrap();\n      let set = ExternalValidatorSet { network, session };\n\n      Keys::<T>::set(set, Some(key_pair.clone()));\n\n      // If this is the first ever set for this network, set TotalAllocatedStake now\n      // We generally set TotalAllocatedStake when the prior set retires, and the new set is fully\n      // active and liable. Since this is the first set, there is no prior set to wait to retire\n      if session == Session(0) {\n        Self::set_total_allocated_stake(NetworkId::from(network));\n      }\n\n      // This does not remove from TotalAllocatedStake or InSet in order to:\n      // 1) Not decrease the stake present in this set. This means removed participants are\n      //    still liable for the economic security of the external network. 
This prevents\n      //    a decided set, which is economically secure, from falling below the threshold.\n      // 2) Not allow parties removed to immediately deallocate, per commentary on deallocation\n      //    scheduling (https://github.com/serai-dex/serai/issues/394).\n      for removed in removed_participants {\n        Self::deposit_event(Event::ParticipantRemoved { set: set.into(), removed });\n      }\n      Self::deposit_event(Event::KeyGen { set, key_pair });\n\n      Ok(())\n    }\n\n    #[pallet::call_index(1)]\n    #[pallet::weight(0)] // TODO\n    pub fn report_slashes(\n      origin: OriginFor<T>,\n      network: ExternalNetworkId,\n      slashes: BoundedVec<(Public, u32), ConstU32<{ MAX_KEY_SHARES_PER_SET / 3 }>>,\n      signature: Signature,\n    ) -> DispatchResult {\n      ensure_none(origin)?;\n\n      // signature isn't checked as this is an unsigned transaction, and validate_unsigned\n      // (called by pre_dispatch) checks it\n      let _ = signature;\n\n      // TODO: Handle slashes\n      let _ = slashes;\n\n      // Emit set retired\n      Pallet::<T>::deposit_event(Event::SetRetired {\n        set: ValidatorSet {\n          network: network.into(),\n          session: Session(Self::session(NetworkId::from(network)).unwrap().0 - 1),\n        },\n      });\n\n      Ok(())\n    }\n\n    #[pallet::call_index(2)]\n    #[pallet::weight(0)] // TODO\n    pub fn allocate(origin: OriginFor<T>, network: NetworkId, amount: Amount) -> DispatchResult {\n      let validator = ensure_signed(origin)?;\n      Coins::<T>::transfer_internal(\n        validator,\n        Self::account(),\n        Balance { coin: Coin::Serai, amount },\n      )?;\n      Self::increase_allocation(network, validator, amount, false)\n    }\n\n    #[pallet::call_index(3)]\n    #[pallet::weight(0)] // TODO\n    pub fn deallocate(origin: OriginFor<T>, network: NetworkId, amount: Amount) -> DispatchResult {\n      let account = ensure_signed(origin)?;\n\n      let 
can_immediately_deallocate = Self::decrease_allocation(network, account, amount)?;\n      if can_immediately_deallocate {\n        Coins::<T>::transfer_internal(\n          Self::account(),\n          account,\n          Balance { coin: Coin::Serai, amount },\n        )?;\n      }\n\n      Ok(())\n    }\n\n    #[pallet::call_index(4)]\n    #[pallet::weight((0, DispatchClass::Operational))] // TODO\n    pub fn claim_deallocation(\n      origin: OriginFor<T>,\n      network: NetworkId,\n      session: Session,\n    ) -> DispatchResult {\n      let account = ensure_signed(origin)?;\n      let Some(amount) = Self::take_deallocatable_amount(network, session, account) else {\n        Err(Error::<T>::NonExistentDeallocation)?\n      };\n      Coins::<T>::transfer_internal(\n        Self::account(),\n        account,\n        Balance { coin: Coin::Serai, amount },\n      )?;\n      Self::deposit_event(Event::DeallocationClaimed { validator: account, network, session });\n      Ok(())\n    }\n  }\n\n  #[pallet::validate_unsigned]\n  impl<T: Config> ValidateUnsigned for Pallet<T> {\n    type Call = Call<T>;\n\n    fn validate_unsigned(_: TransactionSource, call: &Self::Call) -> TransactionValidity {\n      // Match to be exhaustive\n      match call {\n        Call::set_keys { network, ref removed_participants, ref key_pair, ref signature } => {\n          let network = *network;\n\n          // Confirm this set has a session\n          let Some(current_session) = Self::session(NetworkId::from(network)) else {\n            Err(InvalidTransaction::Custom(1))?\n          };\n\n          let set = ExternalValidatorSet { network, session: current_session };\n\n          // Confirm it has yet to set keys\n          if Keys::<T>::get(set).is_some() {\n            Err(InvalidTransaction::Stale)?;\n          }\n\n          // This is a needed precondition as this uses storage variables for the latest decided\n          // session on this assumption\n          
assert_eq!(Pallet::<T>::latest_decided_session(network.into()), Some(current_session));\n\n          // This does not slash the removed participants as that'll be done at the end of the\n          // set's lifetime\n          let mut removed = hashbrown::HashSet::new();\n          for participant in removed_participants {\n            // Confirm this wasn't duplicated\n            if removed.contains(&participant.0) {\n              Err(InvalidTransaction::Custom(2))?;\n            }\n            removed.insert(participant.0);\n          }\n\n          let participants = Participants::<T>::get(NetworkId::from(network))\n            .expect(\"session existed without participants\");\n\n          let mut all_key_shares = 0;\n          let mut signers = vec![];\n          let mut signing_key_shares = 0;\n          for participant in participants {\n            let participant = participant.0;\n            let shares = InSet::<T>::get(NetworkId::from(network), participant)\n              .expect(\"participant from Participants wasn't InSet\");\n            all_key_shares += shares;\n\n            if removed.contains(&participant.0) {\n              continue;\n            }\n\n            signers.push(participant);\n            signing_key_shares += shares;\n          }\n\n          {\n            let f = all_key_shares - signing_key_shares;\n            if signing_key_shares < ((2 * f) + 1) {\n              Err(InvalidTransaction::Custom(3))?;\n            }\n          }\n\n          // Verify the signature with the MuSig key of the signers\n          // We theoretically don't need set_keys_message to bind to removed_participants, as the\n          // key we're signing with effectively already does so, yet there's no reason not to\n          if !musig_key(set.into(), &signers)\n            .verify(&set_keys_message(&set, removed_participants, key_pair), signature)\n          {\n            Err(InvalidTransaction::BadProof)?;\n          }\n\n          
ValidTransaction::with_tag_prefix(\"ValidatorSets\")\n            .and_provides((0, set))\n            .longevity(u64::MAX)\n            .propagate(true)\n            .build()\n        }\n        Call::report_slashes { network, ref slashes, ref signature } => {\n          let network = *network;\n          let Some(key) = PendingSlashReport::<T>::take(network) else {\n            // Assumed already published\n            Err(InvalidTransaction::Stale)?\n          };\n\n          // There must have been a previous session if PendingSlashReport is populated\n          let set = ExternalValidatorSet {\n            network,\n            session: Session(Self::session(NetworkId::from(network)).unwrap().0 - 1),\n          };\n          if !key.verify(&report_slashes_message(&set, slashes), signature) {\n            Err(InvalidTransaction::BadProof)?;\n          }\n\n          ValidTransaction::with_tag_prefix(\"ValidatorSets\")\n            .and_provides((1, set))\n            .longevity(MAX_KEY_SHARES_PER_SET.into())\n            .propagate(true)\n            .build()\n        }\n        Call::allocate { .. } | Call::deallocate { .. } | Call::claim_deallocation { .. 
} => {\n          Err(InvalidTransaction::Call)?\n        }\n        Call::__Ignore(_, _) => unreachable!(),\n      }\n    }\n\n    // Explicitly provide a pre-dispatch which calls validate_unsigned\n    fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> {\n      Self::validate_unsigned(TransactionSource::InBlock, call).map(|_| ()).map_err(Into::into)\n    }\n  }\n\n  impl<T: Config> AllowMint for Pallet<T> {\n    fn is_allowed(balance: &ExternalBalance) -> bool {\n      // get the required stake\n      let current_required = Self::required_stake_for_network(balance.coin.network());\n      let new_required = current_required + Self::required_stake(balance);\n\n      // get the total stake for the network & compare.\n      let staked =\n        Self::total_allocated_stake(NetworkId::from(balance.coin.network())).unwrap_or(Amount(0));\n      staked.0 >= new_required\n    }\n  }\n\n  #[rustfmt::skip]\n  impl<T: Config, V: Into<Public> + From<Public>> KeyOwnerProofSystem<(KeyTypeId, V)> for Pallet<T> {\n    type Proof = MembershipProof<T>;\n    type IdentificationTuple = Public;\n\n    fn prove(key: (KeyTypeId, V)) -> Option<Self::Proof> {\n      Some(MembershipProof(key.1.into(), PhantomData))\n    }\n\n    fn check_proof(key: (KeyTypeId, V), proof: Self::Proof) -> Option<Self::IdentificationTuple> {\n      let validator = key.1.into();\n\n      // check the offender and the proof offender are the same.\n      if validator != proof.0 {\n        return None;\n      }\n\n      // check validator is valid\n      if !Self::can_slash_serai_validator(validator) {\n        return None;\n      }\n\n      Some(validator)\n    }\n  }\n\n  impl<T: Config> ReportOffence<Public, Public, BabeEquivocationOffence<Public>> for Pallet<T> {\n    /// Report an `offence` and reward given `reporters`.\n    fn report_offence(\n      _: Vec<Public>,\n      offence: BabeEquivocationOffence<Public>,\n    ) -> Result<(), OffenceError> {\n      // slash the offender\n     
 let offender = offence.offender;\n      Self::slash_serai_validator(offender);\n\n      // disable it\n      Self::disable_serai_validator(offender);\n\n      Ok(())\n    }\n\n    fn is_known_offence(\n      offenders: &[Public],\n      _: &<BabeEquivocationOffence<Public> as Offence<Public>>::TimeSlot,\n    ) -> bool {\n      for offender in offenders {\n        // It's not a known offence if we can still slash them\n        if Self::can_slash_serai_validator(*offender) {\n          return false;\n        }\n      }\n      true\n    }\n  }\n\n  impl<T: Config> ReportOffence<Public, Public, GrandpaEquivocationOffence<Public>> for Pallet<T> {\n    /// Report an `offence` and reward given `reporters`.\n    fn report_offence(\n      _: Vec<Public>,\n      offence: GrandpaEquivocationOffence<Public>,\n    ) -> Result<(), OffenceError> {\n      // slash the offender\n      let offender = offence.offender;\n      Self::slash_serai_validator(offender);\n\n      // disable it\n      Self::disable_serai_validator(offender);\n\n      Ok(())\n    }\n\n    fn is_known_offence(\n      offenders: &[Public],\n      _slot: &<GrandpaEquivocationOffence<Public> as Offence<Public>>::TimeSlot,\n    ) -> bool {\n      for offender in offenders {\n        if Self::can_slash_serai_validator(*offender) {\n          return false;\n        }\n      }\n      true\n    }\n  }\n\n  impl<T: Config> FindAuthor<Public> for Pallet<T> {\n    fn find_author<'a, I>(digests: I) -> Option<Public>\n    where\n      I: 'a + IntoIterator<Item = (ConsensusEngineId, &'a [u8])>,\n    {\n      let i = Babe::<T>::find_author(digests)?;\n      Some(Babe::<T>::authorities()[i as usize].0.clone().into())\n    }\n  }\n\n  impl<T: Config> DisabledValidators for Pallet<T> {\n    fn is_disabled(index: u32) -> bool {\n      SeraiDisabledIndices::<T>::get(index).is_some()\n    }\n    fn disabled_validators() -> Vec<u32> {\n      // TODO: Use a storage iterator here\n      let mut res = vec![];\n      for i in 0 .. 
MAX_KEY_SHARES_PER_SET {\n        let i = i.into();\n        if Self::is_disabled(i) {\n          res.push(i);\n        }\n      }\n      res\n    }\n  }\n}\n\npub use pallet::*;\n"
  },
  {
    "path": "substrate/validator-sets/primitives/Cargo.toml",
    "content": "[package]\nname = \"serai-validator-sets-primitives\"\nversion = \"0.1.0\"\ndescription = \"Primitives for validator sets\"\nlicense = \"MIT\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/substrate/validator-sets/primitives\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nedition = \"2021\"\nrust-version = \"1.74\"\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nzeroize = { version = \"^1.5\", features = [\"derive\"], optional = true }\n\ndalek-ff-group = { path = \"../../../crypto/dalek-ff-group\", default-features = false, features = [\"alloc\"] }\nciphersuite = { path = \"../../../crypto/ciphersuite\", version = \"0.4\", default-features = false, features = [\"alloc\"] }\ndkg-musig = { path = \"../../../crypto/dkg/musig\", default-features = false }\n\nborsh = { version = \"1\", default-features = false, features = [\"derive\", \"de_strict_order\"], optional = true }\nserde = { version = \"1\", default-features = false, features = [\"derive\", \"alloc\"], optional = true }\n\nscale = { package = \"parity-scale-codec\", version = \"3\", default-features = false, features = [\"derive\", \"max-encoded-len\"] }\n\nsp-core = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\nsp-std = { git = \"https://github.com/serai-dex/patch-polkadot-sdk\", rev = \"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b\", default-features = false }\n\nserai-primitives = { path = \"../../primitives\", default-features = false }\n\n[features]\nstd = [\"zeroize\", \"ciphersuite/std\", \"dkg-musig/std\", \"borsh?/std\", \"serde?/std\", \"scale/std\", \"sp-core/std\", \"sp-std/std\", \"serai-primitives/std\"]\nborsh = [\"dep:borsh\", \"serai-primitives/borsh\"]\nserde = [\"dep:serde\", \"serai-primitives/serde\"]\ndefault = [\"std\"]\n"
  },
  {
    "path": "substrate/validator-sets/primitives/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2022-2023 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "substrate/validator-sets/primitives/src/lib.rs",
    "content": "#![cfg_attr(not(feature = \"std\"), no_std)]\n\n#[cfg(feature = \"std\")]\nuse zeroize::Zeroize;\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::{group::GroupEncoding, Ciphersuite};\n\nuse scale::{Encode, Decode, DecodeWithMemTracking, MaxEncodedLen};\n\n#[cfg(feature = \"borsh\")]\nuse borsh::{BorshSerialize, BorshDeserialize};\n#[cfg(feature = \"serde\")]\nuse serde::{Serialize, Deserialize};\n\nuse sp_core::{ConstU32, sr25519::Public, bounded::BoundedVec};\n#[cfg(not(feature = \"std\"))]\nuse sp_std::vec::Vec;\n\nuse serai_primitives::{ExternalNetworkId, NetworkId};\n\n/// The maximum amount of key shares per set.\npub const MAX_KEY_SHARES_PER_SET: u32 = 150;\n// Support keys up to 96 bytes (BLS12-381 G2).\npub const MAX_KEY_LEN: u32 = 96;\n\n/// The type used to identify a specific session of validators.\n#[derive(\n  Clone,\n  Copy,\n  PartialEq,\n  Eq,\n  Hash,\n  Default,\n  Debug,\n  Encode,\n  Decode,\n  DecodeWithMemTracking,\n  MaxEncodedLen,\n)]\n#[cfg_attr(feature = \"std\", derive(Zeroize))]\n#[cfg_attr(feature = \"borsh\", derive(BorshSerialize, BorshDeserialize))]\n#[cfg_attr(feature = \"serde\", derive(Serialize, Deserialize))]\npub struct Session(pub u32);\n\n/// The type used to identify a specific validator set during a specific session.\n#[derive(\n  Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen,\n)]\n#[cfg_attr(feature = \"std\", derive(Zeroize))]\n#[cfg_attr(feature = \"borsh\", derive(BorshSerialize, BorshDeserialize))]\n#[cfg_attr(feature = \"serde\", derive(Serialize, Deserialize))]\npub struct ValidatorSet {\n  pub session: Session,\n  pub network: NetworkId,\n}\n\n/// The type used to identify a specific validator set during a specific session.\n#[derive(\n  Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen,\n)]\n#[cfg_attr(feature = \"std\", derive(Zeroize))]\n#[cfg_attr(feature = \"borsh\", derive(BorshSerialize, 
BorshDeserialize))]\n#[cfg_attr(feature = \"serde\", derive(Serialize, Deserialize))]\npub struct ExternalValidatorSet {\n  pub session: Session,\n  pub network: ExternalNetworkId,\n}\n\nimpl From<ExternalValidatorSet> for ValidatorSet {\n  fn from(set: ExternalValidatorSet) -> Self {\n    ValidatorSet { session: set.session, network: set.network.into() }\n  }\n}\n\nimpl TryFrom<ValidatorSet> for ExternalValidatorSet {\n  type Error = ();\n\n  fn try_from(set: ValidatorSet) -> Result<Self, Self::Error> {\n    match set.network {\n      NetworkId::Serai => Err(())?,\n      NetworkId::External(network) => Ok(ExternalValidatorSet { session: set.session, network }),\n    }\n  }\n}\n\ntype MaxKeyLen = ConstU32<MAX_KEY_LEN>;\n/// The type representing a Key from an external network.\npub type ExternalKey = BoundedVec<u8, MaxKeyLen>;\n\n/// The key pair for a validator set.\n///\n/// This is their Ristretto key, used for signing Batches, and their key on the external network.\n#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen)]\n#[cfg_attr(feature = \"borsh\", derive(BorshSerialize, BorshDeserialize))]\n#[cfg_attr(feature = \"serde\", derive(Serialize, Deserialize))]\npub struct KeyPair(\n  #[cfg_attr(\n    feature = \"borsh\",\n    borsh(\n      serialize_with = \"serai_primitives::borsh_serialize_public\",\n      deserialize_with = \"serai_primitives::borsh_deserialize_public\"\n    )\n  )]\n  pub Public,\n  #[cfg_attr(\n    feature = \"borsh\",\n    borsh(\n      serialize_with = \"serai_primitives::borsh_serialize_bounded_vec\",\n      deserialize_with = \"serai_primitives::borsh_deserialize_bounded_vec\"\n    )\n  )]\n  pub ExternalKey,\n);\n#[cfg(feature = \"std\")]\nimpl Zeroize for KeyPair {\n  fn zeroize(&mut self) {\n    self.0 .0.zeroize();\n    self.1.as_mut().zeroize();\n  }\n}\n\n/// The MuSig context for a validator set.\npub fn musig_context(set: ValidatorSet) -> [u8; 32] {\n  let mut context = [0; 32];\n  const 
DST: &[u8] = b\"ValidatorSets-musig_key\";\n  context[.. DST.len()].copy_from_slice(DST);\n  let set = set.encode();\n  context[DST.len() .. (DST.len() + set.len())].copy_from_slice(&set);\n  context\n}\n\n/// The MuSig public key for a validator set.\n///\n/// This function panics on invalid input.\npub fn musig_key(set: ValidatorSet, set_keys: &[Public]) -> Public {\n  let mut keys = Vec::new();\n  for key in set_keys {\n    keys.push(\n      <Ristretto as Ciphersuite>::read_G::<&[u8]>(&mut key.0.as_ref())\n        .expect(\"invalid participant\"),\n    );\n  }\n  dkg_musig::musig_key_vartime::<Ristretto>(musig_context(set), &keys).unwrap().to_bytes().into()\n}\n\n/// The message for the set_keys signature.\npub fn set_keys_message(\n  set: &ExternalValidatorSet,\n  removed_participants: &[Public],\n  key_pair: &KeyPair,\n) -> Vec<u8> {\n  (b\"ValidatorSets-set_keys\", set, removed_participants, key_pair).encode()\n}\n\npub fn report_slashes_message(set: &ExternalValidatorSet, slashes: &[(Public, u32)]) -> Vec<u8> {\n  (b\"ValidatorSets-report_slashes\", set, slashes).encode()\n}\n\n/// For a set of validators whose key shares may exceed the maximum, reduce until they equal the\n/// maximum.\n///\n/// Reduction occurs by reducing each validator in a reverse round-robin.\npub fn amortize_excess_key_shares(validators: &mut [(Public, u64)]) {\n  let total_key_shares = validators.iter().map(|(_, shares)| shares).sum::<u64>();\n  for i in 0 .. 
usize::try_from(total_key_shares.saturating_sub(u64::from(MAX_KEY_SHARES_PER_SET)))\n    .unwrap()\n  {\n    validators[validators.len() - ((i % validators.len()) + 1)].1 -= 1;\n  }\n}\n\n/// Returns the post-amortization key shares for the top validator.\n///\n/// Panics when `validators == 0`.\npub fn post_amortization_key_shares_for_top_validator(\n  validators: usize,\n  top: u64,\n  key_shares: u64,\n) -> u64 {\n  top -\n    (key_shares.saturating_sub(MAX_KEY_SHARES_PER_SET.into()) /\n      u64::try_from(validators).unwrap())\n}\n"
  },
  {
    "path": "tests/coordinator/Cargo.toml",
    "content": "[package]\nname = \"serai-coordinator-tests\"\nversion = \"0.1.0\"\ndescription = \"Tests for Serai's Coordinator\"\nlicense = \"AGPL-3.0-only\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/tests/coordinator\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nkeywords = []\nedition = \"2021\"\npublish = false\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nhex = \"0.4\"\n\nasync-trait = \"0.1\"\nzeroize = { version = \"1\", default-features = false }\nrand_core = { version = \"0.6\", default-features = false }\n\nblake2 = \"0.10\"\ndalek-ff-group = { path = \"../../crypto/dalek-ff-group\", default-features = false }\nciphersuite = { path = \"../../crypto/ciphersuite\", default-features = false }\nciphersuite-kp256 = { path = \"../../crypto/ciphersuite/kp256\", default-features = false }\nschnorrkel = \"0.11\"\ndkg = { path = \"../../crypto/dkg\", default-features = false }\n\nmessages = { package = \"serai-processor-messages\", path = \"../../processor/messages\" }\n\nscale = { package = \"parity-scale-codec\", version = \"3\" }\n\nserai-client = { path = \"../../substrate/client\", features = [\"serai\"] }\nserai-message-queue = { path = \"../../message-queue\" }\n\nborsh = { version = \"1\", features = [\"de_strict_order\"] }\n\ntokio = { version = \"1\", features = [\"time\"] }\n\ndockertest = \"0.5\"\nserai-docker-tests = { path = \"../docker\" }\nserai-message-queue-tests = { path = \"../message-queue\" }\n"
  },
  {
    "path": "tests/coordinator/LICENSE",
    "content": "AGPL-3.0-only license\n\nCopyright (c) 2023 Luke Parker\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License Version 3 as\npublished by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n"
  },
  {
    "path": "tests/coordinator/src/lib.rs",
    "content": "#![allow(clippy::needless_pass_by_ref_mut)] // False positives\n\nuse std::{\n  sync::{OnceLock, Arc},\n  time::Duration,\n};\n\nuse tokio::{\n  task::AbortHandle,\n  sync::{Mutex as AsyncMutex, mpsc},\n};\n\nuse rand_core::{RngCore, OsRng};\n\nuse zeroize::Zeroizing;\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::{\n  group::{ff::PrimeField, GroupEncoding},\n  Ciphersuite,\n};\n\nuse serai_client::primitives::ExternalNetworkId;\n\nuse messages::{\n  coordinator::{SubstrateSignableId, SubstrateSignId, cosign_block_msg},\n  CoordinatorMessage, ProcessorMessage,\n};\nuse serai_message_queue::{Service, Metadata, client::MessageQueue};\n\nuse serai_client::Serai;\n\nuse dockertest::{PullPolicy, Image, TestBodySpecification, DockerOperations};\n\n#[cfg(test)]\nmod tests;\n\npub fn coordinator_instance(\n  name: &str,\n  message_queue_key: <Ristretto as Ciphersuite>::F,\n) -> TestBodySpecification {\n  serai_docker_tests::build(\"coordinator\".to_string());\n\n  TestBodySpecification::with_image(\n    Image::with_repository(\"serai-dev-coordinator\").pull_policy(PullPolicy::Never),\n  )\n  .replace_env(\n    [\n      (\"MESSAGE_QUEUE_KEY\".to_string(), hex::encode(message_queue_key.to_repr())),\n      (\"DB_PATH\".to_string(), \"./coordinator-db\".to_string()),\n      (\"SERAI_KEY\".to_string(), {\n        use serai_client::primitives::insecure_pair_from_name;\n        hex::encode(&insecure_pair_from_name(name).as_ref().secret.to_bytes()[.. 
32])\n      }),\n      (\n        \"RUST_LOG\".to_string(),\n        \"serai_coordinator=trace,\".to_string() + \"tributary_chain=trace,\" + \"tendermint=trace\",\n      ),\n    ]\n    .into(),\n  )\n}\n\npub fn serai_composition(name: &str, fast_epoch: bool) -> TestBodySpecification {\n  (if fast_epoch {\n    serai_docker_tests::build(\"serai-fast-epoch\".to_string());\n    TestBodySpecification::with_image(\n      Image::with_repository(\"serai-dev-serai-fast-epoch\").pull_policy(PullPolicy::Never),\n    )\n  } else {\n    serai_docker_tests::build(\"serai\".to_string());\n    TestBodySpecification::with_image(\n      Image::with_repository(\"serai-dev-serai\").pull_policy(PullPolicy::Never),\n    )\n  })\n  .replace_env(\n    [(\"SERAI_NAME\".to_string(), name.to_lowercase()), (\"KEY\".to_string(), \" \".to_string())].into(),\n  )\n  .set_publish_all_ports(true)\n}\n\nfn is_cosign_message(msg: &CoordinatorMessage) -> bool {\n  matches!(\n    msg,\n    CoordinatorMessage::Coordinator(\n      messages::coordinator::CoordinatorMessage::CosignSubstrateBlock { .. }\n    )\n  ) || matches!(\n    msg,\n    CoordinatorMessage::Coordinator(\n      messages::coordinator::CoordinatorMessage::SubstratePreprocesses {\n        id: SubstrateSignId { id: SubstrateSignableId::CosigningSubstrateBlock(_), .. },\n        ..\n      }\n    ),\n  ) || matches!(\n    msg,\n    CoordinatorMessage::Coordinator(messages::coordinator::CoordinatorMessage::SubstrateShares {\n      id: SubstrateSignId { id: SubstrateSignableId::CosigningSubstrateBlock(_), .. 
},\n      ..\n    }),\n  )\n}\n\n#[derive(Clone, PartialEq, Eq, Debug)]\npub struct Handles {\n  pub(crate) serai: String,\n  pub(crate) message_queue: String,\n}\n\npub struct Processor {\n  network: ExternalNetworkId,\n\n  serai_rpc: String,\n  #[allow(unused)]\n  handles: Handles,\n\n  msgs: mpsc::UnboundedReceiver<messages::CoordinatorMessage>,\n  queue_for_sending: MessageQueue,\n  abort_handle: Option<Arc<AbortHandle>>,\n\n  substrate_key: Arc<AsyncMutex<Option<Zeroizing<<Ristretto as Ciphersuite>::F>>>>,\n}\n\nimpl Drop for Processor {\n  fn drop(&mut self) {\n    if let Some(abort_handle) = self.abort_handle.take() {\n      abort_handle.abort();\n    };\n  }\n}\n\nimpl Processor {\n  pub async fn new(\n    raw_i: u8,\n    network: ExternalNetworkId,\n    ops: &DockerOperations,\n    handles: Handles,\n    processor_key: <Ristretto as Ciphersuite>::F,\n  ) -> Processor {\n    let message_queue_rpc = ops.handle(&handles.message_queue).host_port(2287).unwrap();\n    let message_queue_rpc = format!(\"{}:{}\", message_queue_rpc.0, message_queue_rpc.1);\n\n    // Sleep until the Substrate RPC starts\n    let serai_rpc = ops.handle(&handles.serai).host_port(9944).unwrap();\n    let serai_rpc = format!(\"http://{}:{}\", serai_rpc.0, serai_rpc.1);\n    // Bound execution to 60 seconds\n    for _ in 0 .. 
60 {\n      tokio::time::sleep(Duration::from_secs(1)).await;\n      let Ok(client) = Serai::new(serai_rpc.clone()).await else { continue };\n      if client.latest_finalized_block_hash().await.is_err() {\n        continue;\n      }\n      break;\n    }\n\n    // The Serai RPC may or may not be started\n    // Assume it is and continue, so if it's a few seconds late, it's still within tolerance\n\n    // Create the queue\n    let mut queue = (\n      0,\n      Arc::new(MessageQueue::new(\n        Service::Processor(network),\n        message_queue_rpc.clone(),\n        Zeroizing::new(processor_key),\n      )),\n    );\n\n    let (msg_send, msg_recv) = mpsc::unbounded_channel();\n\n    let substrate_key = Arc::new(AsyncMutex::new(None));\n    let mut res = Processor {\n      network,\n\n      serai_rpc,\n      handles,\n\n      queue_for_sending: MessageQueue::new(\n        Service::Processor(network),\n        message_queue_rpc,\n        Zeroizing::new(processor_key),\n      ),\n      msgs: msg_recv,\n      abort_handle: None,\n\n      substrate_key: substrate_key.clone(),\n    };\n\n    // Spawn a task to handle cosigns and forward messages as appropriate\n    let abort_handle = tokio::spawn({\n      async move {\n        loop {\n          // Get new messages\n          let (next_recv_id, queue) = &mut queue;\n          let msg = queue.next(Service::Coordinator).await;\n          assert_eq!(msg.from, Service::Coordinator);\n          assert_eq!(msg.id, *next_recv_id);\n          queue.ack(Service::Coordinator, msg.id).await;\n          *next_recv_id += 1;\n\n          let msg_msg = borsh::from_slice(&msg.msg).unwrap();\n\n          // Remove any BatchReattempts clogging the pipe\n          // TODO: Set up a wrapper around serai-client so we aren't throwing this away yet\n          // leave it for the tests\n          if matches!(\n            msg_msg,\n            messages::CoordinatorMessage::Coordinator(\n              
messages::coordinator::CoordinatorMessage::BatchReattempt { .. }\n            )\n          ) {\n            continue;\n          }\n\n          if !is_cosign_message(&msg_msg) {\n            msg_send.send(msg_msg).unwrap();\n            continue;\n          }\n          let msg = msg_msg;\n\n          let send_message = |msg: ProcessorMessage| async move {\n            queue\n              .queue(\n                Metadata {\n                  from: Service::Processor(network),\n                  to: Service::Coordinator,\n                  intent: msg.intent(),\n                },\n                borsh::to_vec(&msg).unwrap(),\n              )\n              .await;\n          };\n\n          struct CurrentCosign {\n            block_number: u64,\n            block: [u8; 32],\n          }\n          static CURRENT_COSIGN: OnceLock<AsyncMutex<Option<CurrentCosign>>> = OnceLock::new();\n          let mut current_cosign =\n            CURRENT_COSIGN.get_or_init(|| AsyncMutex::new(None)).lock().await;\n          match msg {\n            // If this is a CosignSubstrateBlock, reset the CurrentCosign\n            // While technically, each processor should individually track the current cosign,\n            // this is fine for current testing purposes\n            CoordinatorMessage::Coordinator(\n              messages::coordinator::CoordinatorMessage::CosignSubstrateBlock { id, block_number },\n            ) => {\n              let SubstrateSignId {\n                id: SubstrateSignableId::CosigningSubstrateBlock(block), ..\n              } = id\n              else {\n                panic!(\"CosignSubstrateBlock didn't have CosigningSubstrateBlock ID\")\n              };\n\n              let new_cosign = CurrentCosign { block_number, block };\n              if current_cosign.is_none() || (current_cosign.as_ref().unwrap().block != block) {\n                *current_cosign = Some(new_cosign);\n              }\n              send_message(\n                
messages::coordinator::ProcessorMessage::CosignPreprocess {\n                  id: id.clone(),\n                  preprocesses: vec![[raw_i; 64]],\n                }\n                .into(),\n              )\n              .await;\n            }\n            CoordinatorMessage::Coordinator(\n              messages::coordinator::CoordinatorMessage::SubstratePreprocesses { id, .. },\n            ) => {\n              // TODO: Assert the ID matches CURRENT_COSIGN\n              // TODO: Verify the received preprocesses\n              send_message(\n                messages::coordinator::ProcessorMessage::SubstrateShare {\n                  id,\n                  shares: vec![[raw_i; 32]],\n                }\n                .into(),\n              )\n              .await;\n            }\n            CoordinatorMessage::Coordinator(\n              messages::coordinator::CoordinatorMessage::SubstrateShares { .. },\n            ) => {\n              // TODO: Assert the ID matches CURRENT_COSIGN\n              // TODO: Verify the shares\n\n              let block_number = current_cosign.as_ref().unwrap().block_number;\n              let block = current_cosign.as_ref().unwrap().block;\n\n              let substrate_key = substrate_key.lock().await.clone().unwrap();\n\n              // Expand to a key pair as Schnorrkel expects\n              // It's the private key + 32-bytes of entropy for nonces + the public key\n              let mut schnorrkel_key_pair = [0; 96];\n              schnorrkel_key_pair[.. 32].copy_from_slice(&substrate_key.to_repr());\n              OsRng.fill_bytes(&mut schnorrkel_key_pair[32 .. 
64]);\n              schnorrkel_key_pair[64 ..].copy_from_slice(\n                &(<Ristretto as Ciphersuite>::generator() * *substrate_key).to_bytes(),\n              );\n              let signature = schnorrkel::keys::Keypair::from_bytes(&schnorrkel_key_pair)\n                .unwrap()\n                .sign_simple(b\"substrate\", &cosign_block_msg(block_number, block))\n                .to_bytes();\n\n              send_message(\n                messages::coordinator::ProcessorMessage::CosignedBlock {\n                  block_number,\n                  block,\n                  signature: signature.to_vec(),\n                }\n                .into(),\n              )\n              .await;\n            }\n            _ => panic!(\"unexpected message passed is_cosign_message\"),\n          }\n        }\n      }\n    })\n    .abort_handle();\n\n    res.abort_handle = Some(Arc::new(abort_handle));\n\n    res\n  }\n\n  pub async fn serai(&self) -> Serai {\n    Serai::new(self.serai_rpc.clone()).await.unwrap()\n  }\n\n  /// Send a message to the coordinator as a processor.\n  pub async fn send_message(&mut self, msg: impl Into<ProcessorMessage>) {\n    let msg: ProcessorMessage = msg.into();\n\n    self\n      .queue_for_sending\n      .queue(\n        Metadata {\n          from: Service::Processor(self.network),\n          to: Service::Coordinator,\n          intent: msg.intent(),\n        },\n        borsh::to_vec(&msg).unwrap(),\n      )\n      .await;\n  }\n\n  /// Receive a message from the coordinator as a processor.\n  pub async fn recv_message(&mut self) -> CoordinatorMessage {\n    // Set a timeout of 20 minutes to allow effectively any protocol to occur without a fear of\n    // an arbitrary timeout cutting it short\n    tokio::time::timeout(Duration::from_secs(20 * 60), self.msgs.recv()).await.unwrap().unwrap()\n  }\n\n  pub async fn set_substrate_key(\n    &mut self,\n    substrate_key: Zeroizing<<Ristretto as Ciphersuite>::F>,\n  ) {\n    
*self.substrate_key.lock().await = Some(substrate_key);\n  }\n}\n"
  },
  {
    "path": "tests/coordinator/src/tests/batch.rs",
    "content": "use std::{\n  time::Duration,\n  collections::{HashSet, HashMap},\n};\n\nuse zeroize::Zeroizing;\nuse rand_core::{RngCore, OsRng};\n\nuse blake2::{\n  digest::{consts::U32, Digest},\n  Blake2b,\n};\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::{group::GroupEncoding, Ciphersuite};\nuse ciphersuite_kp256::Secp256k1;\nuse dkg::Participant;\n\nuse scale::Encode;\n\nuse serai_client::{\n  primitives::BlockHash,\n  in_instructions::{\n    primitives::{Batch, SignedBatch, batch_message},\n    InInstructionsEvent,\n  },\n  validator_sets::primitives::Session,\n};\nuse messages::{\n  coordinator::{SubstrateSignableId, SubstrateSignId},\n  SubstrateContext, CoordinatorMessage,\n};\n\nuse crate::{*, tests::*};\n\npub async fn batch(\n  processors: &mut [Processor],\n  processor_is: &[u8],\n  session: Session,\n  substrate_key: &Zeroizing<<Ristretto as Ciphersuite>::F>,\n  batch: Batch,\n) -> u64 {\n  let id = SubstrateSignId { session, id: SubstrateSignableId::Batch(batch.id), attempt: 0 };\n\n  for processor in &mut *processors {\n    processor\n      .send_message(messages::substrate::ProcessorMessage::Batch { batch: batch.clone() })\n      .await;\n  }\n\n  // Select a random participant to exclude, so we know for sure who *is* participating\n  assert_eq!(COORDINATORS - THRESHOLD, 1);\n  let excluded_signer =\n    usize::try_from(OsRng.next_u64() % u64::try_from(processors.len()).unwrap()).unwrap();\n  for (i, processor) in processors.iter_mut().enumerate() {\n    if i == excluded_signer {\n      continue;\n    }\n\n    processor\n      .send_message(messages::coordinator::ProcessorMessage::BatchPreprocess {\n        id: id.clone(),\n        block: batch.block,\n        preprocesses: vec![[processor_is[i]; 64]],\n      })\n      .await;\n  }\n  // Before this Batch is signed, the Tributary will agree this block occurred, adding an extra\n  // step of latency\n  wait_for_tributary().await;\n  wait_for_tributary().await;\n\n  // Send from the excluded 
signer so they don't stay stuck\n  processors[excluded_signer]\n    .send_message(messages::coordinator::ProcessorMessage::BatchPreprocess {\n      id: id.clone(),\n      block: batch.block,\n      preprocesses: vec![[processor_is[excluded_signer]; 64]],\n    })\n    .await;\n\n  // Read from a known signer to find out who was selected to sign\n  let known_signer = (excluded_signer + 1) % COORDINATORS;\n  let first_preprocesses = processors[known_signer].recv_message().await;\n  let participants = match first_preprocesses {\n    CoordinatorMessage::Coordinator(\n      messages::coordinator::CoordinatorMessage::SubstratePreprocesses {\n        id: this_id,\n        preprocesses,\n      },\n    ) => {\n      assert_eq!(&id, &this_id);\n      assert_eq!(preprocesses.len(), THRESHOLD - 1);\n      let known_signer_i = Participant::new(u16::from(processor_is[known_signer])).unwrap();\n      assert!(!preprocesses.contains_key(&known_signer_i));\n\n      let mut participants = preprocesses.keys().copied().collect::<HashSet<_>>();\n      for (p, preprocess) in preprocesses {\n        assert_eq!(preprocess, [u8::try_from(u16::from(p)).unwrap(); 64]);\n      }\n      participants.insert(known_signer_i);\n      participants\n    }\n    other => panic!(\"coordinator didn't send back SubstratePreprocesses: {other:?}\"),\n  };\n\n  for i in participants.clone() {\n    if u16::from(i) == u16::from(processor_is[known_signer]) {\n      continue;\n    }\n\n    let processor =\n      &mut processors[processor_is.iter().position(|p_i| u16::from(*p_i) == u16::from(i)).unwrap()];\n    let mut preprocesses = participants\n      .clone()\n      .into_iter()\n      .map(|i| (i, [u8::try_from(u16::from(i)).unwrap(); 64]))\n      .collect::<HashMap<_, _>>();\n    preprocesses.remove(&i);\n\n    assert_eq!(\n      processor.recv_message().await,\n      CoordinatorMessage::Coordinator(\n        messages::coordinator::CoordinatorMessage::SubstratePreprocesses {\n          id: id.clone(),\n       
   preprocesses\n        }\n      )\n    );\n  }\n\n  for i in participants.clone() {\n    let processor =\n      &mut processors[processor_is.iter().position(|p_i| u16::from(*p_i) == u16::from(i)).unwrap()];\n    processor\n      .send_message(messages::coordinator::ProcessorMessage::SubstrateShare {\n        id: id.clone(),\n        shares: vec![[u8::try_from(u16::from(i)).unwrap(); 32]],\n      })\n      .await;\n  }\n  wait_for_tributary().await;\n  for i in participants.clone() {\n    let processor =\n      &mut processors[processor_is.iter().position(|p_i| u16::from(*p_i) == u16::from(i)).unwrap()];\n    let mut shares = participants\n      .clone()\n      .into_iter()\n      .map(|i| (i, [u8::try_from(u16::from(i)).unwrap(); 32]))\n      .collect::<HashMap<_, _>>();\n    shares.remove(&i);\n\n    assert_eq!(\n      processor.recv_message().await,\n      CoordinatorMessage::Coordinator(messages::coordinator::CoordinatorMessage::SubstrateShares {\n        id: id.clone(),\n        shares,\n      })\n    );\n  }\n\n  // Expand to a key pair as Schnorrkel expects\n  // It's the private key + 32-bytes of entropy for nonces + the public key\n  let mut schnorrkel_key_pair = [0; 96];\n  schnorrkel_key_pair[.. 32].copy_from_slice(&substrate_key.to_repr());\n  OsRng.fill_bytes(&mut schnorrkel_key_pair[32 .. 
64]);\n  schnorrkel_key_pair[64 ..]\n    .copy_from_slice(&(<Ristretto as Ciphersuite>::generator() * **substrate_key).to_bytes());\n  let signature = schnorrkel::keys::Keypair::from_bytes(&schnorrkel_key_pair)\n    .unwrap()\n    .sign_simple(b\"substrate\", &batch_message(&batch))\n    .to_bytes()\n    .into();\n\n  let batch = SignedBatch { batch, signature };\n\n  let serai = processors[0].serai().await;\n  let mut last_serai_block = serai.latest_finalized_block().await.unwrap().number();\n\n  for (i, processor) in processors.iter_mut().enumerate() {\n    if i == excluded_signer {\n      continue;\n    }\n    processor\n      .send_message(messages::substrate::ProcessorMessage::SignedBatch { batch: batch.clone() })\n      .await;\n  }\n\n  // Verify the Batch was published to Substrate\n  'outer: for _ in 0 .. 20 {\n    tokio::time::sleep(Duration::from_secs(6)).await;\n    if std::env::var(\"GITHUB_CI\") == Ok(\"true\".to_string()) {\n      tokio::time::sleep(Duration::from_secs(6)).await;\n    }\n\n    while last_serai_block <= serai.latest_finalized_block().await.unwrap().number() {\n      let batch_events = serai\n        .as_of(serai.finalized_block_by_number(last_serai_block).await.unwrap().unwrap().hash())\n        .in_instructions()\n        .batch_events()\n        .await\n        .unwrap();\n\n      if !batch_events.is_empty() {\n        assert_eq!(batch_events.len(), 1);\n        assert_eq!(\n          batch_events[0],\n          InInstructionsEvent::Batch {\n            network: batch.batch.network,\n            id: batch.batch.id,\n            block: batch.batch.block,\n            instructions_hash: Blake2b::<U32>::digest(batch.batch.instructions.encode()).into(),\n          }\n        );\n        break 'outer;\n      }\n      last_serai_block += 1;\n    }\n  }\n\n  // Verify the coordinator sends SubstrateBlock to all processors\n  let last_block = serai.finalized_block_by_number(last_serai_block).await.unwrap().unwrap();\n  for processor in &mut 
*processors {\n    // Handle a potential re-attempt message in the pipeline\n    let mut received = processor.recv_message().await;\n    if matches!(\n      received,\n      messages::CoordinatorMessage::Coordinator(\n        messages::coordinator::CoordinatorMessage::BatchReattempt { .. }\n      )\n    ) {\n      received = processor.recv_message().await\n    }\n\n    assert_eq!(\n      received,\n      messages::CoordinatorMessage::Substrate(\n        messages::substrate::CoordinatorMessage::SubstrateBlock {\n          context: SubstrateContext {\n            serai_time: last_block.time().unwrap() / 1000,\n            network_latest_finalized_block: batch.batch.block,\n          },\n          block: last_serai_block,\n          burns: vec![],\n          batches: vec![batch.batch.id],\n        }\n      )\n    );\n\n    // Send the ack as expected\n    processor\n      .send_message(messages::ProcessorMessage::Coordinator(\n        messages::coordinator::ProcessorMessage::SubstrateBlockAck {\n          block: last_serai_block,\n          plans: vec![],\n        },\n      ))\n      .await;\n  }\n  last_block.number()\n}\n\n#[tokio::test]\nasync fn batch_test() {\n  new_test(\n    |mut processors: Vec<Processor>| async move {\n      // pop the last participant since genesis keygen has only 4 participants\n      processors.pop().unwrap();\n      assert_eq!(processors.len(), COORDINATORS);\n\n      let (processor_is, substrate_key, _) =\n        key_gen::<Secp256k1>(&mut processors, Session(0)).await;\n      batch(\n        &mut processors,\n        &processor_is,\n        Session(0),\n        &substrate_key,\n        Batch {\n          network: ExternalNetworkId::Bitcoin,\n          id: 0,\n          block: BlockHash([0x22; 32]),\n          instructions: vec![],\n        },\n      )\n      .await;\n    },\n    false,\n  )\n  .await;\n}\n"
  },
  {
    "path": "tests/coordinator/src/tests/key_gen.rs",
    "content": "use std::{\n  time::{Duration, SystemTime},\n  collections::HashMap,\n};\n\nuse zeroize::Zeroizing;\nuse rand_core::OsRng;\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::{\n  group::{ff::Field, GroupEncoding},\n  Ciphersuite,\n};\nuse ciphersuite_kp256::Secp256k1;\nuse dkg::ThresholdParams;\n\nuse serai_client::validator_sets::primitives::{ExternalValidatorSet, KeyPair, Session};\nuse messages::{key_gen::KeyGenId, CoordinatorMessage};\n\nuse crate::tests::*;\n\npub async fn key_gen<C: Ciphersuite>(\n  processors: &mut [Processor],\n  session: Session,\n) -> (Vec<u8>, Zeroizing<<Ristretto as Ciphersuite>::F>, Zeroizing<C::F>) {\n  let coordinators = processors.len();\n  let mut participant_is = vec![];\n\n  let set = ExternalValidatorSet { session, network: ExternalNetworkId::Bitcoin };\n  let id = KeyGenId { session: set.session, attempt: 0 };\n\n  for (i, processor) in processors.iter_mut().enumerate() {\n    let msg = processor.recv_message().await;\n    match &msg {\n      CoordinatorMessage::KeyGen(messages::key_gen::CoordinatorMessage::GenerateKey {\n        params,\n        ..\n      }) => {\n        participant_is.push(params.i());\n      }\n      _ => panic!(\"unexpected message: {msg:?}\"),\n    }\n\n    assert_eq!(\n      msg,\n      CoordinatorMessage::KeyGen(messages::key_gen::CoordinatorMessage::GenerateKey {\n        id,\n        params: ThresholdParams::new(\n          u16::try_from(((coordinators * 2) / 3) + 1).unwrap(),\n          u16::try_from(coordinators).unwrap(),\n          participant_is[i],\n        )\n        .unwrap(),\n        shares: 1,\n      })\n    );\n\n    processor\n      .send_message(messages::key_gen::ProcessorMessage::Commitments {\n        id,\n        commitments: vec![vec![u8::try_from(u16::from(participant_is[i])).unwrap()]],\n      })\n      .await;\n  }\n\n  wait_for_tributary().await;\n  for (i, processor) in processors.iter_mut().enumerate() {\n    let mut commitments = (0 .. 
u8::try_from(coordinators).unwrap())\n      .map(|l| {\n        (\n          participant_is[usize::from(l)],\n          vec![u8::try_from(u16::from(participant_is[usize::from(l)])).unwrap()],\n        )\n      })\n      .collect::<HashMap<_, _>>();\n    commitments.remove(&participant_is[i]);\n    assert_eq!(\n      processor.recv_message().await,\n      CoordinatorMessage::KeyGen(messages::key_gen::CoordinatorMessage::Commitments {\n        id,\n        commitments,\n      })\n    );\n\n    // Recipient it's for -> (Sender i, Recipient i)\n    let mut shares = (0 .. u8::try_from(coordinators).unwrap())\n      .map(|l| {\n        (\n          participant_is[usize::from(l)],\n          vec![\n            u8::try_from(u16::from(participant_is[i])).unwrap(),\n            u8::try_from(u16::from(participant_is[usize::from(l)])).unwrap(),\n          ],\n        )\n      })\n      .collect::<HashMap<_, _>>();\n\n    shares.remove(&participant_is[i]);\n    processor\n      .send_message(messages::key_gen::ProcessorMessage::Shares { id, shares: vec![shares] })\n      .await;\n  }\n\n  let substrate_priv_key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));\n  let substrate_key = (<Ristretto as Ciphersuite>::generator() * *substrate_priv_key).to_bytes();\n\n  let network_priv_key = Zeroizing::new(C::F::random(&mut OsRng));\n  let network_key = (C::generator() * *network_priv_key).to_bytes().as_ref().to_vec();\n\n  let serai = processors[0].serai().await;\n  let mut last_serai_block = serai.latest_finalized_block().await.unwrap().number();\n\n  wait_for_tributary().await;\n  for (i, processor) in processors.iter_mut().enumerate() {\n    let i = participant_is[i];\n    assert_eq!(\n      processor.recv_message().await,\n      CoordinatorMessage::KeyGen(messages::key_gen::CoordinatorMessage::Shares {\n        id,\n        shares: {\n          let mut shares = (0 .. 
u8::try_from(coordinators).unwrap())\n            .map(|l| {\n              (\n                participant_is[usize::from(l)],\n                vec![\n                  u8::try_from(u16::from(participant_is[usize::from(l)])).unwrap(),\n                  u8::try_from(u16::from(i)).unwrap(),\n                ],\n              )\n            })\n            .collect::<HashMap<_, _>>();\n          shares.remove(&i);\n          vec![shares]\n        },\n      })\n    );\n    processor\n      .send_message(messages::key_gen::ProcessorMessage::GeneratedKeyPair {\n        id,\n        substrate_key,\n        network_key: network_key.clone(),\n      })\n      .await;\n  }\n\n  // Sleeps for longer since we need to wait for a Substrate block as well\n  'outer: for _ in 0 .. 20 {\n    tokio::time::sleep(Duration::from_secs(6)).await;\n    if std::env::var(\"GITHUB_CI\") == Ok(\"true\".to_string()) {\n      tokio::time::sleep(Duration::from_secs(6)).await;\n    }\n\n    while last_serai_block <= serai.latest_finalized_block().await.unwrap().number() {\n      if !serai\n        .as_of(serai.finalized_block_by_number(last_serai_block).await.unwrap().unwrap().hash())\n        .validator_sets()\n        .key_gen_events()\n        .await\n        .unwrap()\n        .is_empty()\n      {\n        break 'outer;\n      }\n      last_serai_block += 1;\n    }\n  }\n  let mut message = None;\n  for processor in &mut *processors {\n    let msg = processor.recv_message().await;\n    if message.is_none() {\n      match msg {\n        CoordinatorMessage::Substrate(\n          messages::substrate::CoordinatorMessage::ConfirmKeyPair {\n            context,\n            session,\n            ref key_pair,\n          },\n        ) => {\n          assert!(\n            SystemTime::now()\n              .duration_since(SystemTime::UNIX_EPOCH)\n              .unwrap()\n              .as_secs()\n              .abs_diff(context.serai_time) <\n              (60 * 60 * 3) // 3 hours, which should exceed 
the length of any test we run\n          );\n          assert_eq!(context.network_latest_finalized_block.0, [0; 32]);\n          assert_eq!(set.session, session);\n          assert_eq!(key_pair.0 .0, substrate_key);\n          assert_eq!(&key_pair.1, &network_key);\n        }\n        _ => panic!(\"coordinator didn't respond with ConfirmKeyPair. msg: {msg:?}\"),\n      }\n      message = Some(msg);\n    } else {\n      assert_eq!(message, Some(msg));\n    }\n  }\n  assert_eq!(\n    serai\n      .as_of(serai.finalized_block_by_number(last_serai_block).await.unwrap().unwrap().hash())\n      .validator_sets()\n      .keys(set)\n      .await\n      .unwrap()\n      .unwrap(),\n    KeyPair(substrate_key.into(), network_key.try_into().unwrap())\n  );\n\n  for processor in &mut *processors {\n    processor.set_substrate_key(substrate_priv_key.clone()).await;\n  }\n\n  (\n    participant_is.into_iter().map(|i| u8::try_from(u16::from(i)).unwrap()).collect(),\n    substrate_priv_key,\n    network_priv_key,\n  )\n}\n\n#[tokio::test]\nasync fn key_gen_test() {\n  new_test(\n    |mut processors: Vec<Processor>| async move {\n      // pop the last participant since genesis keygen has only 4 participants\n      processors.pop().unwrap();\n      assert_eq!(processors.len(), COORDINATORS);\n\n      key_gen::<Secp256k1>(&mut processors, Session(0)).await;\n    },\n    false,\n  )\n  .await;\n}\n"
  },
  {
    "path": "tests/coordinator/src/tests/mod.rs",
    "content": "use core::future::Future;\nuse std::{sync::OnceLock, collections::HashMap};\n\nuse tokio::sync::Mutex;\n\nuse dockertest::{\n  LogAction, LogPolicy, LogSource, LogOptions, StartPolicy, TestBodySpecification,\n  DockerOperations, DockerTest,\n};\n\nuse serai_docker_tests::fresh_logs_folder;\n\nuse crate::*;\n\nmod key_gen;\npub use key_gen::key_gen;\n\nmod batch;\npub use batch::batch;\n\nmod sign;\n#[allow(unused_imports)]\npub use sign::sign;\n\nmod rotation;\n\npub(crate) const COORDINATORS: usize = 4;\npub(crate) const THRESHOLD: usize = ((COORDINATORS * 2) / 3) + 1;\n\n// Provide a unique ID and ensures only one invocation occurs at a time.\nstatic UNIQUE_ID: OnceLock<Mutex<u16>> = OnceLock::new();\n\n#[async_trait::async_trait]\npub(crate) trait TestBody: 'static + Send + Sync {\n  async fn body(&self, processors: Vec<Processor>);\n}\n#[async_trait::async_trait]\nimpl<F: Send + Future, TB: 'static + Send + Sync + Fn(Vec<Processor>) -> F> TestBody for TB {\n  async fn body(&self, processors: Vec<Processor>) {\n    (self)(processors).await;\n  }\n}\n\npub(crate) async fn new_test(test_body: impl TestBody, fast_epoch: bool) {\n  let mut unique_id_lock = UNIQUE_ID.get_or_init(|| Mutex::new(0)).lock().await;\n\n  let mut coordinators = vec![];\n  let mut test = DockerTest::new().with_network(dockertest::Network::Isolated);\n  let mut coordinator_compositions = vec![];\n  // Spawn one extra coordinator which isn't in-set\n  #[allow(clippy::range_plus_one)]\n  for i in 0 .. 
(COORDINATORS + 1) {\n    let name = match i {\n      0 => \"Alice\",\n      1 => \"Bob\",\n      2 => \"Charlie\",\n      3 => \"Dave\",\n      4 => \"Eve\",\n      5 => \"Ferdie\",\n      _ => panic!(\"needed a 7th name for a serai node\"),\n    };\n    let serai_composition = serai_composition(name, fast_epoch);\n\n    let (processor_key, message_queue_keys, message_queue_composition) =\n      serai_message_queue_tests::instance();\n\n    let coordinator_composition = coordinator_instance(name, processor_key);\n\n    // Give every item in this stack a unique ID\n    // Uses a Mutex as we can't generate a 8-byte random ID without hitting hostname length limits\n    let (first, unique_id) = {\n      let first = *unique_id_lock == 0;\n      let unique_id = *unique_id_lock;\n      *unique_id_lock += 1;\n      (first, unique_id)\n    };\n\n    let logs_path = fresh_logs_folder(first, \"coordinator\");\n\n    let mut compositions = vec![];\n    let mut handles = HashMap::new();\n    for (name, composition) in [\n      (\"serai_node\", serai_composition),\n      (\"message_queue\", message_queue_composition),\n      (\"coordinator\", coordinator_composition),\n    ] {\n      let handle = format!(\"coordinator-{name}-{unique_id}\");\n\n      compositions.push(\n        composition\n          .set_start_policy(StartPolicy::Strict)\n          .set_handle(handle.clone())\n          .set_log_options(Some(LogOptions {\n            action: if std::env::var(\"GITHUB_CI\") == Ok(\"true\".to_string()) {\n              LogAction::Forward\n            } else {\n              LogAction::ForwardToFile { path: logs_path.clone() }\n            },\n            policy: LogPolicy::Always,\n            source: LogSource::Both,\n          })),\n      );\n\n      handles.insert(name, handle);\n    }\n\n    let processor_key = message_queue_keys[&ExternalNetworkId::Bitcoin];\n\n    coordinators.push((\n      Handles {\n        serai: handles.remove(\"serai_node\").unwrap(),\n        
message_queue: handles.remove(\"message_queue\").unwrap(),\n      },\n      processor_key,\n    ));\n    coordinator_compositions.push(compositions.pop().unwrap());\n    for composition in compositions {\n      test.provide_container(composition);\n    }\n  }\n\n  struct Context {\n    pending_coordinator_compositions: Mutex<Vec<TestBodySpecification>>,\n    handles_and_keys: Vec<(Handles, <Ristretto as Ciphersuite>::F)>,\n    test_body: Box<dyn TestBody>,\n  }\n  static CONTEXT: OnceLock<Mutex<Option<Context>>> = OnceLock::new();\n  *CONTEXT.get_or_init(|| Mutex::new(None)).lock().await = Some(Context {\n    pending_coordinator_compositions: Mutex::new(coordinator_compositions),\n    handles_and_keys: coordinators,\n    test_body: Box::new(test_body),\n  });\n\n  // The DockerOperations from the first invocation, containing the Message Queue servers and the\n  // Serai nodes.\n  static OUTER_OPS: OnceLock<Mutex<Option<DockerOperations>>> = OnceLock::new();\n\n  // Reset OUTER_OPS\n  *OUTER_OPS.get_or_init(|| Mutex::new(None)).lock().await = None;\n\n  // Spawns a coordinator, if one has yet to be spawned, or else runs the test.\n  async fn spawn_coordinator_or_run_test(inner_ops: DockerOperations) {\n    // If the outer operations have yet to be set, these *are* the outer operations\n    let outer_ops = OUTER_OPS.get().unwrap();\n    if outer_ops.lock().await.is_none() {\n      *outer_ops.lock().await = Some(inner_ops);\n    }\n\n    let context_lock = CONTEXT.get().unwrap().lock().await;\n    let Context { pending_coordinator_compositions, handles_and_keys: coordinators, test_body } =\n      context_lock.as_ref().unwrap();\n\n    // Check if there is a coordinator left\n    let maybe_coordinator = {\n      let mut remaining = pending_coordinator_compositions.lock().await;\n      let maybe_coordinator = if !remaining.is_empty() {\n        let handles = coordinators[coordinators.len() - remaining.len()].0.clone();\n        let composition = remaining.remove(0);\n   
     Some((composition, handles))\n      } else {\n        None\n      };\n      drop(remaining);\n      maybe_coordinator\n    };\n\n    if let Some((mut composition, handles)) = maybe_coordinator {\n      let network = {\n        let outer_ops = outer_ops.lock().await;\n        let outer_ops = outer_ops.as_ref().unwrap();\n        // Spawn it by building another DockerTest which recursively calls this function\n        // TODO: Spawn this outside of DockerTest so we can remove the recursion\n        let serai_container = outer_ops.handle(&handles.serai);\n        composition.modify_env(\"SERAI_HOSTNAME\", serai_container.ip());\n        let message_queue_container = outer_ops.handle(&handles.message_queue);\n        composition.modify_env(\"MESSAGE_QUEUE_RPC\", message_queue_container.ip());\n\n        format!(\"container:{}\", serai_container.name())\n      };\n      let mut test = DockerTest::new().with_network(dockertest::Network::External(network));\n      test.provide_container(composition);\n\n      drop(context_lock);\n      fn recurse(ops: DockerOperations) -> core::pin::Pin<Box<impl Send + Future<Output = ()>>> {\n        Box::pin(spawn_coordinator_or_run_test(ops))\n      }\n      test.run_async(recurse).await;\n    } else {\n      let outer_ops = outer_ops.lock().await.take().unwrap();\n\n      // Wait for the Serai node to boot, and for the Tendermint chain to get past the first block\n      // TODO: Replace this with a Coordinator RPC we can query\n      tokio::time::sleep(Duration::from_secs(60)).await;\n\n      // Connect to the Message Queues as the processor\n      let mut processors: Vec<Processor> = vec![];\n      for (i, (handles, key)) in coordinators.iter().enumerate() {\n        processors.push(\n          Processor::new(\n            i.try_into().unwrap(),\n            ExternalNetworkId::Bitcoin,\n            &outer_ops,\n            handles.clone(),\n            *key,\n          )\n          .await,\n        );\n      }\n\n      
test_body.body(processors).await;\n    }\n  }\n\n  test.run_async(spawn_coordinator_or_run_test).await;\n}\n\n// TODO: Don't use a pessimistic sleep\n// Use an RPC to evaluate if a condition was met, with the following time being a timeout\n// https://github.com/serai-dex/serai/issues/340\npub(crate) async fn wait_for_tributary() {\n  tokio::time::sleep(Duration::from_secs(15)).await;\n  if std::env::var(\"GITHUB_CI\") == Ok(\"true\".to_string()) {\n    tokio::time::sleep(Duration::from_secs(6)).await;\n  }\n}\n"
  },
  {
    "path": "tests/coordinator/src/tests/rotation.rs",
    "content": "use tokio::time::{sleep, Duration};\n\nuse ciphersuite_kp256::Secp256k1;\n\nuse serai_client::{\n  primitives::{insecure_pair_from_name, NetworkId},\n  validator_sets::{\n    self,\n    primitives::{Session, ValidatorSet},\n    ValidatorSetsEvent,\n  },\n  Amount, Pair, Transaction,\n};\n\nuse crate::{*, tests::*};\n\n// TODO: This is duplicated with serai-client's tests\nasync fn publish_tx(serai: &Serai, tx: &Transaction) -> [u8; 32] {\n  let mut latest = serai\n    .block(serai.latest_finalized_block_hash().await.unwrap())\n    .await\n    .unwrap()\n    .unwrap()\n    .number();\n\n  serai.publish(tx).await.unwrap();\n\n  // Get the block it was included in\n  // TODO: Add an RPC method for this/check the guarantee on the subscription\n  let mut ticks = 0;\n  loop {\n    latest += 1;\n\n    let block = {\n      let mut block;\n      while {\n        block = serai.finalized_block_by_number(latest).await.unwrap();\n        block.is_none()\n      } {\n        sleep(Duration::from_secs(1)).await;\n        ticks += 1;\n\n        if ticks > 60 {\n          panic!(\"60 seconds without inclusion in a finalized block\");\n        }\n      }\n      block.unwrap()\n    };\n\n    for transaction in &block.transactions {\n      if transaction == tx {\n        return block.hash();\n      }\n    }\n  }\n}\n\n#[allow(dead_code)]\nasync fn allocate_stake(\n  serai: &Serai,\n  network: NetworkId,\n  amount: Amount,\n  pair: &Pair,\n  nonce: u32,\n) -> [u8; 32] {\n  // get the call\n  let tx =\n    serai.sign(pair, validator_sets::SeraiValidatorSets::allocate(network, amount), nonce, 0);\n  publish_tx(serai, &tx).await\n}\n\n#[allow(dead_code)]\nasync fn deallocate_stake(\n  serai: &Serai,\n  network: NetworkId,\n  amount: Amount,\n  pair: &Pair,\n  nonce: u32,\n) -> [u8; 32] {\n  // get the call\n  let tx =\n    serai.sign(pair, validator_sets::SeraiValidatorSets::deallocate(network, amount), nonce, 0);\n  publish_tx(serai, &tx).await\n}\n\nasync fn 
get_session(serai: &Serai, network: NetworkId) -> Session {\n  serai\n    .as_of_latest_finalized_block()\n    .await\n    .unwrap()\n    .validator_sets()\n    .session(network)\n    .await\n    .unwrap()\n    .unwrap()\n}\n\nasync fn wait_till_session_1(serai: &Serai, network: NetworkId) {\n  let mut current_session = get_session(serai, network).await;\n\n  while current_session.0 < 1 {\n    sleep(Duration::from_secs(6)).await;\n    current_session = get_session(serai, network).await;\n  }\n}\n\nasync fn most_recent_new_set_event(serai: &Serai, network: NetworkId) -> ValidatorSetsEvent {\n  let mut current_block = serai.latest_finalized_block().await.unwrap();\n  loop {\n    let events = serai.as_of(current_block.hash()).validator_sets().new_set_events().await.unwrap();\n    for event in events {\n      match event {\n        ValidatorSetsEvent::NewSet { set } => {\n          if set.network == network {\n            return event;\n          }\n        }\n        _ => panic!(\"new_set_events gave non-NewSet event: {event:?}\"),\n      }\n    }\n    current_block = serai.block(current_block.header.parent_hash.0).await.unwrap().unwrap();\n  }\n}\n\n#[tokio::test]\nasync fn set_rotation_test() {\n  new_test(\n    |mut processors: Vec<Processor>| async move {\n      // exclude the last processor from keygen since we will add him later\n      let mut excluded = processors.pop().unwrap();\n      assert_eq!(processors.len(), COORDINATORS);\n\n      // excluded participant\n      let pair5 = insecure_pair_from_name(\"Eve\");\n      let network = ExternalNetworkId::Bitcoin;\n      let amount = Amount(1_000_000 * 10_u64.pow(8));\n      let serai = processors[0].serai().await;\n\n      // allocate now for the last participant so that it is guaranteed to be included into session\n      // 1 set. 
This doesn't affect the genesis set at all since that is a predetermined set.\n      allocate_stake(&serai, network.into(), amount, &pair5, 0).await;\n\n      // genesis keygen\n      let _ = key_gen::<Secp256k1>(&mut processors, Session(0)).await;\n      // Even the excluded processor should receive the key pair confirmation\n      match excluded.recv_message().await {\n        CoordinatorMessage::Substrate(\n          messages::substrate::CoordinatorMessage::ConfirmKeyPair { session, .. },\n        ) => assert_eq!(session, Session(0)),\n        _ => panic!(\"excluded got message other than ConfirmKeyPair\"),\n      }\n\n      // wait until next session to see the effect on coordinator\n      wait_till_session_1(&serai, network.into()).await;\n\n      // Ensure the new validator was included in the new set\n      assert_eq!(\n        most_recent_new_set_event(&serai, network.into()).await,\n        ValidatorSetsEvent::NewSet {\n          set: ValidatorSet { session: Session(1), network: network.into() }\n        },\n      );\n\n      // add the last participant & do the keygen\n      processors.push(excluded);\n      let _ = key_gen::<Secp256k1>(&mut processors, Session(1)).await;\n    },\n    true,\n  )\n  .await;\n}\n"
  },
  {
    "path": "tests/coordinator/src/tests/sign.rs",
    "content": "use std::{\n  time::Duration,\n  collections::{HashSet, HashMap},\n};\n\nuse rand_core::{RngCore, OsRng};\n\nuse ciphersuite_kp256::Secp256k1;\n\nuse dkg::Participant;\n\nuse serai_client::{\n  coins::{\n    primitives::{OutInstruction, OutInstructionWithBalance},\n    CoinsEvent,\n  },\n  in_instructions::primitives::{Batch, InInstruction, InInstructionWithBalance},\n  primitives::{\n    insecure_pair_from_name, Amount, Balance, BlockHash, Coin, ExternalAddress, ExternalBalance,\n    ExternalCoin, SeraiAddress,\n  },\n  validator_sets::primitives::Session,\n  PairTrait, SeraiCoins,\n};\nuse messages::{coordinator::PlanMeta, sign::SignId, SubstrateContext, CoordinatorMessage};\n\nuse crate::tests::*;\n\npub async fn sign(\n  processors: &mut [Processor],\n  processor_is: &[u8],\n  session: Session,\n  plan_id: [u8; 32],\n) {\n  let id = SignId { session, id: plan_id, attempt: 0 };\n\n  // Select a random participant to exclude, so we know for sure who *is* participating\n  assert_eq!(COORDINATORS - THRESHOLD, 1);\n  let excluded_signer =\n    usize::try_from(OsRng.next_u64() % u64::try_from(processors.len()).unwrap()).unwrap();\n  for (i, processor) in processors.iter_mut().enumerate() {\n    if i == excluded_signer {\n      continue;\n    }\n\n    processor\n      .send_message(messages::sign::ProcessorMessage::Preprocess {\n        id: id.clone(),\n        preprocesses: vec![vec![processor_is[i]; 128]],\n      })\n      .await;\n  }\n  // Before this plan is signed, the Tributary will agree the triggering Substrate block occurred,\n  // adding an extra step of latency\n  wait_for_tributary().await;\n  wait_for_tributary().await;\n\n  // Send from the excluded signer so they don't stay stuck\n  processors[excluded_signer]\n    .send_message(messages::sign::ProcessorMessage::Preprocess {\n      id: id.clone(),\n      preprocesses: vec![vec![processor_is[excluded_signer]; 128]],\n    })\n    .await;\n\n  // Read from a known signer to find out who 
was selected to sign\n  let known_signer = (excluded_signer + 1) % COORDINATORS;\n  let participants = match processors[known_signer].recv_message().await {\n    CoordinatorMessage::Sign(messages::sign::CoordinatorMessage::Preprocesses {\n      id: this_id,\n      preprocesses,\n    }) => {\n      assert_eq!(&id, &this_id);\n      assert_eq!(preprocesses.len(), THRESHOLD - 1);\n      let known_signer_i = Participant::new(u16::from(processor_is[known_signer])).unwrap();\n      assert!(!preprocesses.contains_key(&known_signer_i));\n\n      let mut participants = preprocesses.keys().copied().collect::<HashSet<_>>();\n      for (p, preprocess) in preprocesses {\n        assert_eq!(preprocess, vec![u8::try_from(u16::from(p)).unwrap(); 128]);\n      }\n      participants.insert(known_signer_i);\n      participants\n    }\n    _ => panic!(\"coordinator didn't send back Preprocesses\"),\n  };\n\n  for i in participants.clone() {\n    if u16::from(i) == u16::from(processor_is[known_signer]) {\n      continue;\n    }\n\n    let processor =\n      &mut processors[processor_is.iter().position(|p_i| u16::from(*p_i) == u16::from(i)).unwrap()];\n    let mut preprocesses = participants\n      .clone()\n      .into_iter()\n      .map(|i| (i, vec![u8::try_from(u16::from(i)).unwrap(); 128]))\n      .collect::<HashMap<_, _>>();\n    preprocesses.remove(&i);\n\n    assert_eq!(\n      processor.recv_message().await,\n      CoordinatorMessage::Sign(messages::sign::CoordinatorMessage::Preprocesses {\n        id: id.clone(),\n        preprocesses\n      })\n    );\n  }\n\n  for i in participants.clone() {\n    let processor =\n      &mut processors[processor_is.iter().position(|p_i| u16::from(*p_i) == u16::from(i)).unwrap()];\n    processor\n      .send_message(messages::sign::ProcessorMessage::Share {\n        id: id.clone(),\n        shares: vec![vec![u8::try_from(u16::from(i)).unwrap(); 32]],\n      })\n      .await;\n  }\n  wait_for_tributary().await;\n  for i in participants.clone() 
{\n    let processor =\n      &mut processors[processor_is.iter().position(|p_i| u16::from(*p_i) == u16::from(i)).unwrap()];\n    let mut shares = participants\n      .clone()\n      .into_iter()\n      .map(|i| (i, vec![u8::try_from(u16::from(i)).unwrap(); 32]))\n      .collect::<HashMap<_, _>>();\n    shares.remove(&i);\n\n    assert_eq!(\n      processor.recv_message().await,\n      CoordinatorMessage::Sign(messages::sign::CoordinatorMessage::Shares {\n        id: id.clone(),\n        shares,\n      })\n    );\n  }\n\n  // Send Completed\n  for i in participants.clone() {\n    let processor =\n      &mut processors[processor_is.iter().position(|p_i| u16::from(*p_i) == u16::from(i)).unwrap()];\n    processor\n      .send_message(messages::sign::ProcessorMessage::Completed {\n        session,\n        id: id.id,\n        tx: b\"signed_tx\".to_vec(),\n      })\n      .await;\n  }\n  wait_for_tributary().await;\n\n  // Make sure every processor gets Completed\n  for processor in processors {\n    assert_eq!(\n      processor.recv_message().await,\n      CoordinatorMessage::Sign(messages::sign::CoordinatorMessage::Completed {\n        session,\n        id: id.id,\n        tx: b\"signed_tx\".to_vec()\n      })\n    );\n  }\n}\n\n#[tokio::test]\nasync fn sign_test() {\n  new_test(\n    |mut processors: Vec<Processor>| async move {\n      // pop the last participant since genesis keygen has only 4 participant.\n      processors.pop().unwrap();\n      assert_eq!(processors.len(), COORDINATORS);\n\n      let (participant_is, substrate_key, _) =\n        key_gen::<Secp256k1>(&mut processors, Session(0)).await;\n\n      // 'Send' external coins into Serai\n      let serai = processors[0].serai().await;\n      let (serai_pair, serai_addr) = {\n        let mut name = [0; 4];\n        OsRng.fill_bytes(&mut name);\n        let pair = insecure_pair_from_name(&hex::encode(name));\n        let address = SeraiAddress::from(pair.public());\n\n        // Fund the new account to pay 
for fees\n        let balance = Balance { coin: Coin::Serai, amount: Amount(1_000_000_000) };\n        serai\n          .publish(&serai.sign(\n            &insecure_pair_from_name(\"Ferdie\"),\n            SeraiCoins::transfer(address, balance),\n            0,\n            Default::default(),\n          ))\n          .await\n          .unwrap();\n\n        (pair, address)\n      };\n\n      #[allow(clippy::inconsistent_digit_grouping)]\n      let amount = Amount(1_000_000_00);\n      let balance = ExternalBalance { coin: ExternalCoin::Bitcoin, amount };\n\n      let coin_block = BlockHash([0x33; 32]);\n      let block_included_in = batch(\n        &mut processors,\n        &participant_is,\n        Session(0),\n        &substrate_key,\n        Batch {\n          network: balance.coin.network(),\n          id: 0,\n          block: coin_block,\n          instructions: vec![InInstructionWithBalance {\n            instruction: InInstruction::Transfer(serai_addr),\n            balance,\n          }],\n        },\n      )\n      .await;\n\n      {\n        let block_included_in_hash =\n          serai.finalized_block_by_number(block_included_in).await.unwrap().unwrap().hash();\n\n        let serai = serai.as_of(block_included_in_hash);\n        let serai = serai.coins();\n        assert_eq!(\n          serai.coin_balance(Coin::Serai, serai_addr).await.unwrap(),\n          Amount(1_000_000_000)\n        );\n\n        // Verify the mint occurred as expected\n        assert_eq!(\n          serai.mint_events().await.unwrap(),\n          vec![CoinsEvent::Mint { to: serai_addr, balance: balance.into() }]\n        );\n        assert_eq!(serai.coin_supply(ExternalCoin::Bitcoin.into()).await.unwrap(), amount);\n        assert_eq!(\n          serai.coin_balance(ExternalCoin::Bitcoin.into(), serai_addr).await.unwrap(),\n          amount\n        );\n      }\n\n      // Trigger a burn\n      let out_instruction = OutInstructionWithBalance {\n        balance,\n        instruction: 
OutInstruction {\n          address: ExternalAddress::new(b\"external\".to_vec()).unwrap(),\n          data: None,\n        },\n      };\n      serai\n        .publish(&serai.sign(\n          &serai_pair,\n          SeraiCoins::burn_with_instruction(out_instruction.clone()),\n          0,\n          Default::default(),\n        ))\n        .await\n        .unwrap();\n\n      // TODO: We *really* need a helper for this pattern\n      let mut last_serai_block = block_included_in;\n      'outer: for _ in 0 .. 20 {\n        tokio::time::sleep(Duration::from_secs(6)).await;\n        if std::env::var(\"GITHUB_CI\") == Ok(\"true\".to_string()) {\n          tokio::time::sleep(Duration::from_secs(6)).await;\n        }\n\n        while last_serai_block <= serai.latest_finalized_block().await.unwrap().number() {\n          let burn_events = serai\n            .as_of(serai.finalized_block_by_number(last_serai_block).await.unwrap().unwrap().hash())\n            .coins()\n            .burn_with_instruction_events()\n            .await\n            .unwrap();\n\n          if !burn_events.is_empty() {\n            assert_eq!(burn_events.len(), 1);\n            assert_eq!(\n              burn_events[0],\n              CoinsEvent::BurnWithInstruction {\n                from: serai_addr,\n                instruction: out_instruction.clone()\n              }\n            );\n            break 'outer;\n          }\n          last_serai_block += 1;\n        }\n      }\n\n      let last_serai_block =\n        serai.finalized_block_by_number(last_serai_block).await.unwrap().unwrap();\n      let last_serai_block_hash = last_serai_block.hash();\n      let serai = serai.as_of(last_serai_block_hash);\n      let serai = serai.coins();\n      assert_eq!(serai.coin_supply(ExternalCoin::Bitcoin.into()).await.unwrap(), Amount(0));\n      assert_eq!(\n        serai.coin_balance(ExternalCoin::Bitcoin.into(), serai_addr).await.unwrap(),\n        Amount(0)\n      );\n\n      let mut plan_id = [0; 
32];\n      OsRng.fill_bytes(&mut plan_id);\n      let plan_id = plan_id;\n\n      // We should now get a SubstrateBlock\n      for processor in &mut processors {\n        assert_eq!(\n          processor.recv_message().await,\n          messages::CoordinatorMessage::Substrate(\n            messages::substrate::CoordinatorMessage::SubstrateBlock {\n              context: SubstrateContext {\n                serai_time: last_serai_block.time().unwrap() / 1000,\n                network_latest_finalized_block: coin_block,\n              },\n              block: last_serai_block.number(),\n              burns: vec![out_instruction.clone()],\n              batches: vec![],\n            }\n          )\n        );\n\n        // Send the ACK, claiming there's a plan to sign\n        processor\n          .send_message(messages::ProcessorMessage::Coordinator(\n            messages::coordinator::ProcessorMessage::SubstrateBlockAck {\n              block: last_serai_block.number(),\n              plans: vec![PlanMeta { session: Session(0), id: plan_id }],\n            },\n          ))\n          .await;\n      }\n\n      sign(&mut processors, &participant_is, Session(0), plan_id).await;\n    },\n    false,\n  )\n  .await;\n}\n"
  },
  {
    "path": "tests/docker/Cargo.toml",
    "content": "[package]\nname = \"serai-docker-tests\"\nversion = \"0.1.0\"\ndescription = \"Docker-based testing infrastructure for Serai\"\nlicense = \"AGPL-3.0-only\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/tests/docker\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nkeywords = []\nedition = \"2021\"\npublish = false\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nchrono = \"0.4\"\n"
  },
  {
    "path": "tests/docker/LICENSE",
    "content": "AGPL-3.0-only license\n\nCopyright (c) 2023 Luke Parker\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License Version 3 as\npublished by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n"
  },
  {
    "path": "tests/docker/README.md",
    "content": "# Docker Tests\n\nTest infrastructure based around Docker.\n"
  },
  {
    "path": "tests/docker/src/lib.rs",
    "content": "use std::{\n  sync::{Mutex, OnceLock},\n  collections::{HashSet, HashMap},\n  time::SystemTime,\n  path::PathBuf,\n  fs, env,\n  process::Command,\n};\n\npub fn fresh_logs_folder(first: bool, label: &str) -> String {\n  let logs_path = [std::env::current_dir().unwrap().to_str().unwrap(), \".test-logs\", label]\n    .iter()\n    .collect::<std::path::PathBuf>();\n  if first {\n    let _ = fs::remove_dir_all(&logs_path);\n    fs::create_dir_all(&logs_path).expect(\"couldn't create logs directory\");\n    assert!(\n      fs::read_dir(&logs_path).expect(\"couldn't read the logs folder\").next().is_none(),\n      \"logs folder wasn't empty, despite removing it at the start of the run\",\n    );\n  }\n  logs_path.to_str().unwrap().to_string()\n}\n\n// TODO: Merge this with what's in serai-orchestrator/have serai-orchestrator perform building\nstatic BUILT: OnceLock<Mutex<HashMap<String, bool>>> = OnceLock::new();\npub fn build(name: String) {\n  let built = BUILT.get_or_init(|| Mutex::new(HashMap::new()));\n  // Only one call to build will acquire this lock\n  let mut built_lock = built.lock().unwrap();\n  if built_lock.contains_key(&name) {\n    // If it was built, return\n    return;\n  }\n\n  // Else, hold the lock while we build\n  let mut repo_path = env::current_exe().unwrap();\n  repo_path.pop();\n  assert!(repo_path.as_path().ends_with(\"deps\"));\n  repo_path.pop();\n  assert!(repo_path.as_path().ends_with(\"debug\"));\n  repo_path.pop();\n  assert!(repo_path.as_path().ends_with(\"target\"));\n  repo_path.pop();\n\n  // Run the orchestrator to ensure the most recent files exist\n  if !Command::new(\"cargo\")\n    .current_dir(&repo_path)\n    .arg(\"run\")\n    .arg(\"-p\")\n    .arg(\"serai-orchestrator\")\n    .arg(\"--\")\n    .arg(\"key_gen\")\n    .arg(\"dev\")\n    .spawn()\n    .unwrap()\n    .wait()\n    .unwrap()\n    .success()\n  {\n    panic!(\"failed to run the orchestrator\");\n  }\n\n  if !Command::new(\"cargo\")\n    
.current_dir(&repo_path)\n    .arg(\"run\")\n    .arg(\"-p\")\n    .arg(\"serai-orchestrator\")\n    .arg(\"--\")\n    .arg(\"setup\")\n    .arg(\"dev\")\n    .spawn()\n    .unwrap()\n    .wait()\n    .unwrap()\n    .success()\n  {\n    panic!(\"failed to run the orchestrator\");\n  }\n\n  let mut orchestration_path = repo_path.clone();\n  orchestration_path.push(\"orchestration\");\n  if name != \"runtime\" {\n    orchestration_path.push(\"dev\");\n  }\n\n  let mut dockerfile_path = orchestration_path.clone();\n  if HashSet::from([\"bitcoin\", \"ethereum\", \"ethereum-relayer\", \"monero\"]).contains(name.as_str()) {\n    dockerfile_path = dockerfile_path.join(\"networks\");\n  }\n  if name.contains(\"-processor\") {\n    dockerfile_path =\n      dockerfile_path.join(\"processor\").join(name.split('-').next().unwrap()).join(\"Dockerfile\");\n  } else if name == \"serai-fast-epoch\" {\n    dockerfile_path = dockerfile_path.join(\"serai\").join(\"Dockerfile.fast-epoch\");\n  } else {\n    dockerfile_path = dockerfile_path.join(&name).join(\"Dockerfile\");\n  }\n\n  // If this Docker image was created after this repo was last edited, return here\n  // This should have better performance than Docker and allows running while offline\n  if let Ok(res) = Command::new(\"docker\")\n    .arg(\"inspect\")\n    .arg(\"-f\")\n    .arg(\"{{ .Metadata.LastTagTime }}\")\n    .arg(format!(\"serai-dev-{name}\"))\n    .output()\n  {\n    let last_tag_time_buf = String::from_utf8(res.stdout).expect(\"docker had non-utf8 output\");\n    let last_tag_time = last_tag_time_buf.trim();\n    if !last_tag_time.is_empty() {\n      let created_time = SystemTime::from(\n        chrono::DateTime::parse_and_remainder(last_tag_time, \"%F %T.%f %z\")\n          .unwrap_or_else(|_| {\n            panic!(\"docker formatted last tag time unexpectedly: {last_tag_time}\")\n          })\n          .0,\n      );\n\n      // For all services, if the Dockerfile was edited after the image was built we 
should rebuild\n      let mut last_modified =\n        fs::metadata(&dockerfile_path).ok().and_then(|meta| meta.modified().ok());\n\n      // Check any additionally specified paths\n      let meta = |path: PathBuf| (path.clone(), fs::metadata(path));\n      let mut metadatas = match name.as_str() {\n        \"bitcoin\" | \"ethereum\" | \"monero\" => vec![],\n        \"ethereum-relayer\" => {\n          vec![meta(repo_path.join(\"common\")), meta(repo_path.join(\"networks\"))]\n        }\n        \"message-queue\" => vec![\n          meta(repo_path.join(\"common\")),\n          meta(repo_path.join(\"crypto\")),\n          meta(repo_path.join(\"substrate\").join(\"primitives\")),\n          meta(repo_path.join(\"message-queue\")),\n        ],\n        \"bitcoin-processor\" | \"ethereum-processor\" | \"monero-processor\" => vec![\n          meta(repo_path.join(\"common\")),\n          meta(repo_path.join(\"crypto\")),\n          meta(repo_path.join(\"networks\")),\n          meta(repo_path.join(\"substrate\")),\n          meta(repo_path.join(\"message-queue\")),\n          meta(repo_path.join(\"processor\")),\n        ],\n        \"coordinator\" => vec![\n          meta(repo_path.join(\"common\")),\n          meta(repo_path.join(\"crypto\")),\n          meta(repo_path.join(\"networks\")),\n          meta(repo_path.join(\"substrate\")),\n          meta(repo_path.join(\"message-queue\")),\n          meta(repo_path.join(\"coordinator\")),\n        ],\n        \"runtime\" | \"serai\" | \"serai-fast-epoch\" => vec![\n          meta(repo_path.join(\"common\")),\n          meta(repo_path.join(\"crypto\")),\n          meta(repo_path.join(\"substrate\")),\n        ],\n        _ => panic!(\"building unrecognized docker image\"),\n      };\n\n      while !metadatas.is_empty() {\n        if let (path, Ok(metadata)) = metadatas.pop().unwrap() {\n          if metadata.is_file() {\n            if let Ok(modified) = metadata.modified() {\n              if modified >\n                
last_modified\n                  .expect(\"got when source was last modified yet not when the Dockerfile was\")\n              {\n                last_modified = Some(modified);\n              }\n            }\n          } else {\n            // Recursively crawl since we care when the folder's contents were edited, not the\n            // folder itself\n            for entry in fs::read_dir(path.clone()).expect(\"couldn't read directory\") {\n              metadatas.push(meta(\n                path.join(entry.expect(\"couldn't access item in directory\").file_name()),\n              ));\n            }\n          }\n        }\n      }\n\n      if let Some(last_modified) = last_modified {\n        if last_modified < created_time {\n          println!(\"{name} was built after the most recent source code edits, assuming built.\");\n          built_lock.insert(name, true);\n          return;\n        }\n      }\n    }\n  }\n\n  println!(\"Building {}...\", &name);\n\n  // Version which always prints\n  if !Command::new(\"docker\")\n    .current_dir(&repo_path)\n    .arg(\"build\")\n    .arg(\"-f\")\n    .arg(dockerfile_path)\n    .arg(\".\")\n    .arg(\"-t\")\n    .arg(format!(\"serai-dev-{name}\"))\n    .spawn()\n    .unwrap()\n    .wait()\n    .unwrap()\n    .success()\n  {\n    panic!(\"failed to build {name}\");\n  }\n\n  // Version which only prints on error\n  /*\n  let res = Command::new(\"docker\")\n    .current_dir(dockerfile_path)\n    .arg(\"build\")\n    .arg(\".\")\n    .arg(\"-t\")\n    .arg(format!(\"serai-dev-{name}\"))\n    .output()\n    .unwrap();\n  if !res.status.success() {\n    println!(\"failed to build {name}\\n\");\n    println!(\"-- stdout --\");\n    println!(\n      \"{}\\r\\n\",\n      String::from_utf8(res.stdout)\n        .unwrap_or_else(|_| \"stdout had non-utf8 characters\".to_string())\n    );\n    println!(\"-- stderr --\");\n    println!(\n      \"{}\\r\\n\",\n      String::from_utf8(res.stderr)\n        .unwrap_or_else(|_| \"stderr 
had non-utf8 characters\".to_string())\n    );\n    panic!(\"failed to build {name}\");\n  }\n  */\n\n  println!(\"Built!\");\n\n  if std::env::var(\"GITHUB_CI\").is_ok() {\n    println!(\"In CI, so clearing cache to prevent hitting the storage limits.\");\n    if !Command::new(\"docker\")\n      .arg(\"builder\")\n      .arg(\"prune\")\n      .arg(\"--all\")\n      .arg(\"--force\")\n      .output()\n      .unwrap()\n      .status\n      .success()\n    {\n      println!(\"failed to clear cache after building {name}\\n\");\n    }\n  }\n\n  // Set built\n  built_lock.insert(name, true);\n}\n"
  },
  {
    "path": "tests/full-stack/Cargo.toml",
    "content": "[package]\nname = \"serai-full-stack-tests\"\nversion = \"0.1.0\"\ndescription = \"Tests for Serai's Full Stack\"\nlicense = \"AGPL-3.0-only\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/tests/full-stack\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nkeywords = []\nedition = \"2021\"\npublish = false\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nhex = \"0.4\"\n\nasync-trait = \"0.1\"\n\nzeroize = { version = \"1\", default-features = false }\nrand_core = { version = \"0.6\", default-features = false }\n\ncurve25519-dalek = { version = \"4\", features = [\"rand_core\"] }\n\nbitcoin-serai = { path = \"../../networks/bitcoin\" }\nmonero-simple-request-rpc = { git = \"https://github.com/monero-oxide/monero-oxide\", rev = \"32e6b5fe5ba9e1ea3e68da882550005122a11d22\" }\nmonero-wallet = { git = \"https://github.com/monero-oxide/monero-oxide\", rev = \"32e6b5fe5ba9e1ea3e68da882550005122a11d22\" }\n\nscale = { package = \"parity-scale-codec\", version = \"3\" }\nserde = \"1\"\nserde_json = \"1\"\n\nprocessor = { package = \"serai-processor\", path = \"../../processor\", features = [\"bitcoin\", \"monero\"] }\n\nserai-client = { path = \"../../substrate/client\", features = [\"serai\"] }\n\ntokio = { version = \"1\", features = [\"time\"] }\n\ndockertest = \"0.5\"\nserai-docker-tests = { path = \"../docker\" }\nserai-message-queue-tests = { path = \"../message-queue\" }\nserai-processor-tests = { path = \"../processor\" }\nserai-coordinator-tests = { path = \"../coordinator\" }\n"
  },
  {
    "path": "tests/full-stack/LICENSE",
    "content": "AGPL-3.0-only license\n\nCopyright (c) 2023 Luke Parker\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License Version 3 as\npublished by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n"
  },
  {
    "path": "tests/full-stack/src/lib.rs",
    "content": "use std::time::Duration;\n\nuse serai_client::Serai;\n\nuse dockertest::DockerOperations;\n\nuse serai_processor_tests::{RPC_USER, RPC_PASS};\n\n#[cfg(test)]\nmod tests;\n\n#[allow(unused)]\n#[derive(Clone, Debug)]\npub struct Handles {\n  bitcoin: (String, u32),\n  bitcoin_processor: String,\n  monero: (String, u32),\n  monero_processor: String,\n  message_queue: String,\n  serai: String,\n}\n\nimpl Handles {\n  pub async fn serai(&self, ops: &DockerOperations) -> Serai {\n    let serai_rpc = ops.handle(&self.serai).host_port(9944).unwrap();\n    let serai_rpc = format!(\"http://{}:{}\", serai_rpc.0, serai_rpc.1);\n\n    // If the RPC server has yet to start, sleep for up to 60s until it does\n    for _ in 0 .. 60 {\n      tokio::time::sleep(Duration::from_secs(1)).await;\n      let Ok(client) = Serai::new(serai_rpc.clone()).await else { continue };\n      if client.latest_finalized_block_hash().await.is_err() {\n        continue;\n      }\n      return client;\n    }\n    panic!(\"serai RPC server wasn't available after 60s\");\n  }\n\n  pub async fn bitcoin(&self, ops: &DockerOperations) -> bitcoin_serai::rpc::Rpc {\n    let rpc = ops.handle(&self.bitcoin.0).host_port(self.bitcoin.1).unwrap();\n    let rpc = format!(\"http://{RPC_USER}:{RPC_PASS}@{}:{}\", rpc.0, rpc.1);\n\n    // If the RPC server has yet to start, sleep for up to 60s until it does\n    for _ in 0 .. 
60 {\n      tokio::time::sleep(Duration::from_secs(1)).await;\n      let Ok(client) = bitcoin_serai::rpc::Rpc::new(rpc.clone()).await else { continue };\n      return client;\n    }\n    panic!(\"bitcoin RPC server wasn't available after 60s\");\n  }\n\n  pub async fn monero(\n    &self,\n    ops: &DockerOperations,\n  ) -> monero_simple_request_rpc::SimpleRequestRpc {\n    use monero_simple_request_rpc::SimpleRequestRpc;\n    use monero_wallet::rpc::Rpc;\n\n    let rpc = ops.handle(&self.monero.0).host_port(self.monero.1).unwrap();\n    let rpc = format!(\"http://{RPC_USER}:{RPC_PASS}@{}:{}\", rpc.0, rpc.1);\n\n    // If the RPC server has yet to start, sleep for up to 60s until it does\n    for _ in 0 .. 60 {\n      tokio::time::sleep(Duration::from_secs(1)).await;\n      let Ok(client) = SimpleRequestRpc::new(rpc.clone()).await else { continue };\n      if client.get_height().await.is_err() {\n        continue;\n      }\n      return client;\n    }\n    panic!(\"monero RPC server wasn't available after 60s\");\n  }\n}\n"
  },
  {
    "path": "tests/full-stack/src/tests/mint_and_burn.rs",
    "content": "use std::{\n  sync::{OnceLock, Arc, Mutex},\n  time::{Duration, Instant},\n};\n\nuse zeroize::Zeroizing;\nuse rand_core::{RngCore, OsRng};\n\nuse scale::Encode;\n\nuse serai_client::{\n  coins::primitives::{OutInstruction, OutInstructionWithBalance},\n  in_instructions::primitives::Shorthand,\n  primitives::{\n    insecure_pair_from_name, Amount, Balance, Coin, ExternalAddress, ExternalBalance, ExternalCoin,\n    SeraiAddress,\n  },\n  validator_sets::primitives::{ExternalValidatorSet, Session},\n  PairTrait, SeraiCoins,\n};\n\nuse crate::tests::*;\n\n// TODO: Break this test out into functions re-usable across processor, processor e2e, and full\n// stack tests\n#[tokio::test]\nasync fn mint_and_burn_test() {\n  new_test(|ops, handles: Vec<Handles>| async move {\n    let ops = Arc::new(ops);\n    let serai = handles[0].serai(&ops).await;\n\n    // Helper to mine a block on each network\n    async fn mine_blocks(\n      handles: &[Handles],\n      ops: &DockerOperations,\n      producer: &mut usize,\n      count: usize,\n    ) {\n      static MINE_BLOCKS_CALL: OnceLock<tokio::sync::Mutex<()>> = OnceLock::new();\n\n      // Only let one instance of this function run at a time\n      let _lock = MINE_BLOCKS_CALL.get_or_init(|| tokio::sync::Mutex::new(())).lock().await;\n\n      // Pick a block producer via a round robin\n      let producer_handles = &handles[*producer];\n      *producer += 1;\n      *producer %= handles.len();\n\n      // Mine a Bitcoin block\n      let bitcoin_blocks = {\n        use bitcoin_serai::bitcoin::{\n          secp256k1::{SECP256K1, SecretKey},\n          PrivateKey, PublicKey,\n          consensus::Encodable,\n          network::Network,\n          address::Address,\n        };\n\n        let addr = Address::p2pkh(\n          PublicKey::from_private_key(\n            SECP256K1,\n            &PrivateKey::new(SecretKey::from_slice(&[0x01; 32]).unwrap(), Network::Bitcoin),\n          ),\n          Network::Regtest,\n        
);\n\n        let rpc = producer_handles.bitcoin(ops).await;\n        let mut res = Vec::with_capacity(count);\n        for _ in 0 .. count {\n          let hash = rpc\n            .rpc_call::<Vec<String>>(\"generatetoaddress\", serde_json::json!([1, addr]))\n            .await\n            .unwrap()\n            .swap_remove(0);\n\n          let mut bytes = vec![];\n          rpc\n            .get_block(&hex::decode(hash).unwrap().try_into().unwrap())\n            .await\n            .unwrap()\n            .consensus_encode(&mut bytes)\n            .unwrap();\n          res.push(serde_json::json!([hex::encode(bytes)]));\n        }\n        res\n      };\n\n      // Mine a Monero block\n      let monero_blocks = {\n        use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, scalar::Scalar};\n        use monero_wallet::{rpc::Rpc, ViewPair, address::Network};\n\n        let addr = ViewPair::new(ED25519_BASEPOINT_POINT, Zeroizing::new(Scalar::ONE))\n          .unwrap()\n          .legacy_address(Network::Mainnet);\n\n        let rpc = producer_handles.monero(ops).await;\n        let mut res = Vec::with_capacity(count);\n        for _ in 0 .. count {\n          let block =\n            rpc.get_block(rpc.generate_blocks(&addr, 1).await.unwrap().0[0]).await.unwrap();\n\n          let mut txs = Vec::with_capacity(block.transactions.len());\n          for tx in &block.transactions {\n            txs.push(rpc.get_transaction(*tx).await.unwrap());\n          }\n          res.push((serde_json::json!([hex::encode(block.serialize())]), txs));\n        }\n        res\n      };\n\n      // Relay it to all other nodes\n      // If the producer is 0, the producer variable will be 1 since we already incremented\n      // it\n      // With 4 nodes, this will run 1 .. 4, which is the correct range\n      for receiver in *producer .. 
(*producer + (handles.len() - 1)) {\n        let receiver = receiver % handles.len();\n        let handles = &handles[receiver];\n\n        {\n          let rpc = handles.bitcoin(ops).await;\n          for block in &bitcoin_blocks {\n            let _: () = rpc.rpc_call(\"submitblock\", block.clone()).await.unwrap();\n          }\n        }\n\n        {\n          use monero_wallet::rpc::Rpc;\n\n          let rpc = handles.monero(ops).await;\n\n          for (block, txs) in &monero_blocks {\n            // Broadcast the Monero TXs, as they're not simply included with the block\n            for tx in txs {\n              // Ignore any errors since the TX already being present will return an error\n              let _ = rpc.publish_transaction(tx).await;\n            }\n\n            #[derive(Debug, serde::Deserialize)]\n            struct EmptyResponse {}\n            let _: EmptyResponse =\n              rpc.json_rpc_call(\"submit_block\", Some(block.clone())).await.unwrap();\n          }\n        }\n      }\n    }\n\n    // Mine blocks to create mature funds\n    mine_blocks(&handles, &ops, &mut 0, 101).await;\n\n    // Spawn a background task to mine blocks on Bitcoin/Monero\n    let keep_mining = Arc::new(Mutex::new(true));\n    {\n      let keep_mining = keep_mining.clone();\n      let existing = std::panic::take_hook();\n      std::panic::set_hook(Box::new(move |panic| {\n        // On panic, set keep_mining to false\n        if let Ok(mut keep_mining) = keep_mining.lock() {\n          *keep_mining = false;\n        } else {\n          println!(\"panic which poisoned keep_mining\");\n        }\n        existing(panic);\n      }));\n    }\n\n    let mining_task = {\n      let ops = ops.clone();\n      let handles = handles.clone();\n      let keep_mining = keep_mining.clone();\n      tokio::spawn(async move {\n        let start = Instant::now();\n        let mut producer = 0;\n        while {\n          // Ensure this is deref'd to a bool, not any permutation 
of the lock\n          let keep_mining: bool = *keep_mining.lock().unwrap();\n          // Bound execution to 60m\n          keep_mining && (Instant::now().duration_since(start) < Duration::from_secs(60 * 60))\n        } {\n          // Mine a block every 3s\n          tokio::time::sleep(Duration::from_secs(3)).await;\n          mine_blocks(&handles, &ops, &mut producer, 1).await;\n        }\n      })\n    };\n\n    // Get the generated keys\n    let (bitcoin_key_pair, monero_key_pair) = {\n      let key_pair = {\n        let serai = &serai;\n        move |additional, network| async move {\n          // If this is an additional key pair, it should've completed with the first barring\n          // misc latency, so only sleep up to 5 minutes\n          // If this is the first key pair, wait up to 10 minutes\n          let halt_at = if additional { 5 * 10 } else { 10 * 10 };\n          let print_at = halt_at / 2;\n          for i in 0 .. halt_at {\n            if let Some(key_pair) = serai\n              .as_of_latest_finalized_block()\n              .await\n              .unwrap()\n              .validator_sets()\n              .keys(ExternalValidatorSet { network, session: Session(0) })\n              .await\n              .unwrap()\n            {\n              return key_pair;\n            }\n\n            if i == print_at {\n              println!(\n                \"waiting for {}key gen to complete, it has been {} minutes\",\n                if additional { \"another \" } else { \"\" },\n                print_at / 10,\n              );\n            }\n            tokio::time::sleep(Duration::from_secs(6)).await;\n          }\n\n          panic!(\n            \"{}key gen did not complete within {} minutes\",\n            if additional { \"another \" } else { \"\" },\n            halt_at / 10,\n          );\n        }\n      };\n\n      (\n        key_pair(false, ExternalNetworkId::Bitcoin).await,\n        key_pair(true, ExternalNetworkId::Monero).await,\n      
)\n    };\n\n    // Because the initial keys only become active when the network's time matches the Serai\n    // time, the Serai time is real yet the network time may be significantly delayed due to\n    // potentially being a median, mine a bunch of blocks now\n    mine_blocks(&handles, &ops, &mut 0, 100).await;\n\n    // Create a Serai address to receive the sriBTC/sriXMR to\n    let (serai_pair, serai_addr) = {\n      let mut name = [0; 4];\n      OsRng.fill_bytes(&mut name);\n      let pair = insecure_pair_from_name(&hex::encode(name));\n      let address = SeraiAddress::from(pair.public());\n\n      // Fund the new account to pay for fees\n      let balance = Balance { coin: Coin::Serai, amount: Amount(1_000_000_000) };\n      serai\n        .publish(&serai.sign(\n          &insecure_pair_from_name(\"Ferdie\"),\n          SeraiCoins::transfer(address, balance),\n          0,\n          Default::default(),\n        ))\n        .await\n        .unwrap();\n\n      (pair, address)\n    };\n\n    // Send in BTC\n    {\n      use bitcoin_serai::bitcoin::{\n        secp256k1::{SECP256K1, SecretKey, Message},\n        PrivateKey, PublicKey,\n        key::{XOnlyPublicKey, TweakedPublicKey},\n        sighash::{EcdsaSighashType, SighashCache},\n        script::{PushBytesBuf, Script, ScriptBuf, Builder},\n        absolute::LockTime,\n        transaction::{Version, Transaction},\n        Sequence, Witness, OutPoint, TxIn, Amount, TxOut, Network, Address,\n      };\n\n      let private_key =\n        PrivateKey::new(SecretKey::from_slice(&[0x01; 32]).unwrap(), Network::Bitcoin);\n      let public_key = PublicKey::from_private_key(SECP256K1, &private_key);\n      let addr = Address::p2pkh(public_key, Network::Bitcoin);\n\n      // Use the first block's coinbase\n      let rpc = handles[0].bitcoin(&ops).await;\n      let tx =\n        rpc.get_block(&rpc.get_block_hash(1).await.unwrap()).await.unwrap().txdata.swap_remove(0);\n      
#[allow(clippy::inconsistent_digit_grouping)]\n      let mut tx = Transaction {\n        version: Version(2),\n        lock_time: LockTime::ZERO,\n        input: vec![TxIn {\n          previous_output: OutPoint { txid: tx.compute_txid(), vout: 0 },\n          script_sig: Script::new().into(),\n          sequence: Sequence(u32::MAX),\n          witness: Witness::default(),\n        }],\n        output: vec![\n          TxOut {\n            value: Amount::from_sat(1_100_000_00),\n            script_pubkey: Address::p2tr_tweaked(\n              TweakedPublicKey::dangerous_assume_tweaked(\n                XOnlyPublicKey::from_slice(&bitcoin_key_pair.1[1 ..]).unwrap(),\n              ),\n              Network::Bitcoin,\n            )\n            .script_pubkey(),\n          },\n          TxOut {\n            // change = amount spent - fee\n            value: Amount::from_sat(tx.output[0].value.to_sat() - 1_100_000_00 - 1_000_00),\n            script_pubkey: Address::p2tr_tweaked(\n              TweakedPublicKey::dangerous_assume_tweaked(\n                XOnlyPublicKey::from_slice(&public_key.inner.serialize()[1 ..]).unwrap(),\n              ),\n              Network::Bitcoin,\n            )\n            .script_pubkey(),\n          },\n          TxOut {\n            value: Amount::ZERO,\n            script_pubkey: ScriptBuf::new_op_return(\n              PushBytesBuf::try_from(Shorthand::transfer(None, serai_addr).encode()).unwrap(),\n            ),\n          },\n        ],\n      };\n\n      let mut der = SECP256K1\n        .sign_ecdsa_low_r(\n          &Message::from_digest_slice(\n            SighashCache::new(&tx)\n              .legacy_signature_hash(0, &addr.script_pubkey(), EcdsaSighashType::All.to_u32())\n              .unwrap()\n              .to_raw_hash()\n              .as_ref(),\n          )\n          .unwrap(),\n          &private_key.inner,\n        )\n        .serialize_der()\n        .to_vec();\n      der.push(1);\n      tx.input[0].script_sig = 
Builder::new()\n        .push_slice(PushBytesBuf::try_from(der).unwrap())\n        .push_key(&public_key)\n        .into_script();\n\n      rpc.send_raw_transaction(&tx).await.unwrap();\n    }\n\n    // Send in XMR\n    {\n      use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, scalar::Scalar};\n      use monero_wallet::{\n        io::decompress_point,\n        ringct::RctType,\n        rpc::{FeePriority, Rpc},\n        address::{Network, AddressType, MoneroAddress},\n        ViewPair, Scanner, OutputWithDecoys,\n        send::{Change, SignableTransaction},\n      };\n\n      // Grab the first output on the chain\n      let rpc = handles[0].monero(&ops).await;\n      let view_pair = ViewPair::new(ED25519_BASEPOINT_POINT, Zeroizing::new(Scalar::ONE)).unwrap();\n      let mut scanner = Scanner::new(view_pair.clone());\n      let output = scanner\n        .scan(rpc.get_scannable_block_by_number(1).await.unwrap())\n        .unwrap()\n        .additional_timelock_satisfied_by(rpc.get_height().await.unwrap(), 0)\n        .swap_remove(0);\n\n      let input = OutputWithDecoys::fingerprintable_deterministic_new(\n        &mut OsRng,\n        &rpc,\n        16,\n        rpc.get_height().await.unwrap(),\n        output.clone(),\n      )\n      .await\n      .unwrap();\n\n      let mut outgoing_view_key = Zeroizing::new([0; 32]);\n      OsRng.fill_bytes(outgoing_view_key.as_mut());\n      let tx = SignableTransaction::new(\n        RctType::ClsagBulletproofPlus,\n        outgoing_view_key,\n        vec![input],\n        vec![(\n          MoneroAddress::new(\n            Network::Mainnet,\n            AddressType::Featured { guaranteed: true, subaddress: false, payment_id: None },\n            decompress_point(monero_key_pair.1.to_vec().try_into().unwrap()).unwrap(),\n            ED25519_BASEPOINT_POINT *\n              processor::additional_key::<processor::networks::monero::Monero>(0).0,\n          ),\n          1_100_000_000_000,\n        )],\n        
Change::new(view_pair.clone(), None),\n        vec![Shorthand::transfer(None, serai_addr).encode()],\n        rpc.get_fee_rate(FeePriority::Unimportant).await.unwrap(),\n      )\n      .unwrap()\n      .sign(&mut OsRng, &Zeroizing::new(Scalar::ONE))\n      .unwrap();\n\n      rpc.publish_transaction(&tx).await.unwrap()\n    }\n\n    // Wait for Batch publication\n    // TODO: Merge this block with the above one\n    // (take in a lambda for the specific checks to execute?)\n    {\n      let wait_for_batch = {\n        let serai = &serai;\n        move |additional, network| async move {\n          let halt_at = if additional { 5 * 10 } else { 10 * 10 };\n          let print_at = halt_at / 2;\n          for i in 0 .. halt_at {\n            if serai\n              .as_of_latest_finalized_block()\n              .await\n              .unwrap()\n              .in_instructions()\n              .last_batch_for_network(network)\n              .await\n              .unwrap()\n              .is_some()\n            {\n              return;\n            }\n\n            if i == print_at {\n              println!(\n                \"waiting for {}batch to complete, it has been {} minutes\",\n                if additional { \"another \" } else { \"\" },\n                print_at / 10,\n              );\n            }\n            tokio::time::sleep(Duration::from_secs(6)).await;\n          }\n\n          panic!(\n            \"{}batch did not complete within {} minutes\",\n            if additional { \"another \" } else { \"\" },\n            halt_at / 10,\n          );\n        }\n      };\n      wait_for_batch(false, ExternalNetworkId::Bitcoin).await;\n      wait_for_batch(true, ExternalNetworkId::Monero).await;\n    }\n\n    // TODO: Verify the mints\n\n    // Create a random Bitcoin/Monero address\n    let bitcoin_addr = {\n      use bitcoin_serai::bitcoin::{key::PublicKey, ScriptBuf};\n      ScriptBuf::new_p2pkh(\n        &(loop {\n          let mut bytes = [0; 33];\n        
  OsRng.fill_bytes(&mut bytes);\n          bytes[0] %= 4;\n          if let Ok(key) = PublicKey::from_slice(&bytes) {\n            break key;\n          }\n        })\n        .pubkey_hash(),\n      )\n    };\n\n    let (monero_spend, monero_view, monero_addr) = {\n      use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, scalar::Scalar};\n      let spend = ED25519_BASEPOINT_TABLE * &Scalar::random(&mut OsRng);\n      let view = Scalar::random(&mut OsRng);\n\n      use monero_wallet::address::{Network, AddressType, MoneroAddress};\n      let addr = MoneroAddress::new(\n        Network::Mainnet,\n        AddressType::Legacy,\n        spend,\n        ED25519_BASEPOINT_TABLE * &view,\n      );\n\n      (spend, view, addr)\n    };\n\n    // Get the current blocks\n    let mut start_bitcoin_block =\n      handles[0].bitcoin(&ops).await.get_latest_block_number().await.unwrap();\n    let mut start_monero_block = {\n      use monero_wallet::rpc::Rpc;\n      handles[0].monero(&ops).await.get_height().await.unwrap()\n    };\n\n    // Burn the sriBTC/sriXMR\n    {\n      let burn = {\n        let serai = &serai;\n        let serai_pair = &serai_pair;\n        move |nonce, coin, amount, address| async move {\n          let out_instruction = OutInstructionWithBalance {\n            balance: ExternalBalance { coin, amount: Amount(amount) },\n            instruction: OutInstruction { address, data: None },\n          };\n\n          serai\n            .publish(&serai.sign(\n              serai_pair,\n              SeraiCoins::burn_with_instruction(out_instruction),\n              nonce,\n              Default::default(),\n            ))\n            .await\n            .unwrap();\n        }\n      };\n\n      #[allow(clippy::inconsistent_digit_grouping)]\n      burn(\n        0,\n        ExternalCoin::Bitcoin,\n        1_000_000_00,\n        ExternalAddress::new(\n          serai_client::networks::bitcoin::Address::new(bitcoin_addr.clone()).unwrap().into(),\n        )\n    
    .unwrap(),\n      )\n      .await;\n\n      burn(\n        1,\n        ExternalCoin::Monero,\n        1_000_000_000_000,\n        ExternalAddress::new(\n          serai_client::networks::monero::Address::new(monero_addr).unwrap().into(),\n        )\n        .unwrap(),\n      )\n      .await;\n    }\n\n    // TODO: Verify the burns\n\n    // Verify the received Bitcoin TX\n    #[allow(clippy::inconsistent_digit_grouping)]\n    {\n      let rpc = handles[0].bitcoin(&ops).await;\n\n      // Check for up to 15 minutes\n      let mut found = false;\n      let mut i = 0;\n      while i < (15 * 6) {\n        if let Ok(hash) = rpc.get_block_hash(start_bitcoin_block).await {\n          let block = rpc.get_block(&hash).await.unwrap();\n          start_bitcoin_block += 1;\n\n          if block.txdata.len() > 1 {\n            assert_eq!(block.txdata.len(), 2);\n            assert_eq!(block.txdata[1].output.len(), 2);\n\n            let received_output = block.txdata[1]\n              .output\n              .iter()\n              .find(|output| output.script_pubkey == bitcoin_addr)\n              .unwrap();\n\n            let tx_fee = 1_100_000_00 -\n              block.txdata[1].output.iter().map(|output| output.value.to_sat()).sum::<u64>();\n\n            assert_eq!(received_output.value.to_sat(), 1_000_000_00 - tx_fee);\n            found = true;\n          }\n        } else {\n          i += 1;\n          tokio::time::sleep(Duration::from_secs(10)).await;\n        }\n      }\n      if !found {\n        panic!(\"couldn't find the expected Bitcoin transaction within 15 minutes\");\n      }\n    }\n\n    // Verify the received Monero TX\n    {\n      use monero_wallet::{transaction::Transaction, rpc::Rpc, ViewPair, Scanner};\n      let rpc = handles[0].monero(&ops).await;\n      let mut scanner =\n        Scanner::new(ViewPair::new(monero_spend, Zeroizing::new(monero_view)).unwrap());\n\n      // Check for up to 5 minutes\n      let mut found = false;\n      let mut i = 
0;\n      while i < (5 * 6) {\n        if let Ok(block) = rpc.get_block_by_number(start_monero_block).await {\n          start_monero_block += 1;\n          let outputs = scanner\n            .scan(rpc.get_scannable_block(block.clone()).await.unwrap())\n            .unwrap()\n            .not_additionally_locked();\n          if !outputs.is_empty() {\n            assert_eq!(outputs.len(), 1);\n\n            assert_eq!(block.transactions.len(), 1);\n            let tx = rpc.get_transaction(block.transactions[0]).await.unwrap();\n            let tx_fee = match &tx {\n              Transaction::V2 { proofs: Some(proofs), .. } => proofs.base.fee,\n              _ => panic!(\"fetched TX wasn't a signed V2 TX\"),\n            };\n\n            assert_eq!(outputs[0].commitment().amount, 1_000_000_000_000 - tx_fee);\n            found = true;\n          }\n        } else {\n          i += 1;\n          tokio::time::sleep(Duration::from_secs(10)).await;\n        }\n      }\n      if !found {\n        panic!(\"couldn't find the expected Monero transaction within 5 minutes\");\n      }\n    }\n\n    *keep_mining.lock().unwrap() = false;\n    mining_task.await.unwrap();\n  })\n  .await;\n}\n"
  },
  {
    "path": "tests/full-stack/src/tests/mod.rs",
    "content": "use core::future::Future;\nuse std::{sync::OnceLock, collections::HashMap};\n\nuse tokio::sync::Mutex;\n\nuse serai_client::primitives::ExternalNetworkId;\n\nuse dockertest::{\n  LogAction, LogPolicy, LogSource, LogOptions, StartPolicy, TestBodySpecification,\n  DockerOperations, DockerTest,\n};\n\nuse serai_docker_tests::fresh_logs_folder;\nuse serai_processor_tests::{network_instance, processor_instance};\nuse serai_message_queue_tests::instance as message_queue_instance;\nuse serai_coordinator_tests::{coordinator_instance, serai_composition};\n\nuse crate::*;\n\nmod mint_and_burn;\n\npub(crate) const VALIDATORS: usize = 4;\n// pub(crate) const THRESHOLD: usize = ((VALIDATORS * 2) / 3) + 1;\n\nstatic UNIQUE_ID: OnceLock<Mutex<u16>> = OnceLock::new();\n\n#[async_trait::async_trait]\npub(crate) trait TestBody: 'static + Send + Sync {\n  async fn body(&self, ops: DockerOperations, handles: Vec<Handles>);\n}\n#[async_trait::async_trait]\nimpl<F: Send + Future, TB: 'static + Send + Sync + Fn(DockerOperations, Vec<Handles>) -> F> TestBody\n  for TB\n{\n  async fn body(&self, ops: DockerOperations, handles: Vec<Handles>) {\n    (self)(ops, handles).await;\n  }\n}\n\npub(crate) async fn new_test(test_body: impl TestBody) {\n  let mut unique_id_lock = UNIQUE_ID.get_or_init(|| Mutex::new(0)).lock().await;\n\n  let mut all_handles = vec![];\n  let mut test = DockerTest::new().with_network(dockertest::Network::Isolated);\n  let mut coordinator_compositions = vec![];\n  for i in 0 .. 
VALIDATORS {\n    let name = match i {\n      0 => \"Alice\",\n      1 => \"Bob\",\n      2 => \"Charlie\",\n      3 => \"Dave\",\n      4 => \"Eve\",\n      5 => \"Ferdie\",\n      _ => panic!(\"needed a 7th name for a serai node\"),\n    };\n\n    let (coord_key, message_queue_keys, message_queue_composition) = message_queue_instance();\n\n    let (bitcoin_composition, bitcoin_port) = network_instance(ExternalNetworkId::Bitcoin);\n    let mut bitcoin_processor_composition = processor_instance(\n      ExternalNetworkId::Bitcoin,\n      bitcoin_port,\n      message_queue_keys[&ExternalNetworkId::Bitcoin],\n    );\n    assert_eq!(bitcoin_processor_composition.len(), 1);\n    let bitcoin_processor_composition = bitcoin_processor_composition.swap_remove(0);\n\n    let (monero_composition, monero_port) = network_instance(ExternalNetworkId::Monero);\n    let mut monero_processor_composition = processor_instance(\n      ExternalNetworkId::Monero,\n      monero_port,\n      message_queue_keys[&ExternalNetworkId::Monero],\n    );\n    assert_eq!(monero_processor_composition.len(), 1);\n    let monero_processor_composition = monero_processor_composition.swap_remove(0);\n\n    let coordinator_composition = coordinator_instance(name, coord_key);\n    let serai_composition = serai_composition(name, false);\n\n    // Give every item in this stack a unique ID\n    // Uses a Mutex as we can't generate a 8-byte random ID without hitting hostname length limits\n    let (first, unique_id) = {\n      let first = *unique_id_lock == 0;\n      let unique_id = *unique_id_lock;\n      *unique_id_lock += 1;\n      (first, unique_id)\n    };\n\n    let logs_path = fresh_logs_folder(first, \"full-stack\");\n\n    let mut compositions = HashMap::new();\n    let mut handles = HashMap::new();\n    for (name, composition) in [\n      (\"message_queue\", message_queue_composition),\n      (\"bitcoin\", bitcoin_composition),\n      (\"bitcoin_processor\", bitcoin_processor_composition),\n      
(\"monero\", monero_composition),\n      (\"monero_processor\", monero_processor_composition),\n      (\"coordinator\", coordinator_composition),\n      (\"serai\", serai_composition),\n    ] {\n      let handle = format!(\"full_stack-{name}-{unique_id}\");\n      compositions.insert(\n        name,\n        composition\n          .set_start_policy(StartPolicy::Strict)\n          .set_handle(handle.clone())\n          .set_log_options(Some(LogOptions {\n            action: if std::env::var(\"GITHUB_CI\") == Ok(\"true\".to_string()) {\n              LogAction::Forward\n            } else {\n              LogAction::ForwardToFile { path: logs_path.clone() }\n            },\n            policy: LogPolicy::Always,\n            source: LogSource::Both,\n          })),\n      );\n      handles.insert(name, handle);\n    }\n\n    let handles = Handles {\n      message_queue: handles.remove(\"message_queue\").unwrap(),\n      bitcoin: (handles.remove(\"bitcoin\").unwrap(), bitcoin_port),\n      bitcoin_processor: handles.remove(\"bitcoin_processor\").unwrap(),\n      monero: (handles.remove(\"monero\").unwrap(), monero_port),\n      monero_processor: handles.remove(\"monero_processor\").unwrap(),\n      serai: handles.remove(\"serai\").unwrap(),\n    };\n\n    {\n      let bitcoin_processor_composition = compositions.get_mut(\"bitcoin_processor\").unwrap();\n      bitcoin_processor_composition\n        .inject_container_name(handles.message_queue.clone(), \"MESSAGE_QUEUE_RPC\");\n      bitcoin_processor_composition\n        .inject_container_name(handles.bitcoin.0.clone(), \"NETWORK_RPC_HOSTNAME\");\n    }\n\n    {\n      let monero_processor_composition = compositions.get_mut(\"monero_processor\").unwrap();\n      monero_processor_composition\n        .inject_container_name(handles.message_queue.clone(), \"MESSAGE_QUEUE_RPC\");\n      monero_processor_composition\n        .inject_container_name(handles.monero.0.clone(), \"NETWORK_RPC_HOSTNAME\");\n    }\n\n    
coordinator_compositions.push(compositions.remove(\"coordinator\").unwrap());\n\n    all_handles.push(handles);\n    for (_, composition) in compositions {\n      test.provide_container(composition);\n    }\n  }\n\n  struct Context {\n    pending_coordinator_compositions: Mutex<Vec<TestBodySpecification>>,\n    handles: Vec<Handles>,\n    test_body: Box<dyn TestBody>,\n  }\n  static CONTEXT: OnceLock<Mutex<Option<Context>>> = OnceLock::new();\n  *CONTEXT.get_or_init(|| Mutex::new(None)).lock().await = Some(Context {\n    pending_coordinator_compositions: Mutex::new(coordinator_compositions),\n    handles: all_handles,\n    test_body: Box::new(test_body),\n  });\n\n  // The DockerOperations from the first invocation, containing the Message Queue servers and the\n  // Serai nodes.\n  static OUTER_OPS: OnceLock<Mutex<Option<DockerOperations>>> = OnceLock::new();\n\n  // Reset OUTER_OPS\n  *OUTER_OPS.get_or_init(|| Mutex::new(None)).lock().await = None;\n\n  // Spawns a coordinator, if one has yet to be spawned, or else runs the test.\n  pub(crate) fn spawn_coordinator_or_run_test(\n    inner_ops: DockerOperations,\n  ) -> core::pin::Pin<Box<impl Send + Future<Output = ()>>> {\n    Box::pin(async {\n      // If the outer operations have yet to be set, these *are* the outer operations\n      let outer_ops = OUTER_OPS.get().unwrap();\n      if outer_ops.lock().await.is_none() {\n        *outer_ops.lock().await = Some(inner_ops);\n      }\n\n      let context_lock = CONTEXT.get().unwrap().lock().await;\n      let Context { pending_coordinator_compositions, handles, test_body } =\n        context_lock.as_ref().unwrap();\n\n      // Check if there is a coordinator left\n      let maybe_coordinator = {\n        let mut remaining = pending_coordinator_compositions.lock().await;\n        let maybe_coordinator = if !remaining.is_empty() {\n          let handles = handles[handles.len() - remaining.len()].clone();\n          let composition = remaining.remove(0);\n          
Some((composition, handles))\n        } else {\n          None\n        };\n        drop(remaining);\n        maybe_coordinator\n      };\n\n      if let Some((mut composition, handles)) = maybe_coordinator {\n        let network = {\n          let outer_ops = outer_ops.lock().await;\n          let outer_ops = outer_ops.as_ref().unwrap();\n          // Spawn it by building another DockerTest which recursively calls this function\n          // TODO: Spawn this outside of DockerTest so we can remove the recursion\n          let serai_container = outer_ops.handle(&handles.serai);\n          composition.modify_env(\"SERAI_HOSTNAME\", serai_container.ip());\n          let message_queue_container = outer_ops.handle(&handles.message_queue);\n          composition.modify_env(\"MESSAGE_QUEUE_RPC\", message_queue_container.ip());\n\n          format!(\"container:{}\", serai_container.name())\n        };\n        let mut test = DockerTest::new().with_network(dockertest::Network::External(network));\n        test.provide_container(composition);\n\n        drop(context_lock);\n        test.run_async(spawn_coordinator_or_run_test).await;\n      } else {\n        let outer_ops = outer_ops.lock().await.take().unwrap();\n        test_body.body(outer_ops, handles.clone()).await;\n      }\n    })\n  }\n\n  test.run_async(spawn_coordinator_or_run_test).await;\n}\n"
  },
  {
    "path": "tests/message-queue/Cargo.toml",
    "content": "[package]\nname = \"serai-message-queue-tests\"\nversion = \"0.1.0\"\ndescription = \"Tests for Serai's Message Queue\"\nlicense = \"AGPL-3.0-only\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/tests/message-queue\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nkeywords = []\nedition = \"2021\"\npublish = false\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nhex = \"0.4\"\n\nzeroize = { version = \"1\", default-features = false }\nrand_core = { version = \"0.6\", default-features = false, features = [\"getrandom\"] }\n\ndalek-ff-group = { path = \"../../crypto/dalek-ff-group\", default-features = false }\nciphersuite = { path = \"../../crypto/ciphersuite\", default-features = false }\n\nserai-primitives = { path = \"../../substrate/primitives\" }\nserai-message-queue = { path = \"../../message-queue\" }\n\ntokio = { version = \"1\", features = [\"time\"] }\ndockertest = \"0.5\"\nserai-docker-tests = { path = \"../docker\" }\n"
  },
  {
    "path": "tests/message-queue/LICENSE",
    "content": "AGPL-3.0-only license\n\nCopyright (c) 2023 Luke Parker\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License Version 3 as\npublished by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n"
  },
  {
    "path": "tests/message-queue/src/lib.rs",
    "content": "use std::collections::HashMap;\n\nuse rand_core::OsRng;\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::{\n  group::{ff::Field, GroupEncoding},\n  Ciphersuite,\n};\n\nuse serai_primitives::{ExternalNetworkId, EXTERNAL_NETWORKS};\n\nuse dockertest::{\n  PullPolicy, Image, LogAction, LogPolicy, LogSource, LogOptions, TestBodySpecification,\n};\n\npub type MessageQueuePrivateKey = <Ristretto as Ciphersuite>::F;\npub fn instance() -> (\n  MessageQueuePrivateKey,\n  HashMap<ExternalNetworkId, MessageQueuePrivateKey>,\n  TestBodySpecification,\n) {\n  serai_docker_tests::build(\"message-queue\".to_string());\n\n  let coord_key = <Ristretto as Ciphersuite>::F::random(&mut OsRng);\n  let priv_keys = EXTERNAL_NETWORKS\n    .into_iter()\n    .map(|n| (n, <Ristretto as Ciphersuite>::F::random(&mut OsRng)))\n    .collect::<HashMap<_, _>>();\n\n  let composition = TestBodySpecification::with_image(\n    Image::with_repository(\"serai-dev-message-queue\").pull_policy(PullPolicy::Never),\n  )\n  .set_log_options(Some(LogOptions {\n    action: LogAction::Forward,\n    policy: LogPolicy::Always,\n    source: LogSource::Both,\n  }))\n  .replace_env(\n    [\n      (\"COORDINATOR_KEY\".to_string(), hex::encode((Ristretto::generator() * coord_key).to_bytes())),\n      (\n        \"BITCOIN_KEY\".to_string(),\n        hex::encode((Ristretto::generator() * priv_keys[&ExternalNetworkId::Bitcoin]).to_bytes()),\n      ),\n      (\n        \"ETHEREUM_KEY\".to_string(),\n        hex::encode((Ristretto::generator() * priv_keys[&ExternalNetworkId::Ethereum]).to_bytes()),\n      ),\n      (\n        \"MONERO_KEY\".to_string(),\n        hex::encode((Ristretto::generator() * priv_keys[&ExternalNetworkId::Monero]).to_bytes()),\n      ),\n      (\"DB_PATH\".to_string(), \"./message-queue-db\".to_string()),\n      (\"RUST_LOG\".to_string(), \"serai_message_queue=trace,\".to_string()),\n    ]\n    .into(),\n  )\n  .set_publish_all_ports(true);\n\n  (coord_key, priv_keys, 
composition)\n}\n\n#[test]\nfn basic_functionality() {\n  use zeroize::Zeroizing;\n\n  use dockertest::DockerTest;\n\n  use serai_message_queue::{Service, Metadata, client::MessageQueue};\n\n  let mut test = DockerTest::new().with_network(dockertest::Network::Isolated);\n  let (coord_key, priv_keys, composition) = instance();\n  test.provide_container(composition);\n  test.run(|ops| async move {\n    tokio::time::timeout(core::time::Duration::from_secs(60), async move {\n      // Sleep for a second for the message-queue to boot\n      // It isn't an error to start immediately, it just silences an error\n      tokio::time::sleep(core::time::Duration::from_secs(1)).await;\n\n      let rpc = ops.handle(\"serai-dev-message-queue\").host_port(2287).unwrap();\n      let rpc = rpc.0.to_string() + \":\" + &rpc.1.to_string();\n\n      // Queue some messages\n      let coordinator =\n        MessageQueue::new(Service::Coordinator, rpc.clone(), Zeroizing::new(coord_key));\n      coordinator\n        .queue(\n          Metadata {\n            from: Service::Coordinator,\n            to: Service::Processor(ExternalNetworkId::Bitcoin),\n            intent: b\"intent\".to_vec(),\n          },\n          b\"Hello, World!\".to_vec(),\n        )\n        .await;\n\n      // Queue this twice, which message-queue should de-duplicate\n      for _ in 0 .. 
2 {\n        coordinator\n          .queue(\n            Metadata {\n              from: Service::Coordinator,\n              to: Service::Processor(ExternalNetworkId::Bitcoin),\n              intent: b\"intent 2\".to_vec(),\n            },\n            b\"Hello, World, again!\".to_vec(),\n          )\n          .await;\n      }\n\n      // Successfully get it\n      let bitcoin = MessageQueue::new(\n        Service::Processor(ExternalNetworkId::Bitcoin),\n        rpc.clone(),\n        Zeroizing::new(priv_keys[&ExternalNetworkId::Bitcoin]),\n      );\n      let msg = bitcoin.next(Service::Coordinator).await;\n      assert_eq!(msg.from, Service::Coordinator);\n      assert_eq!(msg.id, 0);\n      assert_eq!(&msg.msg, b\"Hello, World!\");\n\n      // If we don't ack it, it should continue to be returned\n      assert_eq!(msg, bitcoin.next(Service::Coordinator).await);\n\n      // Acknowledging it should yield the next message\n      bitcoin.ack(Service::Coordinator, 0).await;\n\n      let next_msg = bitcoin.next(Service::Coordinator).await;\n      assert!(msg != next_msg);\n      assert_eq!(next_msg.from, Service::Coordinator);\n      assert_eq!(next_msg.id, 1);\n      assert_eq!(&next_msg.msg, b\"Hello, World, again!\");\n      bitcoin.ack(Service::Coordinator, 1).await;\n\n      // No further messages should be available\n      tokio::time::timeout(core::time::Duration::from_secs(10), bitcoin.next(Service::Coordinator))\n        .await\n        .unwrap_err();\n\n      // Queueing to a distinct processor should work, with a unique ID\n      coordinator\n        .queue(\n          Metadata {\n            from: Service::Coordinator,\n            to: Service::Processor(ExternalNetworkId::Monero),\n            // Intents should be per-from-to, making this valid\n            intent: b\"intent\".to_vec(),\n          },\n          b\"Hello, World!\".to_vec(),\n        )\n        .await;\n\n      let monero = MessageQueue::new(\n        
Service::Processor(ExternalNetworkId::Monero),\n        rpc,\n        Zeroizing::new(priv_keys[&ExternalNetworkId::Monero]),\n      );\n      assert_eq!(monero.next(Service::Coordinator).await.id, 0);\n      monero.ack(Service::Coordinator, 0).await;\n      tokio::time::timeout(core::time::Duration::from_secs(10), monero.next(Service::Coordinator))\n        .await\n        .unwrap_err();\n    })\n    .await\n    .unwrap();\n  });\n}\n"
  },
  {
    "path": "tests/no-std/Cargo.toml",
    "content": "[package]\nname = \"serai-no-std-tests\"\nversion = \"0.1.0\"\ndescription = \"A crate to test no-std builds of Serai crates work\"\nlicense = \"MIT\"\nrepository = \"https://github.com/kayabaNerve/serai/tree/develop/tests/no-std\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nkeywords = [\"nostd\", \"no_std\", \"alloc\"]\nedition = \"2021\"\npublish = false\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nflexible-transcript = { path = \"../../crypto/transcript\", default-features = false, features = [\"recommended\", \"merlin\"] }\n\ndalek-ff-group = { path = \"../../crypto/dalek-ff-group\", default-features = false, features = [\"alloc\"] }\nminimal-ed448 = { path = \"../../crypto/ed448\", default-features = false, features = [\"alloc\"] }\n\nciphersuite = { path = \"../../crypto/ciphersuite\", default-features = false, features = [\"alloc\"] }\nciphersuite-kp256 = { path = \"../../crypto/ciphersuite/kp256\", default-features = false, features = [\"alloc\"] }\n\nmultiexp = { path = \"../../crypto/multiexp\", default-features = false, features = [\"batch\"] }\n\ndleq = { path = \"../../crypto/dleq\", default-features = false }\nschnorr-signatures = { path = \"../../crypto/schnorr\", default-features = false }\n\ndkg = { path = \"../../crypto/dkg\", default-features = false }\ndkg-recovery = { path = \"../../crypto/dkg/recovery\", default-features = false }\ndkg-dealer = { path = \"../../crypto/dkg/dealer\", default-features = false }\ndkg-musig = { path = \"../../crypto/dkg/musig\", default-features = false }\n# modular-frost = { path = \"../../crypto/frost\", default-features = false }\n# frost-schnorrkel = { path = \"../../crypto/schnorrkel\", default-features = false }\n\nbitcoin-serai = { path = \"../../networks/bitcoin\", default-features = false, features = [\"hazmat\"] }\n"
  },
  {
    "path": "tests/no-std/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2023 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "tests/no-std/README.md",
    "content": "# no-std tests\n\nA crate usable to test building various Serai crates in a no-std environment.\n"
  },
  {
    "path": "tests/no-std/src/lib.rs",
    "content": "#![no_std]\n\npub use flexible_transcript;\n\npub use dalek_ff_group;\npub use minimal_ed448;\n\npub use ciphersuite;\npub use ciphersuite_kp256;\n\npub use multiexp;\n\npub use dleq;\npub use schnorr_signatures;\n\npub use dkg;\npub use dkg_recovery;\npub use dkg_dealer;\npub use dkg_musig;\n/*\npub use modular_frost;\npub use frost_schnorrkel;\n*/\n\npub use bitcoin_serai;\n"
  },
  {
    "path": "tests/processor/Cargo.toml",
    "content": "[package]\nname = \"serai-processor-tests\"\nversion = \"0.1.0\"\ndescription = \"Tests for Serai's Processor\"\nlicense = \"AGPL-3.0-only\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/tests/processor\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nkeywords = []\nedition = \"2021\"\npublish = false\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nhex = \"0.4\"\n\nzeroize = { version = \"1\", default-features = false }\nrand_core = { version = \"0.6\", default-features = false, features = [\"getrandom\"] }\n\ncurve25519-dalek = \"4\"\ndalek-ff-group = { path = \"../../crypto/dalek-ff-group\", default-features = false }\nciphersuite = { path = \"../../crypto/ciphersuite\", default-features = false }\nciphersuite-kp256 = { path = \"../../crypto/ciphersuite/kp256\", default-features = false }\ndkg = { path = \"../../crypto/dkg\", default-features = false }\n\nbitcoin-serai = { path = \"../../networks/bitcoin\" }\n\nk256 = \"0.13\"\nethereum-serai = { path = \"../../networks/ethereum\" }\n\nmonero-simple-request-rpc = { git = \"https://github.com/monero-oxide/monero-oxide\", rev = \"32e6b5fe5ba9e1ea3e68da882550005122a11d22\" }\nmonero-wallet = { git = \"https://github.com/monero-oxide/monero-oxide\", rev = \"32e6b5fe5ba9e1ea3e68da882550005122a11d22\" }\n\nmessages = { package = \"serai-processor-messages\", path = \"../../processor/messages\" }\n\nscale = { package = \"parity-scale-codec\", version = \"3\" }\nserai-client = { path = \"../../substrate/client\" }\nserai-db = { path = \"../../common/db\", default-features = false }\nserai-message-queue = { path = \"../../message-queue\" }\n\nborsh = { version = \"1\", features = [\"de_strict_order\"] }\nserde_json = { version = \"1\", default-features = false }\n\ntokio = { version = \"1\", features = [\"time\"] }\n\nprocessor = { package = \"serai-processor\", path = \"../../processor\", 
features = [\"bitcoin\", \"ethereum\", \"monero\"] }\n\ndockertest = \"0.5\"\nserai-docker-tests = { path = \"../docker\" }\nserai-message-queue-tests = { path = \"../message-queue\" }\n"
  },
  {
    "path": "tests/processor/LICENSE",
    "content": "AGPL-3.0-only license\n\nCopyright (c) 2023 Luke Parker\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License Version 3 as\npublished by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n"
  },
  {
    "path": "tests/processor/src/lib.rs",
    "content": "#![allow(clippy::needless_pass_by_ref_mut)] // False positives\n\nuse std::sync::{OnceLock, Mutex};\n\nuse zeroize::Zeroizing;\nuse rand_core::{RngCore, OsRng};\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::{group::ff::PrimeField, Ciphersuite};\n\nuse serai_client::primitives::ExternalNetworkId;\nuse messages::{ProcessorMessage, CoordinatorMessage};\nuse serai_message_queue::{Service, Metadata, client::MessageQueue};\n\nuse dockertest::{\n  PullPolicy, Image, LogAction, LogPolicy, LogSource, LogOptions, StartPolicy,\n  TestBodySpecification, DockerOperations,\n};\n\nmod networks;\npub use networks::*;\n\n#[cfg(test)]\nmod tests;\n\nstatic UNIQUE_ID: OnceLock<Mutex<u16>> = OnceLock::new();\n\npub fn processor_instance(\n  network: ExternalNetworkId,\n  port: u32,\n  message_queue_key: <Ristretto as Ciphersuite>::F,\n) -> Vec<TestBodySpecification> {\n  let mut entropy = [0; 32];\n  OsRng.fill_bytes(&mut entropy);\n\n  let network_str = match network {\n    ExternalNetworkId::Bitcoin => \"bitcoin\",\n    ExternalNetworkId::Ethereum => \"ethereum\",\n    ExternalNetworkId::Monero => \"monero\",\n  };\n  let image = format!(\"{network_str}-processor\");\n  serai_docker_tests::build(image.clone());\n\n  let mut res = vec![TestBodySpecification::with_image(\n    Image::with_repository(format!(\"serai-dev-{image}\")).pull_policy(PullPolicy::Never),\n  )\n  .replace_env(\n    [\n      (\"MESSAGE_QUEUE_KEY\".to_string(), hex::encode(message_queue_key.to_repr())),\n      (\"ENTROPY\".to_string(), hex::encode(entropy)),\n      (\"NETWORK\".to_string(), network_str.to_string()),\n      (\"NETWORK_RPC_LOGIN\".to_string(), format!(\"{RPC_USER}:{RPC_PASS}\")),\n      (\"NETWORK_RPC_PORT\".to_string(), port.to_string()),\n      (\"DB_PATH\".to_string(), \"./processor-db\".to_string()),\n      (\"RUST_LOG\".to_string(), \"serai_processor=trace,\".to_string()),\n    ]\n    .into(),\n  )];\n\n  if network == ExternalNetworkId::Ethereum {\n    
serai_docker_tests::build(\"ethereum-relayer\".to_string());\n    res.push(\n      TestBodySpecification::with_image(\n        Image::with_repository(\"serai-dev-ethereum-relayer\".to_string())\n          .pull_policy(PullPolicy::Never),\n      )\n      .replace_env(\n        [\n          (\"DB_PATH\".to_string(), \"./ethereum-relayer-db\".to_string()),\n          (\"RUST_LOG\".to_string(), \"serai_ethereum_relayer=trace,\".to_string()),\n        ]\n        .into(),\n      )\n      .set_publish_all_ports(true),\n    );\n  }\n\n  res\n}\n\npub type Handles = (String, String, String, String);\npub fn processor_stack(\n  network: ExternalNetworkId,\n  network_hostname_override: Option<String>,\n) -> (Handles, <Ristretto as Ciphersuite>::F, Vec<TestBodySpecification>) {\n  let (network_composition, network_rpc_port) = network_instance(network);\n\n  let (coord_key, message_queue_keys, message_queue_composition) =\n    serai_message_queue_tests::instance();\n\n  let mut processor_compositions =\n    processor_instance(network, network_rpc_port, message_queue_keys[&network]);\n\n  // Give every item in this stack a unique ID\n  // Uses a Mutex as we can't generate a 8-byte random ID without hitting hostname length limits\n  let unique_id = {\n    let unique_id_mutex = UNIQUE_ID.get_or_init(|| Mutex::new(0));\n    let mut unique_id_lock = unique_id_mutex.lock().unwrap();\n    let unique_id = *unique_id_lock;\n    *unique_id_lock += 1;\n    unique_id\n  };\n\n  let mut compositions = vec![];\n  let mut handles = vec![];\n  for (name, composition) in [\n    Some((\n      match network {\n        ExternalNetworkId::Bitcoin => \"bitcoin\",\n        ExternalNetworkId::Ethereum => \"ethereum\",\n        ExternalNetworkId::Monero => \"monero\",\n      },\n      network_composition,\n    )),\n    Some((\"message_queue\", message_queue_composition)),\n    Some((\"processor\", processor_compositions.remove(0))),\n    processor_compositions.pop().map(|composition| (\"relayer\", 
composition)),\n  ]\n  .into_iter()\n  .flatten()\n  {\n    let handle = format!(\"processor-{name}-{unique_id}\");\n    compositions.push(\n      composition.set_start_policy(StartPolicy::Strict).set_handle(handle.clone()).set_log_options(\n        Some(LogOptions {\n          action: LogAction::Forward,\n          policy: if handle.contains(\"-processor-\") {\n            LogPolicy::Always\n          } else {\n            LogPolicy::OnError\n          },\n          source: LogSource::Both,\n        }),\n      ),\n    );\n    handles.push(handle);\n  }\n\n  let processor_composition = compositions.get_mut(2).unwrap();\n  processor_composition.inject_container_name(\n    network_hostname_override.unwrap_or_else(|| handles[0].clone()),\n    \"NETWORK_RPC_HOSTNAME\",\n  );\n  if let Some(hostname) = handles.get(3) {\n    processor_composition.inject_container_name(hostname, \"ETHEREUM_RELAYER_HOSTNAME\");\n    processor_composition.modify_env(\"ETHEREUM_RELAYER_PORT\", \"20830\");\n  }\n  processor_composition.inject_container_name(handles[1].clone(), \"MESSAGE_QUEUE_RPC\");\n\n  (\n    (\n      handles[0].clone(),\n      handles[1].clone(),\n      handles[2].clone(),\n      handles.get(3).cloned().unwrap_or(String::new()),\n    ),\n    coord_key,\n    compositions,\n  )\n}\n\npub struct Coordinator {\n  network: ExternalNetworkId,\n\n  network_handle: String,\n  #[allow(unused)]\n  message_queue_handle: String,\n  #[allow(unused)]\n  processor_handle: String,\n  relayer_handle: String,\n\n  next_send_id: u64,\n  next_recv_id: u64,\n  queue: MessageQueue,\n}\n\nimpl Coordinator {\n  pub fn new(\n    network: ExternalNetworkId,\n    ops: &DockerOperations,\n    handles: Handles,\n    coord_key: <Ristretto as Ciphersuite>::F,\n  ) -> Coordinator {\n    let rpc = ops.handle(&handles.1).host_port(2287).unwrap();\n    let rpc = rpc.0.to_string() + \":\" + &rpc.1.to_string();\n\n    let res = Coordinator {\n      network,\n\n      network_handle: handles.0,\n      
message_queue_handle: handles.1,\n      processor_handle: handles.2,\n      relayer_handle: handles.3,\n\n      next_send_id: 0,\n      next_recv_id: 0,\n      queue: MessageQueue::new(Service::Coordinator, rpc, Zeroizing::new(coord_key)),\n    };\n\n    // Sleep for up to a minute in case the external network's RPC has yet to start\n\n    // Gets an async handle to block on since this function plays nicer when it isn't itself async\n    {\n      let ops = ops.clone();\n      let network_handle = res.network_handle.clone();\n      std::thread::spawn(move || {\n        let runtime = tokio::runtime::Runtime::new().unwrap();\n        let handle = runtime.handle();\n        let _async = handle.enter();\n\n        let rpc_url = network_rpc(network, &ops, &network_handle);\n        let mut iters = 0;\n        while iters < 60 {\n          match network {\n            ExternalNetworkId::Bitcoin => {\n              use bitcoin_serai::rpc::Rpc;\n\n              // Bitcoin's Rpc::new will test the connection\n              if handle.block_on(Rpc::new(rpc_url.clone())).is_ok() {\n                break;\n              }\n            }\n            ExternalNetworkId::Ethereum => {\n              use std::sync::Arc;\n              use ethereum_serai::{\n                alloy::{\n                  simple_request_transport::SimpleRequest,\n                  rpc_client::ClientBuilder,\n                  provider::{Provider, RootProvider},\n                  network::Ethereum,\n                },\n                deployer::Deployer,\n              };\n\n              let provider = Arc::new(RootProvider::<_, Ethereum>::new(\n                ClientBuilder::default().transport(SimpleRequest::new(rpc_url.clone()), true),\n              ));\n\n              if handle\n                .block_on(provider.raw_request::<_, ()>(\"evm_setAutomine\".into(), [false]))\n                .is_ok()\n              {\n                handle.block_on(async {\n                  // Deploy the deployer\n  
                let tx = Deployer::deployment_tx();\n                  let signer = tx.recover_signer().unwrap();\n                  let (tx, sig, _) = tx.into_parts();\n\n                  provider\n                    .raw_request::<_, ()>(\n                      \"anvil_setBalance\".into(),\n                      [signer.to_string(), (u128::from(tx.gas_limit) * tx.gas_price).to_string()],\n                    )\n                    .await\n                    .unwrap();\n\n                  let mut bytes = vec![];\n                  tx.encode_with_signature_fields(&sig, &mut bytes);\n                  let _ = provider.send_raw_transaction(&bytes).await.unwrap();\n\n                  provider.raw_request::<_, ()>(\"anvil_mine\".into(), [96]).await.unwrap();\n\n                  let _ = Deployer::new(provider.clone()).await.unwrap().unwrap();\n\n                  // Sleep until the actual time is ahead of whatever time is in the epoch we just\n                  // mined\n                  tokio::time::sleep(core::time::Duration::from_secs(30)).await;\n                });\n                break;\n              }\n            }\n            ExternalNetworkId::Monero => {\n              use monero_simple_request_rpc::SimpleRequestRpc;\n              use monero_wallet::rpc::Rpc;\n\n              // Monero's won't, so call get_height\n              if handle\n                .block_on(SimpleRequestRpc::new(rpc_url.clone()))\n                .ok()\n                .and_then(|rpc| handle.block_on(rpc.get_height()).ok())\n                .is_some()\n              {\n                break;\n              }\n            }\n          }\n\n          println!(\"external network RPC has yet to boot, waiting 1 sec, attempt {iters}\");\n          handle.block_on(tokio::time::sleep(core::time::Duration::from_secs(1)));\n          iters += 1;\n        }\n        if iters == 60 {\n          panic!(\"couldn't connect to external network {network:?} after 60s\");\n        }\n      
})\n      .join()\n      .unwrap();\n    }\n\n    res\n  }\n\n  /// Send a message to a processor as its coordinator.\n  pub async fn send_message(&mut self, msg: impl Into<CoordinatorMessage>) {\n    let msg: CoordinatorMessage = msg.into();\n    self\n      .queue\n      .queue(\n        Metadata {\n          from: Service::Coordinator,\n          to: Service::Processor(self.network),\n          intent: msg.intent(),\n        },\n        borsh::to_vec(&msg).unwrap(),\n      )\n      .await;\n    self.next_send_id += 1;\n  }\n\n  /// Receive a message from a processor as its coordinator.\n  pub async fn recv_message(&mut self) -> ProcessorMessage {\n    let msg = tokio::time::timeout(\n      core::time::Duration::from_secs(20),\n      self.queue.next(Service::Processor(self.network)),\n    )\n    .await\n    .unwrap();\n    assert_eq!(msg.from, Service::Processor(self.network));\n    assert_eq!(msg.id, self.next_recv_id);\n    self.queue.ack(Service::Processor(self.network), msg.id).await;\n    self.next_recv_id += 1;\n    borsh::from_slice(&msg.msg).unwrap()\n  }\n\n  pub async fn add_block(&self, ops: &DockerOperations) -> ([u8; 32], Vec<u8>) {\n    let rpc_url = network_rpc(self.network, ops, &self.network_handle);\n    match self.network {\n      ExternalNetworkId::Bitcoin => {\n        use bitcoin_serai::{\n          bitcoin::{consensus::Encodable, network::Network, Script, Address},\n          rpc::Rpc,\n        };\n\n        // Mine a block\n        let rpc = Rpc::new(rpc_url).await.expect(\"couldn't connect to the Bitcoin RPC\");\n        rpc\n          .rpc_call::<Vec<String>>(\n            \"generatetoaddress\",\n            serde_json::json!([1, Address::p2sh(Script::new(), Network::Regtest).unwrap()]),\n          )\n          .await\n          .unwrap();\n\n        // Get it so we can return it\n        let hash = rpc.get_block_hash(rpc.get_latest_block_number().await.unwrap()).await.unwrap();\n        let block = rpc.get_block(&hash).await.unwrap();\n 
       let mut block_buf = vec![];\n        block.consensus_encode(&mut block_buf).unwrap();\n        (hash, block_buf)\n      }\n      ExternalNetworkId::Ethereum => {\n        use ethereum_serai::alloy::{\n          simple_request_transport::SimpleRequest,\n          rpc_types::{BlockTransactionsKind, BlockNumberOrTag},\n          rpc_client::ClientBuilder,\n          provider::{Provider, RootProvider},\n          network::Ethereum,\n        };\n\n        let provider = RootProvider::<_, Ethereum>::new(\n          ClientBuilder::default().transport(SimpleRequest::new(rpc_url.clone()), true),\n        );\n        let start = provider\n          .get_block(BlockNumberOrTag::Latest.into(), BlockTransactionsKind::Hashes)\n          .await\n          .unwrap()\n          .unwrap()\n          .header\n          .number;\n        // We mine 96 blocks to mine one epoch, then cause its finalization\n        provider.raw_request::<_, ()>(\"anvil_mine\".into(), [96]).await.unwrap();\n        let end_of_epoch = start + 31;\n        let hash = provider\n          .get_block(BlockNumberOrTag::Number(end_of_epoch).into(), BlockTransactionsKind::Hashes)\n          .await\n          .unwrap()\n          .unwrap()\n          .header\n          .hash;\n\n        let state = provider\n          .raw_request::<_, String>(\"anvil_dumpState\".into(), ())\n          .await\n          .unwrap()\n          .into_bytes();\n        (hash.into(), state)\n      }\n      ExternalNetworkId::Monero => {\n        use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, scalar::Scalar};\n        use monero_simple_request_rpc::SimpleRequestRpc;\n        use monero_wallet::{rpc::Rpc, address::Network, ViewPair};\n\n        let rpc = SimpleRequestRpc::new(rpc_url).await.expect(\"couldn't connect to the Monero RPC\");\n        rpc\n          .generate_blocks(\n            &ViewPair::new(ED25519_BASEPOINT_POINT, Zeroizing::new(Scalar::ONE))\n              .unwrap()\n              
.legacy_address(Network::Mainnet),\n            1,\n          )\n          .await\n          .unwrap();\n        let hash = rpc.get_block_hash(rpc.get_height().await.unwrap() - 1).await.unwrap();\n        (hash, rpc.get_block(hash).await.unwrap().serialize())\n      }\n    }\n  }\n\n  pub async fn sync(&self, ops: &DockerOperations, others: &[Coordinator]) {\n    let rpc_url = network_rpc(self.network, ops, &self.network_handle);\n    match self.network {\n      ExternalNetworkId::Bitcoin => {\n        use bitcoin_serai::{bitcoin::consensus::Encodable, rpc::Rpc};\n\n        let rpc = Rpc::new(rpc_url).await.expect(\"couldn't connect to the Bitcoin RPC\");\n        let to = rpc.get_latest_block_number().await.unwrap();\n        for coordinator in others {\n          let other_rpc = Rpc::new(network_rpc(self.network, ops, &coordinator.network_handle))\n            .await\n            .expect(\"couldn't connect to the Bitcoin RPC\");\n          let from = other_rpc.get_latest_block_number().await.unwrap() + 1;\n\n          for b in from ..= to {\n            let mut buf = vec![];\n            rpc\n              .get_block(&rpc.get_block_hash(b).await.unwrap())\n              .await\n              .unwrap()\n              .consensus_encode(&mut buf)\n              .unwrap();\n\n            let res: Option<String> = other_rpc\n              .rpc_call(\"submitblock\", serde_json::json!([hex::encode(buf)]))\n              .await\n              .unwrap();\n            if let Some(err) = res {\n              panic!(\"submitblock failed: {err}\");\n            }\n          }\n        }\n      }\n      ExternalNetworkId::Ethereum => {\n        use ethereum_serai::alloy::{\n          simple_request_transport::SimpleRequest,\n          rpc_types::{BlockTransactionsKind, BlockNumberOrTag},\n          rpc_client::ClientBuilder,\n          provider::{Provider, RootProvider},\n          network::Ethereum,\n        };\n\n        let (expected_number, state) = {\n          let 
provider = RootProvider::<_, Ethereum>::new(\n            ClientBuilder::default().transport(SimpleRequest::new(rpc_url.clone()), true),\n          );\n\n          let expected_number = provider\n            .get_block(BlockNumberOrTag::Latest.into(), BlockTransactionsKind::Hashes)\n            .await\n            .unwrap()\n            .unwrap()\n            .header\n            .number;\n          (\n            expected_number,\n            provider.raw_request::<_, String>(\"anvil_dumpState\".into(), ()).await.unwrap(),\n          )\n        };\n\n        for coordinator in others {\n          let rpc_url = network_rpc(coordinator.network, ops, &coordinator.network_handle);\n          let provider = RootProvider::<_, Ethereum>::new(\n            ClientBuilder::default().transport(SimpleRequest::new(rpc_url.clone()), true),\n          );\n          assert!(provider\n            .raw_request::<_, bool>(\"anvil_loadState\".into(), &[&state])\n            .await\n            .unwrap());\n\n          let new_number = provider\n            .get_block(BlockNumberOrTag::Latest.into(), BlockTransactionsKind::Hashes)\n            .await\n            .unwrap()\n            .unwrap()\n            .header\n            .number;\n\n          // TODO: https://github.com/foundry-rs/foundry/issues/7955\n          let _ = expected_number;\n          let _ = new_number;\n          //assert_eq!(expected_number, new_number);\n        }\n      }\n      ExternalNetworkId::Monero => {\n        use monero_simple_request_rpc::SimpleRequestRpc;\n        use monero_wallet::rpc::Rpc;\n\n        let rpc = SimpleRequestRpc::new(rpc_url).await.expect(\"couldn't connect to the Monero RPC\");\n        let to = rpc.get_height().await.unwrap();\n        for coordinator in others {\n          let other_rpc = SimpleRequestRpc::new(network_rpc(\n            coordinator.network,\n            ops,\n            &coordinator.network_handle,\n          ))\n          .await\n          .expect(\"couldn't 
connect to the Monero RPC\");\n\n          let from = other_rpc.get_height().await.unwrap();\n          for b in from .. to {\n            let block =\n              rpc.get_block(rpc.get_block_hash(b).await.unwrap()).await.unwrap().serialize();\n\n            let res: serde_json::Value = other_rpc\n              .json_rpc_call(\"submit_block\", Some(serde_json::json!([hex::encode(block)])))\n              .await\n              .unwrap();\n            let err = res.get(\"error\");\n            if err.is_some() && (err.unwrap() != &serde_json::Value::Null) {\n              panic!(\"failed to submit Monero block: {res}\");\n            }\n          }\n        }\n      }\n    }\n  }\n\n  pub async fn publish_transaction(&self, ops: &DockerOperations, tx: &[u8]) {\n    let rpc_url = network_rpc(self.network, ops, &self.network_handle);\n    match self.network {\n      ExternalNetworkId::Bitcoin => {\n        use bitcoin_serai::{\n          bitcoin::{consensus::Decodable, Transaction},\n          rpc::Rpc,\n        };\n\n        let rpc =\n          Rpc::new(rpc_url).await.expect(\"couldn't connect to the coordinator's Bitcoin RPC\");\n        rpc.send_raw_transaction(&Transaction::consensus_decode(&mut &*tx).unwrap()).await.unwrap();\n      }\n      ExternalNetworkId::Ethereum => {\n        use ethereum_serai::alloy::{\n          simple_request_transport::SimpleRequest,\n          rpc_client::ClientBuilder,\n          provider::{Provider, RootProvider},\n          network::Ethereum,\n        };\n\n        let provider = RootProvider::<_, Ethereum>::new(\n          ClientBuilder::default().transport(SimpleRequest::new(rpc_url.clone()), true),\n        );\n        let _ = provider.send_raw_transaction(tx).await.unwrap();\n      }\n      ExternalNetworkId::Monero => {\n        use monero_simple_request_rpc::SimpleRequestRpc;\n        use monero_wallet::{transaction::Transaction, rpc::Rpc};\n\n        let rpc = SimpleRequestRpc::new(rpc_url)\n          .await\n          
.expect(\"couldn't connect to the coordinator's Monero RPC\");\n        rpc.publish_transaction(&Transaction::read(&mut &*tx).unwrap()).await.unwrap();\n      }\n    }\n  }\n\n  pub async fn publish_eventuality_completion(&self, ops: &DockerOperations, tx: &[u8]) {\n    match self.network {\n      ExternalNetworkId::Bitcoin | ExternalNetworkId::Monero => {\n        self.publish_transaction(ops, tx).await\n      }\n      ExternalNetworkId::Ethereum => (),\n    }\n  }\n\n  pub async fn get_published_transaction(\n    &self,\n    ops: &DockerOperations,\n    tx: &[u8],\n  ) -> Option<Vec<u8>> {\n    let rpc_url = network_rpc(self.network, ops, &self.network_handle);\n    match self.network {\n      ExternalNetworkId::Bitcoin => {\n        use bitcoin_serai::{bitcoin::consensus::Encodable, rpc::Rpc};\n\n        let rpc =\n          Rpc::new(rpc_url).await.expect(\"couldn't connect to the coordinator's Bitcoin RPC\");\n\n        // Bitcoin publishes a 0-byte TX ID to reduce variables\n        // Accordingly, read the mempool to find the (presumed relevant) TX\n        let entries: Vec<String> =\n          rpc.rpc_call(\"getrawmempool\", serde_json::json!([false])).await.unwrap();\n        assert_eq!(entries.len(), 1, \"more than one entry in the mempool, so unclear which to get\");\n\n        let mut hash = [0; 32];\n        hash.copy_from_slice(&hex::decode(&entries[0]).unwrap());\n        if let Ok(tx) = rpc.get_transaction(&hash).await {\n          let mut buf = vec![];\n          tx.consensus_encode(&mut buf).unwrap();\n          Some(buf)\n        } else {\n          None\n        }\n      }\n      ExternalNetworkId::Ethereum => {\n        /*\n        let provider = RootProvider::<_, Ethereum>::new(\n          ClientBuilder::default().transport(SimpleRequest::new(rpc_url.clone()), true),\n        );\n        let mut hash = [0; 32];\n        hash.copy_from_slice(tx);\n        let tx = provider.get_transaction_by_hash(hash.into()).await.unwrap()?;\n        let (tx, 
sig, _) = Signed::<TxLegacy>::try_from(tx).unwrap().into_parts();\n        let mut bytes = vec![];\n        tx.encode_with_signature_fields(&sig, &mut bytes);\n        Some(bytes)\n        */\n\n        // This is being passed a signature. We need to check the relayer has a TX with this\n        // signature\n\n        use tokio::{\n          io::{AsyncReadExt, AsyncWriteExt},\n          net::TcpStream,\n        };\n\n        let (ip, port) = ops.handle(&self.relayer_handle).host_port(20831).unwrap();\n        let relayer_url = format!(\"{ip}:{port}\");\n\n        let mut socket = TcpStream::connect(&relayer_url).await.unwrap();\n        // Iterate over every published command\n        for i in 1 .. u32::MAX {\n          socket.write_all(&i.to_le_bytes()).await.unwrap();\n\n          let mut recvd_len = [0; 4];\n          socket.read_exact(&mut recvd_len).await.unwrap();\n          if recvd_len == [0; 4] {\n            break;\n          }\n\n          let mut msg = vec![0; usize::try_from(u32::from_le_bytes(recvd_len)).unwrap()];\n          socket.read_exact(&mut msg).await.unwrap();\n          for start_pos in 0 .. msg.len() {\n            if (start_pos + tx.len()) > msg.len() {\n              break;\n            }\n            if &msg[start_pos .. (start_pos + tx.len())] == tx {\n              return Some(msg);\n            }\n          }\n        }\n\n        None\n      }\n      ExternalNetworkId::Monero => {\n        use monero_simple_request_rpc::SimpleRequestRpc;\n        use monero_wallet::rpc::Rpc;\n\n        let rpc = SimpleRequestRpc::new(rpc_url)\n          .await\n          .expect(\"couldn't connect to the coordinator's Monero RPC\");\n        let mut hash = [0; 32];\n        hash.copy_from_slice(tx);\n        if let Ok(tx) = rpc.get_transaction(hash).await {\n          Some(tx.serialize())\n        } else {\n          None\n        }\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "tests/processor/src/networks.rs",
    "content": "#![allow(deprecated)]\n\nuse zeroize::Zeroizing;\nuse rand_core::{RngCore, OsRng};\n\nuse scale::Encode;\n\nuse serai_client::{\n  in_instructions::primitives::{InInstruction, RefundableInInstruction, Shorthand},\n  primitives::{Amount, ExternalAddress, ExternalBalance, ExternalCoin, ExternalNetworkId},\n  validator_sets::primitives::ExternalKey,\n};\n\nuse dockertest::{PullPolicy, Image, StartPolicy, TestBodySpecification, DockerOperations};\n\nuse crate::*;\n\npub const RPC_USER: &str = \"serai\";\npub const RPC_PASS: &str = \"seraidex\";\n\npub const BTC_PORT: u32 = 8332;\npub const ETH_PORT: u32 = 8545;\npub const XMR_PORT: u32 = 18081;\n\npub fn bitcoin_instance() -> (TestBodySpecification, u32) {\n  serai_docker_tests::build(\"bitcoin\".to_string());\n\n  let composition = TestBodySpecification::with_image(\n    Image::with_repository(\"serai-dev-bitcoin\").pull_policy(PullPolicy::Never),\n  )\n  .set_publish_all_ports(true);\n  (composition, BTC_PORT)\n}\n\npub fn ethereum_instance() -> (TestBodySpecification, u32) {\n  serai_docker_tests::build(\"ethereum\".to_string());\n\n  let composition = TestBodySpecification::with_image(\n    Image::with_repository(\"serai-dev-ethereum\").pull_policy(PullPolicy::Never),\n  )\n  .set_start_policy(StartPolicy::Strict)\n  .set_publish_all_ports(true);\n  (composition, ETH_PORT)\n}\n\npub fn monero_instance() -> (TestBodySpecification, u32) {\n  serai_docker_tests::build(\"monero\".to_string());\n\n  let composition = TestBodySpecification::with_image(\n    Image::with_repository(\"serai-dev-monero\").pull_policy(PullPolicy::Never),\n  )\n  .set_start_policy(StartPolicy::Strict)\n  .set_publish_all_ports(true);\n  (composition, XMR_PORT)\n}\n\npub fn network_instance(network: ExternalNetworkId) -> (TestBodySpecification, u32) {\n  match network {\n    ExternalNetworkId::Bitcoin => bitcoin_instance(),\n    ExternalNetworkId::Ethereum => ethereum_instance(),\n    ExternalNetworkId::Monero => 
monero_instance(),\n  }\n}\n\npub fn network_rpc(network: ExternalNetworkId, ops: &DockerOperations, handle: &str) -> String {\n  let (ip, port) = ops\n    .handle(handle)\n    .host_port(match network {\n      ExternalNetworkId::Bitcoin => BTC_PORT,\n      ExternalNetworkId::Ethereum => ETH_PORT,\n      ExternalNetworkId::Monero => XMR_PORT,\n    })\n    .unwrap();\n  format!(\"http://{RPC_USER}:{RPC_PASS}@{ip}:{port}\")\n}\n\npub fn confirmations(network: ExternalNetworkId) -> usize {\n  use processor::networks::*;\n  match network {\n    ExternalNetworkId::Bitcoin => Bitcoin::CONFIRMATIONS,\n    ExternalNetworkId::Ethereum => Ethereum::<serai_db::MemDb>::CONFIRMATIONS,\n    ExternalNetworkId::Monero => Monero::CONFIRMATIONS,\n  }\n}\n\n#[derive(Clone)]\npub enum Wallet {\n  Bitcoin {\n    private_key: bitcoin_serai::bitcoin::PrivateKey,\n    public_key: bitcoin_serai::bitcoin::PublicKey,\n    input_tx: bitcoin_serai::bitcoin::Transaction,\n  },\n  Ethereum {\n    rpc_url: String,\n    key: <ciphersuite_kp256::Secp256k1 as Ciphersuite>::F,\n    nonce: u64,\n  },\n  Monero {\n    handle: String,\n    spend_key: Zeroizing<curve25519_dalek::scalar::Scalar>,\n    view_pair: monero_wallet::ViewPair,\n    last_tx: (usize, [u8; 32]),\n  },\n}\n\n// TODO: Merge these functions with the processor's tests, which offers very similar functionality\nimpl Wallet {\n  pub async fn new(network: ExternalNetworkId, ops: &DockerOperations, handle: String) -> Wallet {\n    let rpc_url = network_rpc(network, ops, &handle);\n\n    match network {\n      ExternalNetworkId::Bitcoin => {\n        use bitcoin_serai::{\n          bitcoin::{\n            secp256k1::{SECP256K1, SecretKey},\n            PrivateKey, PublicKey, ScriptBuf, Network, Address,\n          },\n          rpc::Rpc,\n        };\n\n        let secret_key = SecretKey::new(&mut rand_core::OsRng);\n        let private_key = PrivateKey::new(secret_key, Network::Regtest);\n        let public_key = 
PublicKey::from_private_key(SECP256K1, &private_key);\n        let main_addr = Address::p2pkh(public_key, Network::Regtest);\n\n        let rpc = Rpc::new(rpc_url).await.expect(\"couldn't connect to the Bitcoin RPC\");\n\n        let new_block = rpc.get_latest_block_number().await.unwrap() + 1;\n        rpc\n          .rpc_call::<Vec<String>>(\"generatetoaddress\", serde_json::json!([1, main_addr]))\n          .await\n          .unwrap();\n\n        // Mine it to maturity\n        rpc\n          .rpc_call::<Vec<String>>(\n            \"generatetoaddress\",\n            serde_json::json!([100, Address::p2sh(&ScriptBuf::new(), Network::Regtest).unwrap()]),\n          )\n          .await\n          .unwrap();\n\n        let funds = rpc\n          .get_block(&rpc.get_block_hash(new_block).await.unwrap())\n          .await\n          .unwrap()\n          .txdata\n          .swap_remove(0);\n\n        Wallet::Bitcoin { private_key, public_key, input_tx: funds }\n      }\n\n      ExternalNetworkId::Ethereum => {\n        use ciphersuite::group::ff::Field;\n        use ciphersuite_kp256::Secp256k1;\n        use ethereum_serai::alloy::{\n          primitives::{U256, Address},\n          simple_request_transport::SimpleRequest,\n          rpc_client::ClientBuilder,\n          provider::{Provider, RootProvider},\n          network::Ethereum,\n        };\n\n        let key = <Secp256k1 as Ciphersuite>::F::random(&mut OsRng);\n        let address =\n          ethereum_serai::crypto::address(&(<Secp256k1 as Ciphersuite>::generator() * key));\n\n        let provider = RootProvider::<_, Ethereum>::new(\n          ClientBuilder::default().transport(SimpleRequest::new(rpc_url.clone()), true),\n        );\n\n        provider\n          .raw_request::<_, ()>(\n            \"anvil_setBalance\".into(),\n            [Address(address.into()).to_string(), {\n              let nine_decimals = U256::from(1_000_000_000u64);\n              (U256::from(100u64) * nine_decimals * 
nine_decimals).to_string()\n            }],\n          )\n          .await\n          .unwrap();\n\n        Wallet::Ethereum { rpc_url: rpc_url.clone(), key, nonce: 0 }\n      }\n\n      ExternalNetworkId::Monero => {\n        use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, scalar::Scalar};\n        use monero_simple_request_rpc::SimpleRequestRpc;\n        use monero_wallet::{rpc::Rpc, address::Network, ViewPair};\n\n        let spend_key = Scalar::random(&mut OsRng);\n        let view_key = Scalar::random(&mut OsRng);\n\n        let view_pair =\n          ViewPair::new(ED25519_BASEPOINT_POINT * spend_key, Zeroizing::new(view_key)).unwrap();\n\n        let rpc = SimpleRequestRpc::new(rpc_url).await.expect(\"couldn't connect to the Monero RPC\");\n\n        let height = rpc.get_height().await.unwrap();\n        // Mines 200 blocks so sufficient decoys exist, as only 60 is needed for maturity\n        rpc.generate_blocks(&view_pair.legacy_address(Network::Mainnet), 200).await.unwrap();\n        let block = rpc.get_block(rpc.get_block_hash(height).await.unwrap()).await.unwrap();\n\n        Wallet::Monero {\n          handle,\n          spend_key: Zeroizing::new(spend_key),\n          view_pair,\n          last_tx: (height, block.miner_transaction.hash()),\n        }\n      }\n    }\n  }\n\n  pub async fn send_to_address(\n    &mut self,\n    ops: &DockerOperations,\n    to: &ExternalKey,\n    instruction: Option<InInstruction>,\n  ) -> (Vec<u8>, ExternalBalance) {\n    match self {\n      Wallet::Bitcoin { private_key, public_key, ref mut input_tx } => {\n        use bitcoin_serai::bitcoin::{\n          secp256k1::{SECP256K1, Message},\n          key::{XOnlyPublicKey, TweakedPublicKey},\n          consensus::Encodable,\n          sighash::{EcdsaSighashType, SighashCache},\n          script::{PushBytesBuf, Script, ScriptBuf, Builder},\n          OutPoint, Sequence, Witness, TxIn, Amount, TxOut,\n          absolute::LockTime,\n          transaction::{Version, 
Transaction},\n        };\n\n        const AMOUNT: u64 = 100000000;\n        let mut tx = Transaction {\n          version: Version(2),\n          lock_time: LockTime::ZERO,\n          input: vec![TxIn {\n            previous_output: OutPoint { txid: input_tx.compute_txid(), vout: 0 },\n            script_sig: Script::new().into(),\n            sequence: Sequence(u32::MAX),\n            witness: Witness::default(),\n          }],\n          output: vec![\n            TxOut {\n              value: Amount::from_sat(input_tx.output[0].value.to_sat() - AMOUNT - 10000),\n              script_pubkey: input_tx.output[0].script_pubkey.clone(),\n            },\n            TxOut {\n              value: Amount::from_sat(AMOUNT),\n              script_pubkey: ScriptBuf::new_p2tr_tweaked(\n                TweakedPublicKey::dangerous_assume_tweaked(\n                  XOnlyPublicKey::from_slice(&to[1 ..]).unwrap(),\n                ),\n              ),\n            },\n          ],\n        };\n\n        if let Some(instruction) = instruction {\n          tx.output.push(TxOut {\n            value: Amount::ZERO,\n            script_pubkey: ScriptBuf::new_op_return(\n              PushBytesBuf::try_from(\n                Shorthand::Raw(RefundableInInstruction { origin: None, instruction }).encode(),\n              )\n              .unwrap(),\n            ),\n          });\n        }\n\n        let mut der = SECP256K1\n          .sign_ecdsa_low_r(\n            &Message::from_digest_slice(\n              SighashCache::new(&tx)\n                .legacy_signature_hash(\n                  0,\n                  &input_tx.output[0].script_pubkey,\n                  EcdsaSighashType::All.to_u32(),\n                )\n                .unwrap()\n                .to_raw_hash()\n                .as_ref(),\n            )\n            .unwrap(),\n            &private_key.inner,\n          )\n          .serialize_der()\n          .to_vec();\n        der.push(1);\n        tx.input[0].script_sig 
= Builder::new()\n          .push_slice(PushBytesBuf::try_from(der).unwrap())\n          .push_key(public_key)\n          .into_script();\n\n        let mut buf = vec![];\n        tx.consensus_encode(&mut buf).unwrap();\n        *input_tx = tx;\n        (buf, ExternalBalance { coin: ExternalCoin::Bitcoin, amount: Amount(AMOUNT) })\n      }\n\n      Wallet::Ethereum { rpc_url, key, ref mut nonce } => {\n        use std::sync::Arc;\n        use ethereum_serai::{\n          alloy::{\n            primitives::{U256, Parity, Signature, TxKind},\n            sol_types::SolCall,\n            simple_request_transport::SimpleRequest,\n            consensus::{TxLegacy, SignableTransaction},\n            rpc_client::ClientBuilder,\n            provider::{Provider, RootProvider},\n            network::Ethereum,\n          },\n          crypto::PublicKey,\n          deployer::Deployer,\n        };\n\n        let eight_decimals = U256::from(100_000_000u64);\n        let nine_decimals = eight_decimals * U256::from(10u64);\n        let eighteen_decimals = nine_decimals * nine_decimals;\n        let one_eth = eighteen_decimals;\n\n        let provider = Arc::new(RootProvider::<_, Ethereum>::new(\n          ClientBuilder::default().transport(SimpleRequest::new(rpc_url.clone()), true),\n        ));\n\n        let to_as_key = PublicKey::new(\n          <ciphersuite_kp256::Secp256k1 as Ciphersuite>::read_G(&mut to.as_slice()).unwrap(),\n        )\n        .unwrap();\n        let router_addr = {\n          // Find the deployer\n          let deployer = Deployer::new(provider.clone()).await.unwrap().unwrap();\n\n          // Find the router, deploying if non-existent\n          let router = if let Some(router) =\n            deployer.find_router(provider.clone(), &to_as_key).await.unwrap()\n          {\n            router\n          } else {\n            let mut tx = deployer.deploy_router(&to_as_key);\n            tx.gas_price = 1_000_000_000u64.into();\n            let tx = 
ethereum_serai::crypto::deterministically_sign(&tx);\n            let signer = tx.recover_signer().unwrap();\n            let (tx, sig, _) = tx.into_parts();\n\n            provider\n              .raw_request::<_, ()>(\n                \"anvil_setBalance\".into(),\n                [signer.to_string(), (u128::from(tx.gas_limit) * tx.gas_price).to_string()],\n              )\n              .await\n              .unwrap();\n\n            let mut bytes = vec![];\n            tx.encode_with_signature_fields(&sig, &mut bytes);\n            let _ = provider.send_raw_transaction(&bytes).await.unwrap();\n\n            provider.raw_request::<_, ()>(\"anvil_mine\".into(), [96]).await.unwrap();\n\n            deployer.find_router(provider.clone(), &to_as_key).await.unwrap().unwrap()\n          };\n\n          router.address()\n        };\n\n        let tx = TxLegacy {\n          chain_id: None,\n          nonce: *nonce,\n          gas_price: 1_000_000_000u128,\n          gas_limit: 200_000,\n          to: TxKind::Call(router_addr.into()),\n          // 1 ETH\n          value: one_eth,\n          input: ethereum_serai::router::abi::inInstructionCall::new((\n            [0; 20].into(),\n            one_eth,\n            if let Some(instruction) = instruction {\n              Shorthand::Raw(RefundableInInstruction { origin: None, instruction }).encode().into()\n            } else {\n              vec![].into()\n            },\n          ))\n          .abi_encode()\n          .into(),\n        };\n\n        *nonce += 1;\n\n        let sig =\n          k256::ecdsa::SigningKey::from(k256::elliptic_curve::NonZeroScalar::new(*key).unwrap())\n            .sign_prehash_recoverable(tx.signature_hash().as_ref())\n            .unwrap();\n\n        let mut bytes = vec![];\n        let parity = Parity::NonEip155(Parity::from(sig.1).y_parity());\n        tx.encode_with_signature_fields(&Signature::from(sig).with_parity(parity), &mut bytes);\n\n        // We drop the bottom 10 decimals\n      
  (\n          bytes,\n          ExternalBalance {\n            coin: ExternalCoin::Ether,\n            amount: Amount(u64::try_from(eight_decimals).unwrap()),\n          },\n        )\n      }\n\n      Wallet::Monero { handle, ref spend_key, ref view_pair, ref mut last_tx } => {\n        use curve25519_dalek::constants::ED25519_BASEPOINT_POINT;\n        use monero_simple_request_rpc::SimpleRequestRpc;\n        use monero_wallet::{\n          io::decompress_point,\n          ringct::RctType,\n          rpc::{FeePriority, Rpc},\n          address::{Network, AddressType, Address},\n          Scanner, OutputWithDecoys,\n          send::{Change, SignableTransaction},\n        };\n        use processor::{additional_key, networks::Monero};\n\n        let rpc_url = network_rpc(ExternalNetworkId::Monero, ops, handle);\n        let rpc = SimpleRequestRpc::new(rpc_url).await.expect(\"couldn't connect to the Monero RPC\");\n\n        // Prepare inputs\n        let current_height = rpc.get_height().await.unwrap();\n        let mut outputs = vec![];\n        for block in last_tx.0 .. 
current_height {\n          let block = rpc.get_block_by_number(block).await.unwrap();\n          if (block.miner_transaction.hash() == last_tx.1) ||\n            block.transactions.contains(&last_tx.1)\n          {\n            outputs = Scanner::new(view_pair.clone())\n              .scan(rpc.get_scannable_block(block).await.unwrap())\n              .unwrap()\n              .ignore_additional_timelock();\n          }\n        }\n        assert!(!outputs.is_empty());\n\n        let mut inputs = Vec::with_capacity(outputs.len());\n        for output in outputs {\n          inputs.push(\n            OutputWithDecoys::fingerprintable_deterministic_new(\n              &mut OsRng,\n              &rpc,\n              16,\n              rpc.get_height().await.unwrap(),\n              output,\n            )\n            .await\n            .unwrap(),\n          );\n        }\n\n        let to_spend_key = decompress_point(<[u8; 32]>::try_from(to.as_ref()).unwrap()).unwrap();\n        let to_view_key = additional_key::<Monero>(0);\n        let to_addr = Address::new(\n          Network::Mainnet,\n          AddressType::Featured { subaddress: false, payment_id: None, guaranteed: true },\n          to_spend_key,\n          ED25519_BASEPOINT_POINT * to_view_key.0,\n        );\n\n        // Create and sign the TX\n        const AMOUNT: u64 = 1_000_000_000_000;\n        let mut data = vec![];\n        if let Some(instruction) = instruction {\n          data.push(Shorthand::Raw(RefundableInInstruction { origin: None, instruction }).encode());\n        }\n        let mut outgoing_view_key = Zeroizing::new([0; 32]);\n        OsRng.fill_bytes(outgoing_view_key.as_mut());\n        let tx = SignableTransaction::new(\n          RctType::ClsagBulletproofPlus,\n          outgoing_view_key,\n          inputs,\n          vec![(to_addr, AMOUNT)],\n          Change::new(view_pair.clone(), None),\n          data,\n          rpc.get_fee_rate(FeePriority::Unimportant).await.unwrap(),\n        
)\n        .unwrap()\n        .sign(&mut OsRng, spend_key)\n        .unwrap();\n\n        // Update the last TX to track the change output\n        last_tx.0 = current_height;\n        last_tx.1 = tx.hash();\n\n        (tx.serialize(), ExternalBalance { coin: ExternalCoin::Monero, amount: Amount(AMOUNT) })\n      }\n    }\n  }\n\n  pub fn address(&self) -> ExternalAddress {\n    use serai_client::networks;\n\n    match self {\n      Wallet::Bitcoin { public_key, .. } => {\n        use bitcoin_serai::bitcoin::ScriptBuf;\n        ExternalAddress::new(\n          networks::bitcoin::Address::new(ScriptBuf::new_p2pkh(&public_key.pubkey_hash()))\n            .unwrap()\n            .into(),\n        )\n        .unwrap()\n      }\n      Wallet::Ethereum { key, .. } => ExternalAddress::new(\n        ethereum_serai::crypto::address(&(ciphersuite_kp256::Secp256k1::generator() * key)).into(),\n      )\n      .unwrap(),\n      Wallet::Monero { view_pair, .. } => {\n        use monero_wallet::address::Network;\n        ExternalAddress::new(\n          networks::monero::Address::new(view_pair.legacy_address(Network::Mainnet))\n            .unwrap()\n            .into(),\n        )\n        .unwrap()\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "tests/processor/src/tests/batch.rs",
    "content": "use std::{\n  collections::HashMap,\n  time::{SystemTime, Duration},\n};\n\nuse dkg::Participant;\n\nuse messages::{coordinator::*, SubstrateContext};\n\nuse serai_client::{\n  in_instructions::primitives::{\n    batch_message, Batch, InInstruction, InInstructionWithBalance, SignedBatch,\n  },\n  primitives::{\n    crypto::RuntimePublic, Amount, BlockHash, ExternalBalance, ExternalNetworkId, PublicKey,\n    SeraiAddress, EXTERNAL_NETWORKS,\n  },\n  validator_sets::primitives::Session,\n};\n\nuse serai_db::MemDb;\nuse processor::networks::{Network, Bitcoin, Ethereum, Monero};\n\nuse crate::{*, tests::*};\n\npub(crate) async fn recv_batch_preprocesses(\n  coordinators: &mut [Coordinator],\n  session: Session,\n  batch: &Batch,\n  attempt: u32,\n) -> (SubstrateSignId, HashMap<Participant, [u8; 64]>) {\n  let id = SubstrateSignId { session, id: SubstrateSignableId::Batch(batch.id), attempt };\n\n  let mut block = None;\n  let mut preprocesses = HashMap::new();\n  for (i, coordinator) in coordinators.iter_mut().enumerate() {\n    let i = Participant::new(u16::try_from(i).unwrap() + 1).unwrap();\n\n    if attempt == 0 {\n      match coordinator.recv_message().await {\n        messages::ProcessorMessage::Substrate(messages::substrate::ProcessorMessage::Batch {\n          batch: sent_batch,\n        }) => {\n          assert_eq!(&sent_batch, batch);\n        }\n        _ => panic!(\"processor didn't send batch\"),\n      }\n    }\n\n    match coordinator.recv_message().await {\n      messages::ProcessorMessage::Coordinator(\n        messages::coordinator::ProcessorMessage::BatchPreprocess {\n          id: this_id,\n          block: this_block,\n          preprocesses: mut these_preprocesses,\n        },\n      ) => {\n        assert_eq!(this_id, id);\n        if block.is_none() {\n          block = Some(this_block);\n        }\n        assert_eq!(&this_block, block.as_ref().unwrap());\n\n        assert_eq!(these_preprocesses.len(), 1);\n        
preprocesses.insert(i, these_preprocesses.swap_remove(0));\n      }\n      _ => panic!(\"processor didn't send batch preprocess\"),\n    }\n  }\n\n  // Reduce the preprocesses down to the threshold\n  while preprocesses.len() > THRESHOLD {\n    preprocesses.remove(\n      &Participant::new(\n        u16::try_from(OsRng.next_u64() % u64::try_from(COORDINATORS).unwrap()).unwrap() + 1,\n      )\n      .unwrap(),\n    );\n  }\n\n  (id, preprocesses)\n}\n\npub(crate) async fn sign_batch(\n  coordinators: &mut [Coordinator],\n  key: [u8; 32],\n  id: SubstrateSignId,\n  preprocesses: HashMap<Participant, [u8; 64]>,\n) -> SignedBatch {\n  assert_eq!(preprocesses.len(), THRESHOLD);\n\n  for (i, coordinator) in coordinators.iter_mut().enumerate() {\n    let i = Participant::new(u16::try_from(i).unwrap() + 1).unwrap();\n\n    if preprocesses.contains_key(&i) {\n      coordinator\n        .send_message(messages::coordinator::CoordinatorMessage::SubstratePreprocesses {\n          id: id.clone(),\n          preprocesses: clone_without(&preprocesses, &i),\n        })\n        .await;\n    }\n  }\n\n  let mut shares = HashMap::new();\n  for (i, coordinator) in coordinators.iter_mut().enumerate() {\n    let i = Participant::new(u16::try_from(i).unwrap() + 1).unwrap();\n\n    if preprocesses.contains_key(&i) {\n      match coordinator.recv_message().await {\n        messages::ProcessorMessage::Coordinator(\n          messages::coordinator::ProcessorMessage::SubstrateShare {\n            id: this_id,\n            shares: mut these_shares,\n          },\n        ) => {\n          assert_eq!(&this_id, &id);\n          assert_eq!(these_shares.len(), 1);\n          shares.insert(i, these_shares.swap_remove(0));\n        }\n        _ => panic!(\"processor didn't send batch share\"),\n      }\n    }\n  }\n\n  for (i, coordinator) in coordinators.iter_mut().enumerate() {\n    let i = Participant::new(u16::try_from(i).unwrap() + 1).unwrap();\n\n    if preprocesses.contains_key(&i) {\n      
coordinator\n        .send_message(messages::coordinator::CoordinatorMessage::SubstrateShares {\n          id: id.clone(),\n          shares: clone_without(&shares, &i),\n        })\n        .await;\n    }\n  }\n\n  // The selected processors should yield the batch\n  let mut batch = None;\n  for (i, coordinator) in coordinators.iter_mut().enumerate() {\n    let i = Participant::new(u16::try_from(i).unwrap() + 1).unwrap();\n\n    if preprocesses.contains_key(&i) {\n      match coordinator.recv_message().await {\n        messages::ProcessorMessage::Substrate(\n          messages::substrate::ProcessorMessage::SignedBatch { batch: this_batch },\n        ) => {\n          if batch.is_none() {\n            assert!(PublicKey::from_raw(key)\n              .verify(&batch_message(&this_batch.batch), &this_batch.signature));\n\n            batch = Some(this_batch.clone());\n          }\n\n          assert_eq!(batch.as_ref().unwrap(), &this_batch);\n        }\n        _ => panic!(\"processor didn't send batch\"),\n      }\n    }\n  }\n  batch.unwrap()\n}\n\npub(crate) async fn substrate_block(\n  coordinator: &mut Coordinator,\n  block: messages::substrate::CoordinatorMessage,\n) -> Vec<PlanMeta> {\n  match block.clone() {\n    messages::substrate::CoordinatorMessage::SubstrateBlock {\n      context: _,\n      block: sent_block,\n      burns: _,\n      batches: _,\n    } => {\n      coordinator.send_message(block).await;\n      match coordinator.recv_message().await {\n        messages::ProcessorMessage::Coordinator(\n          messages::coordinator::ProcessorMessage::SubstrateBlockAck { block: recvd_block, plans },\n        ) => {\n          assert_eq!(recvd_block, sent_block);\n          plans\n        }\n        _ => panic!(\"coordinator didn't respond to SubstrateBlock with SubstrateBlockAck\"),\n      }\n    }\n    _ => panic!(\"substrate_block message wasn't a SubstrateBlock\"),\n  }\n}\n\n#[test]\nfn batch_test() {\n  for network in EXTERNAL_NETWORKS {\n    let 
(coordinators, test) = new_test(network);\n\n    test.run(|ops| async move {\n      tokio::time::sleep(Duration::from_secs(1)).await;\n\n      let mut coordinators = coordinators\n        .into_iter()\n        .map(|(handles, key)| Coordinator::new(network, &ops, handles, key))\n        .collect::<Vec<_>>();\n\n      // Create a wallet before we start generating keys\n      let mut wallet = Wallet::new(network, &ops, coordinators[0].network_handle.clone()).await;\n      coordinators[0].sync(&ops, &coordinators[1 ..]).await;\n\n      // Generate keys\n      let key_pair = key_gen(&mut coordinators).await;\n\n      // Now we have to mine blocks to activate the key\n      // (the first key is activated when the network's time as of a block exceeds the Serai time\n      // it was confirmed at)\n      // Mine multiple sets of medians to ensure the median is sufficiently advanced\n      for _ in 0 .. (10 * confirmations(network)) {\n        coordinators[0].add_block(&ops).await;\n        tokio::time::sleep(Duration::from_secs(1)).await;\n      }\n      coordinators[0].sync(&ops, &coordinators[1 ..]).await;\n\n      // Run twice, once with an instruction and once without\n      let substrate_block_num = (OsRng.next_u64() % 4_000_000_000u64) + 1;\n      for i in 0 .. 2 {\n        let mut serai_address = [0; 32];\n        OsRng.fill_bytes(&mut serai_address);\n        let instruction =\n          if i == 0 { Some(InInstruction::Transfer(SeraiAddress(serai_address))) } else { None };\n\n        // Send into the processor's wallet\n        let (tx, balance_sent) =\n          wallet.send_to_address(&ops, &key_pair.1, instruction.clone()).await;\n        for coordinator in &mut coordinators {\n          coordinator.publish_transaction(&ops, &tx).await;\n        }\n\n        // Put the TX past the confirmation depth\n        let mut block_with_tx = None;\n        for _ in 0 .. 
confirmations(network) {\n          let (hash, _) = coordinators[0].add_block(&ops).await;\n          if block_with_tx.is_none() {\n            block_with_tx = Some(hash);\n          }\n        }\n        coordinators[0].sync(&ops, &coordinators[1 ..]).await;\n\n        // Sleep for 10s\n        // The scanner works on a 5s interval, so this leaves a few s for any processing/latency\n        tokio::time::sleep(Duration::from_secs(10)).await;\n\n        println!(\"sent in transaction. with in instruction: {}\", instruction.is_some());\n\n        let expected_batch = Batch {\n          network,\n          id: i,\n          block: BlockHash(block_with_tx.unwrap()),\n          instructions: if let Some(instruction) = &instruction {\n            vec![InInstructionWithBalance {\n              instruction: instruction.clone(),\n              balance: ExternalBalance {\n                coin: balance_sent.coin,\n                amount: Amount(\n                  balance_sent.amount.0 -\n                    (2 * match network {\n                      ExternalNetworkId::Bitcoin => Bitcoin::COST_TO_AGGREGATE,\n                      ExternalNetworkId::Ethereum => Ethereum::<MemDb>::COST_TO_AGGREGATE,\n                      ExternalNetworkId::Monero => Monero::COST_TO_AGGREGATE,\n                    }),\n                ),\n              },\n            }]\n          } else {\n            // This shouldn't have an instruction as we didn't add any data into the TX we sent\n            // Empty batches remain valuable as they let us achieve consensus on the block and spend\n            // contained outputs\n            vec![]\n          },\n        };\n\n        println!(\"receiving batch preprocesses...\");\n\n        // Make sure the processors picked it up by checking they're trying to sign a batch for it\n        let (mut id, mut preprocesses) =\n          recv_batch_preprocesses(&mut coordinators, Session(0), &expected_batch, 0).await;\n        // Trigger a random amount of 
re-attempts\n        for attempt in 1 ..= u32::try_from(OsRng.next_u64() % 4).unwrap() {\n          // TODO: Double check how the processor handles this ID field\n          // It should be able to assert it's perfectly sequential\n          id.attempt = attempt;\n          for coordinator in &mut coordinators {\n            coordinator\n              .send_message(messages::coordinator::CoordinatorMessage::BatchReattempt {\n                id: id.clone(),\n              })\n              .await;\n          }\n          (id, preprocesses) =\n            recv_batch_preprocesses(&mut coordinators, Session(0), &expected_batch, attempt).await;\n        }\n\n        println!(\"signing batch...\");\n\n        // Continue with signing the batch\n        let batch = sign_batch(&mut coordinators, key_pair.0 .0, id, preprocesses).await;\n\n        // Check it\n        assert_eq!(batch.batch, expected_batch);\n\n        // Fire a SubstrateBlock\n        let serai_time =\n          SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs();\n        for coordinator in &mut coordinators {\n          let plans = substrate_block(\n            coordinator,\n            messages::substrate::CoordinatorMessage::SubstrateBlock {\n              context: SubstrateContext {\n                serai_time,\n                network_latest_finalized_block: batch.batch.block,\n              },\n              block: substrate_block_num + u64::from(i),\n              burns: vec![],\n              batches: vec![batch.batch.id],\n            },\n          )\n          .await;\n          if instruction.is_some() ||\n            (instruction.is_none() && (network == ExternalNetworkId::Monero))\n          {\n            assert!(plans.is_empty());\n          } else {\n            // If no instruction was used, and the processor can presume the origin, it'd have\n            // created a refund Plan\n            assert_eq!(plans.len(), 1);\n          }\n        }\n      }\n\n      // 
With the latter InInstruction not existing, we should've triggered a refund if the origin\n      // was detectable\n      // Check this is trying to sign a Plan\n      if network != ExternalNetworkId::Monero {\n        let mut refund_id = None;\n        for coordinator in &mut coordinators {\n          match coordinator.recv_message().await {\n            messages::ProcessorMessage::Sign(messages::sign::ProcessorMessage::Preprocess {\n              id,\n              ..\n            }) => {\n              if refund_id.is_none() {\n                refund_id = Some(id.clone());\n              }\n              assert_eq!(refund_id.as_ref().unwrap(), &id);\n            }\n            _ => panic!(\"processor didn't send preprocess for expected refund transaction\"),\n          }\n        }\n      }\n    });\n  }\n}\n"
  },
  {
    "path": "tests/processor/src/tests/key_gen.rs",
    "content": "use std::{collections::HashMap, time::SystemTime};\n\nuse dkg::{Participant, ThresholdParams};\n\nuse serai_client::{\n  primitives::{BlockHash, PublicKey, EXTERNAL_NETWORKS},\n  validator_sets::primitives::{KeyPair, Session},\n};\n\nuse messages::{SubstrateContext, key_gen::KeyGenId, CoordinatorMessage, ProcessorMessage};\n\nuse crate::{*, tests::*};\n\npub(crate) async fn key_gen(coordinators: &mut [Coordinator]) -> KeyPair {\n  // Perform an interaction with all processors via their coordinators\n  async fn interact_with_all<\n    FS: Fn(Participant) -> messages::key_gen::CoordinatorMessage,\n    FR: FnMut(Participant, messages::key_gen::ProcessorMessage),\n  >(\n    coordinators: &mut [Coordinator],\n    message: FS,\n    mut recv: FR,\n  ) {\n    for (i, coordinator) in coordinators.iter_mut().enumerate() {\n      let participant = Participant::new(u16::try_from(i + 1).unwrap()).unwrap();\n      coordinator.send_message(CoordinatorMessage::KeyGen(message(participant))).await;\n\n      match coordinator.recv_message().await {\n        ProcessorMessage::KeyGen(msg) => recv(participant, msg),\n        _ => panic!(\"processor didn't return KeyGen message\"),\n      }\n    }\n  }\n\n  // Order a key gen\n  let id = KeyGenId { session: Session(0), attempt: 0 };\n\n  let mut commitments = HashMap::new();\n  interact_with_all(\n    coordinators,\n    |participant| messages::key_gen::CoordinatorMessage::GenerateKey {\n      id,\n      params: ThresholdParams::new(\n        u16::try_from(THRESHOLD).unwrap(),\n        u16::try_from(COORDINATORS).unwrap(),\n        participant,\n      )\n      .unwrap(),\n      shares: 1,\n    },\n    |participant, msg| match msg {\n      messages::key_gen::ProcessorMessage::Commitments {\n        id: this_id,\n        commitments: mut these_commitments,\n      } => {\n        assert_eq!(this_id, id);\n        assert_eq!(these_commitments.len(), 1);\n        commitments.insert(participant, 
these_commitments.swap_remove(0));\n      }\n      _ => panic!(\"processor didn't return Commitments in response to GenerateKey\"),\n    },\n  )\n  .await;\n\n  // Send the commitments to all parties\n  let mut shares = HashMap::new();\n  interact_with_all(\n    coordinators,\n    |participant| messages::key_gen::CoordinatorMessage::Commitments {\n      id,\n      commitments: clone_without(&commitments, &participant),\n    },\n    |participant, msg| match msg {\n      messages::key_gen::ProcessorMessage::Shares { id: this_id, shares: mut these_shares } => {\n        assert_eq!(this_id, id);\n        assert_eq!(these_shares.len(), 1);\n        shares.insert(participant, these_shares.swap_remove(0));\n      }\n      _ => panic!(\"processor didn't return Shares in response to GenerateKey\"),\n    },\n  )\n  .await;\n\n  // Send the shares\n  let mut substrate_key = None;\n  let mut network_key = None;\n  interact_with_all(\n    coordinators,\n    |participant| messages::key_gen::CoordinatorMessage::Shares {\n      id,\n      shares: vec![shares\n        .iter()\n        .filter_map(|(this_participant, shares)| {\n          shares.get(&participant).cloned().map(|share| (*this_participant, share))\n        })\n        .collect()],\n    },\n    |_, msg| match msg {\n      messages::key_gen::ProcessorMessage::GeneratedKeyPair {\n        id: this_id,\n        substrate_key: this_substrate_key,\n        network_key: this_network_key,\n      } => {\n        assert_eq!(this_id, id);\n        if substrate_key.is_none() {\n          substrate_key = Some(this_substrate_key);\n          network_key = Some(this_network_key.clone());\n        }\n        assert_eq!(substrate_key.unwrap(), this_substrate_key);\n        assert_eq!(network_key.as_ref().unwrap(), &this_network_key);\n      }\n      _ => panic!(\"processor didn't return GeneratedKeyPair in response to GenerateKey\"),\n    },\n  )\n  .await;\n\n  // Confirm the key pair\n  // TODO: Better document 
network_latest_finalized_block's genesis state, and error if a set claims\n  // [0; 32] was finalized\n  let context = SubstrateContext {\n    serai_time: SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs(),\n    network_latest_finalized_block: BlockHash([0; 32]),\n  };\n\n  let key_pair = KeyPair(\n    PublicKey::from_raw(substrate_key.unwrap()),\n    network_key.clone().unwrap().try_into().unwrap(),\n  );\n\n  for coordinator in coordinators {\n    coordinator\n      .send_message(CoordinatorMessage::Substrate(\n        messages::substrate::CoordinatorMessage::ConfirmKeyPair {\n          context,\n          session: id.session,\n          key_pair: key_pair.clone(),\n        },\n      ))\n      .await;\n  }\n\n  key_pair\n}\n\n#[test]\nfn key_gen_test() {\n  for network in EXTERNAL_NETWORKS {\n    let (coordinators, test) = new_test(network);\n\n    test.run(|ops| async move {\n      // Sleep for a second for the message-queue to boot\n      // It isn't an error to start immediately, it just silences an error\n      tokio::time::sleep(core::time::Duration::from_secs(1)).await;\n\n      // Connect to the Message Queues as the coordinator\n      let mut coordinators = coordinators\n        .into_iter()\n        .map(|(handles, key)| Coordinator::new(network, &ops, handles, key))\n        .collect::<Vec<_>>();\n\n      key_gen(&mut coordinators).await;\n    });\n  }\n}\n"
  },
  {
    "path": "tests/processor/src/tests/mod.rs",
    "content": "use std::collections::HashMap;\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::Ciphersuite;\n\nuse dockertest::DockerTest;\n\nuse crate::*;\n\nmod key_gen;\npub(crate) use key_gen::key_gen;\n\nmod batch;\npub(crate) use batch::{recv_batch_preprocesses, sign_batch, substrate_block};\n\nmod send;\n\npub(crate) const COORDINATORS: usize = 4;\npub(crate) const THRESHOLD: usize = ((COORDINATORS * 2) / 3) + 1;\n\nfn clone_without<K: Clone + core::cmp::Eq + core::hash::Hash, V: Clone>(\n  map: &HashMap<K, V>,\n  without: &K,\n) -> HashMap<K, V> {\n  let mut res = map.clone();\n  res.remove(without).unwrap();\n  res\n}\n\nfn new_test(\n  network: ExternalNetworkId,\n) -> (Vec<(Handles, <Ristretto as Ciphersuite>::F)>, DockerTest) {\n  let mut coordinators = vec![];\n  let mut test = DockerTest::new().with_network(dockertest::Network::Isolated);\n  let mut eth_handle = None;\n  for _ in 0 .. COORDINATORS {\n    let (handles, coord_key, compositions) = processor_stack(network, eth_handle.clone());\n    // TODO: Remove this once https://github.com/foundry-rs/foundry/issues/7955\n    // This has all processors share an Ethereum node until we can sync controlled nodes\n    if network == ExternalNetworkId::Ethereum {\n      eth_handle = eth_handle.or_else(|| Some(handles.0.clone()));\n    }\n    coordinators.push((handles, coord_key));\n    for composition in compositions {\n      test.provide_container(composition);\n    }\n  }\n  (coordinators, test)\n}\n"
  },
  {
    "path": "tests/processor/src/tests/send.rs",
    "content": "use std::{\n  collections::{HashSet, HashMap},\n  time::{SystemTime, Duration},\n};\n\nuse dkg::Participant;\n\nuse messages::{sign::SignId, SubstrateContext};\n\nuse serai_client::{\n  coins::primitives::{OutInstruction, OutInstructionWithBalance},\n  in_instructions::primitives::{Batch, InInstruction, InInstructionWithBalance},\n  primitives::{Amount, BlockHash, ExternalBalance, SeraiAddress, EXTERNAL_NETWORKS},\n  validator_sets::primitives::Session,\n};\n\nuse serai_db::MemDb;\nuse processor::networks::{Network, Bitcoin, Ethereum, Monero};\n\nuse crate::{*, tests::*};\n\n#[allow(unused)]\npub(crate) async fn recv_sign_preprocesses(\n  coordinators: &mut [Coordinator],\n  session: Session,\n  attempt: u32,\n) -> (SignId, HashMap<Participant, Vec<u8>>) {\n  let mut id = None;\n  let mut preprocesses = HashMap::new();\n  for (i, coordinator) in coordinators.iter_mut().enumerate() {\n    let i = Participant::new(u16::try_from(i).unwrap() + 1).unwrap();\n\n    let msg = coordinator.recv_message().await;\n    match msg {\n      messages::ProcessorMessage::Sign(messages::sign::ProcessorMessage::Preprocess {\n        id: this_id,\n        preprocesses: mut these_preprocesses,\n      }) => {\n        if id.is_none() {\n          assert_eq!(&this_id.session, &session);\n          assert_eq!(this_id.attempt, attempt);\n          id = Some(this_id.clone());\n        }\n        assert_eq!(&this_id, id.as_ref().unwrap());\n\n        assert_eq!(these_preprocesses.len(), 1);\n        preprocesses.insert(i, these_preprocesses.swap_remove(0));\n      }\n      _ => panic!(\"processor didn't send sign preprocess\"),\n    }\n  }\n\n  // Reduce the preprocesses down to the threshold\n  while preprocesses.len() > THRESHOLD {\n    preprocesses.remove(\n      &Participant::new(\n        u16::try_from(OsRng.next_u64() % u64::try_from(COORDINATORS).unwrap()).unwrap() + 1,\n      )\n      .unwrap(),\n    );\n  }\n\n  (id.unwrap(), 
preprocesses)\n}\n\n#[allow(unused)]\npub(crate) async fn sign_tx(\n  coordinators: &mut [Coordinator],\n  session: Session,\n  id: SignId,\n  preprocesses: HashMap<Participant, Vec<u8>>,\n) -> Vec<u8> {\n  assert_eq!(preprocesses.len(), THRESHOLD);\n\n  for (i, coordinator) in coordinators.iter_mut().enumerate() {\n    let i = Participant::new(u16::try_from(i).unwrap() + 1).unwrap();\n\n    if preprocesses.contains_key(&i) {\n      coordinator\n        .send_message(messages::sign::CoordinatorMessage::Preprocesses {\n          id: id.clone(),\n          preprocesses: clone_without(&preprocesses, &i),\n        })\n        .await;\n    }\n  }\n\n  let mut shares = HashMap::new();\n  for (i, coordinator) in coordinators.iter_mut().enumerate() {\n    let i = Participant::new(u16::try_from(i).unwrap() + 1).unwrap();\n\n    if preprocesses.contains_key(&i) {\n      match coordinator.recv_message().await {\n        messages::ProcessorMessage::Sign(messages::sign::ProcessorMessage::Share {\n          id: this_id,\n          shares: mut these_shares,\n        }) => {\n          assert_eq!(&this_id, &id);\n          assert_eq!(these_shares.len(), 1);\n          shares.insert(i, these_shares.swap_remove(0));\n        }\n        _ => panic!(\"processor didn't send TX shares\"),\n      }\n    }\n  }\n\n  for (i, coordinator) in coordinators.iter_mut().enumerate() {\n    let i = Participant::new(u16::try_from(i).unwrap() + 1).unwrap();\n\n    if preprocesses.contains_key(&i) {\n      coordinator\n        .send_message(messages::sign::CoordinatorMessage::Shares {\n          id: id.clone(),\n          shares: clone_without(&shares, &i),\n        })\n        .await;\n    }\n  }\n\n  // The selected processors should yield Completed\n  let mut tx = None;\n  for (i, coordinator) in coordinators.iter_mut().enumerate() {\n    let i = Participant::new(u16::try_from(i).unwrap() + 1).unwrap();\n\n    if preprocesses.contains_key(&i) {\n      match coordinator.recv_message().await {\n     
   messages::ProcessorMessage::Sign(messages::sign::ProcessorMessage::Completed {\n          session: this_session,\n          id: this_id,\n          tx: this_tx,\n        }) => {\n          assert_eq!(session, this_session);\n          assert_eq!(&this_id, &id.id);\n\n          if tx.is_none() {\n            tx = Some(this_tx.clone());\n          }\n\n          assert_eq!(tx.as_ref().unwrap(), &this_tx);\n        }\n        _ => panic!(\"processor didn't send Completed\"),\n      }\n    }\n  }\n  tx.unwrap()\n}\n\n#[test]\nfn send_test() {\n  for network in EXTERNAL_NETWORKS {\n    let (coordinators, test) = new_test(network);\n\n    test.run(|ops| async move {\n      tokio::time::sleep(Duration::from_secs(1)).await;\n\n      let mut coordinators = coordinators\n        .into_iter()\n        .map(|(handles, key)| Coordinator::new(network, &ops, handles, key))\n        .collect::<Vec<_>>();\n\n      // Create a wallet before we start generating keys\n      let mut wallet = Wallet::new(network, &ops, coordinators[0].network_handle.clone()).await;\n      coordinators[0].sync(&ops, &coordinators[1 ..]).await;\n\n      // Generate keys\n      let key_pair = key_gen(&mut coordinators).await;\n\n      // Now we have to mine blocks to activate the key\n      // (the first key is activated when the network's time as of a block exceeds the Serai time\n      // it was confirmed at)\n      // Mine multiple sets of medians to ensure the median is sufficiently advanced\n      for _ in 0 .. 
(10 * confirmations(network)) {\n        coordinators[0].add_block(&ops).await;\n        tokio::time::sleep(Duration::from_secs(1)).await;\n      }\n      coordinators[0].sync(&ops, &coordinators[1 ..]).await;\n\n      // Send into the processor's wallet\n      let mut serai_address = [0; 32];\n      OsRng.fill_bytes(&mut serai_address);\n      let instruction = InInstruction::Transfer(SeraiAddress(serai_address));\n      let (tx, balance_sent) =\n        wallet.send_to_address(&ops, &key_pair.1, Some(instruction.clone())).await;\n      for coordinator in &mut coordinators {\n        coordinator.publish_transaction(&ops, &tx).await;\n      }\n\n      // Put the TX past the confirmation depth\n      let mut block_with_tx = None;\n      for _ in 0 .. confirmations(network) {\n        let (hash, _) = coordinators[0].add_block(&ops).await;\n        if block_with_tx.is_none() {\n          block_with_tx = Some(hash);\n        }\n      }\n      coordinators[0].sync(&ops, &coordinators[1 ..]).await;\n\n      // Sleep for 10s\n      // The scanner works on a 5s interval, so this leaves a few s for any processing/latency\n      tokio::time::sleep(Duration::from_secs(10)).await;\n\n      let amount_minted = Amount(\n        balance_sent.amount.0 -\n          (2 * match network {\n            ExternalNetworkId::Bitcoin => Bitcoin::COST_TO_AGGREGATE,\n            ExternalNetworkId::Ethereum => Ethereum::<MemDb>::COST_TO_AGGREGATE,\n            ExternalNetworkId::Monero => Monero::COST_TO_AGGREGATE,\n          }),\n      );\n\n      let expected_batch = Batch {\n        network,\n        id: 0,\n        block: BlockHash(block_with_tx.unwrap()),\n        instructions: vec![InInstructionWithBalance {\n          instruction,\n          balance: ExternalBalance { coin: balance_sent.coin, amount: amount_minted },\n        }],\n      };\n\n      // Make sure the processors picked it up by checking they're trying to sign a batch for it\n      let (id, preprocesses) =\n        
recv_batch_preprocesses(&mut coordinators, Session(0), &expected_batch, 0).await;\n\n      // Continue with signing the batch\n      let batch = sign_batch(&mut coordinators, key_pair.0 .0, id, preprocesses).await;\n\n      // Check it\n      assert_eq!(batch.batch, expected_batch);\n\n      // Fire a SubstrateBlock with a burn\n      let substrate_block_num = (OsRng.next_u64() % 4_000_000_000u64) + 1;\n      let serai_time = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs();\n\n      let mut plans = vec![];\n      for coordinator in &mut coordinators {\n        let these_plans = substrate_block(\n          coordinator,\n          messages::substrate::CoordinatorMessage::SubstrateBlock {\n            context: SubstrateContext {\n              serai_time,\n              network_latest_finalized_block: batch.batch.block,\n            },\n            block: substrate_block_num,\n            burns: vec![OutInstructionWithBalance {\n              instruction: OutInstruction { address: wallet.address(), data: None },\n              balance: ExternalBalance { coin: balance_sent.coin, amount: amount_minted },\n            }],\n            batches: vec![batch.batch.id],\n          },\n        )\n        .await;\n\n        if plans.is_empty() {\n          plans = these_plans;\n        } else {\n          assert_eq!(plans, these_plans);\n        }\n      }\n      assert_eq!(plans.len(), 1);\n\n      // Start signing the TX\n      let (mut id, mut preprocesses) =\n        recv_sign_preprocesses(&mut coordinators, Session(0), 0).await;\n      assert_eq!(id, SignId { session: Session(0), id: plans[0].id, attempt: 0 });\n\n      // Trigger a random amount of re-attempts\n      for attempt in 1 ..= u32::try_from(OsRng.next_u64() % 4).unwrap() {\n        // TODO: Double check how the processor handles this ID field\n        // It should be able to assert it's perfectly sequential\n        id.attempt = attempt;\n        for coordinator in &mut coordinators 
{\n          coordinator\n            .send_message(messages::sign::CoordinatorMessage::Reattempt { id: id.clone() })\n            .await;\n        }\n        (id, preprocesses) = recv_sign_preprocesses(&mut coordinators, Session(0), attempt).await;\n      }\n      let participating = preprocesses.keys().copied().collect::<Vec<_>>();\n\n      let tx_id = sign_tx(&mut coordinators, Session(0), id.clone(), preprocesses).await;\n\n      // Make sure all participating nodes published the TX\n      let participating =\n        participating.iter().map(|p| usize::from(u16::from(*p) - 1)).collect::<HashSet<_>>();\n      for participant in &participating {\n        assert!(coordinators[*participant].get_published_transaction(&ops, &tx_id).await.is_some());\n      }\n\n      // Publish this transaction to the left out nodes\n      let tx = coordinators[*participating.iter().next().unwrap()]\n        .get_published_transaction(&ops, &tx_id)\n        .await\n        .unwrap();\n      for (i, coordinator) in coordinators.iter_mut().enumerate() {\n        if !participating.contains(&i) {\n          coordinator.publish_eventuality_completion(&ops, &tx).await;\n          // Tell them of it as a completion of the relevant signing nodes\n          coordinator\n            .send_message(messages::sign::CoordinatorMessage::Completed {\n              session: Session(0),\n              id: id.id,\n              tx: tx_id.clone(),\n            })\n            .await;\n          // Verify they send Completed back\n          match coordinator.recv_message().await {\n            messages::ProcessorMessage::Sign(messages::sign::ProcessorMessage::Completed {\n              session,\n              id: this_id,\n              tx: this_tx,\n            }) => {\n              assert_eq!(session, Session(0));\n              assert_eq!(&this_id, &id.id);\n              assert_eq!(this_tx, tx_id);\n            }\n            _ => panic!(\"processor didn't send Completed\"),\n          }\n        
}\n      }\n\n      // TODO: Test the Eventuality from the blockchain, instead of from the coordinator\n      // TODO: Test what happens when Completed is sent with a non-existent TX ID\n      // TODO: Test what happens when Completed is sent with a non-completing TX ID\n    });\n  }\n}\n"
  },
  {
    "path": "tests/reproducible-runtime/Cargo.toml",
    "content": "[package]\nname = \"serai-reproducible-runtime-tests\"\nversion = \"0.1.0\"\ndescription = \"Tests the Serai runtime can be reproducibly built\"\nlicense = \"AGPL-3.0-only\"\nrepository = \"https://github.com/serai-dex/serai/tree/develop/tests/reproducible-runtime\"\nauthors = [\"Luke Parker <lukeparker5132@gmail.com>\"]\nkeywords = []\nedition = \"2021\"\npublish = false\n\n[package.metadata.docs.rs]\nall-features = true\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints]\nworkspace = true\n\n[dependencies]\nrand_core = \"0.6\"\nhex = \"0.4\"\n\ndockertest = \"0.5\"\nserai-docker-tests = { path = \"../docker\" }\n\ntokio = { version = \"1\", features = [\"time\"] }\n"
  },
  {
    "path": "tests/reproducible-runtime/LICENSE",
    "content": "AGPL-3.0-only license\n\nCopyright (c) 2023 Luke Parker\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License Version 3 as\npublished by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n"
  },
  {
    "path": "tests/reproducible-runtime/src/lib.rs",
    "content": "#[test]\npub fn reproducibly_builds() {\n  use std::{collections::HashSet, process::Command};\n\n  use rand_core::{RngCore, OsRng};\n\n  use dockertest::{PullPolicy, Image, TestBodySpecification, DockerTest};\n\n  const RUNS: usize = 3;\n  const TIMEOUT: u16 = 180 * 60; // 3 hours\n\n  serai_docker_tests::build(\"runtime\".to_string());\n\n  let mut ids = vec![[0; 8]; RUNS];\n  for id in &mut ids {\n    OsRng.fill_bytes(id);\n  }\n\n  let mut test = DockerTest::new().with_network(dockertest::Network::Isolated);\n  for id in &ids {\n    test.provide_container(\n      TestBodySpecification::with_image(\n        Image::with_repository(\"serai-dev-runtime\").pull_policy(PullPolicy::Never),\n      )\n      .set_handle(format!(\"runtime-build-{}\", hex::encode(id)))\n      .replace_cmd(vec![\n        \"sh\".to_string(),\n        \"-c\".to_string(),\n        // Sleep for a minute after building to prevent the container from closing before we\n        // retrieve the hash\n        \"cd /serai/substrate/runtime && cargo clean && cargo build --release &&\n           printf \\\"Runtime hash: \\\" > hash &&\n           sha256sum /serai/target/release/wbuild/serai-runtime/serai_runtime.wasm >> hash &&\n           cat hash &&\n           sleep 60\"\n          .to_string(),\n      ]),\n    );\n  }\n\n  test.run(|_| async {\n    let ids = ids;\n    let mut containers = vec![];\n    for container in String::from_utf8(\n      Command::new(\"docker\").arg(\"ps\").arg(\"--format\").arg(\"{{.Names}}\").output().unwrap().stdout,\n    )\n    .expect(\"output wasn't utf-8\")\n    .lines()\n    {\n      for id in &ids {\n        if container.contains(&hex::encode(id)) {\n          containers.push(container.trim().to_string());\n        }\n      }\n    }\n    assert_eq!(containers.len(), RUNS, \"couldn't find all containers\");\n\n    let mut res = vec![None; RUNS];\n    'attempt: for _ in 0 .. 
(TIMEOUT / 10) {\n      tokio::time::sleep(core::time::Duration::from_secs(10)).await;\n\n      'runner: for (i, container) in containers.iter().enumerate() {\n        if res[i].is_some() {\n          continue;\n        }\n\n        let logs = Command::new(\"docker\").arg(\"logs\").arg(container).output().unwrap();\n        let Some(last_log) =\n          std::str::from_utf8(&logs.stdout).expect(\"output wasn't utf-8\").lines().last()\n        else {\n          continue 'runner;\n        };\n\n        let split = last_log.split(\"Runtime hash: \").collect::<Vec<_>>();\n        if split.len() == 2 {\n          res[i] = Some(split[1].to_string());\n          continue 'runner;\n        }\n      }\n\n      for item in &res {\n        if item.is_none() {\n          continue 'attempt;\n        }\n      }\n      break;\n    }\n\n    // If we didn't get results from all runners, panic\n    for item in &res {\n      if item.is_none() {\n        panic!(\"couldn't get runtime hashes within allowed time\");\n      }\n    }\n    let mut identical = HashSet::new();\n    for res in res.clone() {\n      identical.insert(res.unwrap());\n    }\n    assert_eq!(identical.len(), 1, \"got different runtime hashes {res:?}\");\n  });\n}\n"
  }
]