[
  {
    "path": ".cargo/audit.toml",
    "content": "[advisories]\nignore = []\n"
  },
  {
    "path": ".cargo/config.toml",
    "content": "[alias]\nxclippy = [\n    \"clippy\", \"--workspace\", \"--tests\", \"--all-targets\", \"--all-features\", \"--\",\n    \"-Wclippy::all\",\n    \"-Wclippy::disallowed-methods\",\n]\n"
  },
  {
    "path": ".config/nextest.toml",
    "content": "[profile.default]\nslow-timeout = { period = \"60s\", terminate-after = 1 }\nleak-timeout = \"10s\"\n\n[test-groups]\nserial-integration = { max-threads = 1 }\n\n[[profile.default.overrides]]\nfilter = 'test(serial_integration::)'\ntest-group = 'serial-integration'\n"
  },
  {
    "path": ".dockerignore",
    "content": "# Ignore everything\n**\n\n# Allow Rust source code\n!src\n!crates\n!tests\n!Cargo.*\n!tools/init.sh\n!tools/node_config\n!tools/node_config/**/*\n!tools/liveness.sh\n!tools/config/nextest.toml\n!.git\n\n!LICENSE\n"
  },
  {
    "path": ".github/CODEOWNERS",
    "content": "* @topos-protocol/protocol\n\n# Crypto Internals\n\n/crates/topos-crypto/ @topos-protocol/protocol @topos-protocol/crypto @Nashtare\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/bug_report.md",
    "content": "---\nname: Bug Report\nabout: Create a report to help us solve bugs!\nlabels: bug\n---\n\n<!--\n        ✰  Thanks for opening an issue! ✰\n  Before submitting please review the template.\n  Please also ensure that this is not a duplicate issue :)\n-->\n\n## Summary\n\n<!-- Concisely describe the issue -->\n\n## Steps to Reproduce\n\n<!-- What commands in order should someone run to reproduce your problem? -->\n\n## Expected behavior\n\n<!-- A clear and concise description of what you expected to happen -->\n\n## Screenshots\n\n<!-- If applicable, add screenshots to help explain your problem. -->\n\n## Version\n\n- OS Name: [e.g. Ubuntu]\n- OS Version: [e.g. 20.04]\n- CLI Version (output of `topos --version`)\n\n## Additional context\n\n<!-- Add any other context about the problem here. -->\n"
  },
  {
    "path": ".github/actions/install-rust/action.yml",
    "content": "\nname: 'Install Rust toolchain'\ndescription: 'Install a rust toolchain and cache the crates index'\n\ninputs:\n  toolchain:\n    description: 'Default toolchain to install'\n    required: false\n    default: 'stable'\n  target:\n    description: 'Default target to add'\n    required: false\n    default: 'x86_64-unknown-linux-gnu'\n  msrv:\n    description: 'Enable rust-toolchain version for msrv'\n    required: false\n    type: boolean\n    default: false\n  lockfiles:\n    description: 'Path glob for Cargo.lock files to use as cache keys'\n    required: false\n    default: '**/Cargo.lock'\n  components:\n    description: 'Components to install'\n    required: false\n  tools:\n    description: 'Tools to install'\n    required: false\n    default: nextest,protoc\n  AWS_ACCESS_KEY_ID:\n    required: true\n  AWS_SECRET_ACCESS_KEY:\n    required: true\n  with_cache:\n    required: false\n    type: boolean\n    default: true\n\n\nruns:\n  using: composite\n  steps:\n    - name: Environment\n      shell: bash\n      run: |\n        rustup target add ${{ inputs.target }}\n        if ${{ inputs.msrv }}; then\n          rustup override unset\n          rustup show\n        else\n          rustup set profile minimal\n          rustup update \"${{ inputs.toolchain }}\" --no-self-update\n          rustup override set \"${{ inputs.toolchain }}\"\n        fi\n\n        if [ ! -z \"${{ inputs.components }}\" ]; then\n          rustup component add $(echo ${{ inputs.components }}|sed 's/,/ /')\n        fi\n\n        echo CARGO_TERM_COLOR=\"always\" >> \"$GITHUB_ENV\"\n\n        # Disable incremental compilation.\n        #\n        # Incremental compilation is useful as part of an edit-build-test-edit cycle,\n        # as it lets the compiler avoid recompiling code that hasn't changed. However,\n        # on CI, we're not making small edits; we're almost always building the entire\n        # project from scratch. 
Thus, incremental compilation on CI actually\n        # introduces *additional* overhead to support making future builds\n        # faster...but no future builds will ever occur in any given CI environment.\n        #\n        # See https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow\n        # for details.\n        echo CARGO_INCREMENTAL=0 >> \"$GITHUB_ENV\"\n\n        # Allow more retries for network requests in cargo (downloading crates) and\n        # rustup (installing toolchains). This should help to reduce flaky CI failures\n        # from transient network timeouts or other issues.\n        cat >> \"$GITHUB_ENV\" <<EOF\n        CARGO_NET_RETRY=10\n        RUSTUP_MAX_RETRIES=10\n        EOF\n\n        # Don't emit giant backtraces in the CI logs.\n        echo RUST_BACKTRACE=short >> \"$GITHUB_ENV\"\n        echo RUSTFLAGS=\"-D warnings\" >> \"$GITHUB_ENV\"\n        echo RUSTDOCFLAGS=\"-D warnings\" >> \"$GITHUB_ENV\"\n\n        if ${{ inputs.with_cache }}; then\n        cat >> \"$GITHUB_ENV\" <<EOF\n        RUSTC_WRAPPER=sccache\n        EOF\n        fi\n\n        cat >> \"$GITHUB_ENV\" <<EOF\n        CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse\n        EOF\n        cat >> \"$GITHUB_ENV\" <<EOF\n        AWS_SHARED_CREDENTIALS_FILE=${{ github.workspace }}/.aws/credentials\n        SCCACHE_S3_KEY_PREFIX=topos\n        SCCACHE_BUCKET=cicd-devnet-1-sccache\n        SCCACHE_REGION=us-east-1\n        EOF\n\n    - name: Run sccache-cache\n      if: ${{ inputs.with_cache == 'true' }}\n      uses: mozilla-actions/sccache-action@v0.0.3\n\n    - name: Configure AWS credentials for cicd-devnet-1 account\n      if: ${{ inputs.with_cache == 'true' }}\n      uses: aws-actions/configure-aws-credentials@v2\n      with:\n        aws-access-key-id: ${{ inputs.AWS_ACCESS_KEY_ID }}\n        aws-secret-access-key: ${{ inputs.AWS_SECRET_ACCESS_KEY }}\n        role-to-assume: arn:aws:iam::367397670706:role/CacheBucketAccessRole\n        role-session-name: 
RobotToposware-session\n        aws-region: us-east-1\n        role-skip-session-tagging: true\n        role-duration-seconds: 3600\n\n    - name: Add profile credentials to .aws/credentials\n      if: ${{ inputs.with_cache == 'true' }}\n      shell: bash\n      run: |\n        aws configure set aws_access_key_id ${{ env.AWS_ACCESS_KEY_ID }} --profile default\n        aws configure set aws_secret_access_key ${{ env.AWS_SECRET_ACCESS_KEY }} --profile default\n        aws configure set aws_session_token ${{ env.AWS_SESSION_TOKEN }} --profile default\n\n    - uses: taiki-e/install-action@v2\n      with:\n        tool: ${{ inputs.tools }}\n"
  },
  {
    "path": ".github/workflows/coverage.yml",
    "content": "name: Coverage\n\non:\n  push:\n    branches:\n      - main\n      - \"!release/**\"\n    paths: crates/**\n  pull_request:\n    types: [opened, synchronize, reopened, ready_for_review]\n    paths: crates/**\n  workflow_dispatch:\n\njobs:\n  codecov:\n    name: Code coverage\n    runs-on: ubuntu-latest-16-core\n    if: ${{ ! startsWith(github.head_ref, 'release') }}\n\n    steps:\n      - uses: actions/checkout@v4\n\n      - uses: ./.github/actions/install-rust\n        with:\n          components: llvm-tools-preview\n          tools: grcov,nextest,protoc\n          AWS_ACCESS_KEY_ID: ${{ secrets.ROBOT_AWS_ACCESS_KEY_ID}}\n          AWS_SECRET_ACCESS_KEY: ${{ secrets.ROBOT_AWS_SECRET_ACCESS_KEY}}\n\n      - name: Build\n        env:\n          RUSTFLAGS: \"-Cinstrument-coverage\"\n          RUSTDOCFLAGS: \"-Cinstrument-coverage\"\n        run: cargo build --all\n\n      - name: Run tests\n        env:\n          RUST_LOG: info\n          RUSTFLAGS: \"-Cinstrument-coverage\"\n          RUSTDOCFLAGS: \"-Cinstrument-coverage\"\n          LLVM_PROFILE_FILE: \"codecov-instrumentation-%p-%m.profraw\"\n        run: cargo nextest run --workspace --exclude topos-sequencer-subnet-runtime\n\n      - name: Run grcov\n        run: |\n          grcov . --binary-path target/debug/ -s . \\\n            -t lcov \\\n            --branch \\\n            --ignore-not-existing \\\n            --ignore 'crates/topos-test-sdk/src/grpc/behaviour/*' \\\n            --ignore 'crates/topos-core/src/api/grpc/generated/*' \\\n            --ignore '../**' \\\n            --ignore '/*' \\\n            -o coverage.lcov\n\n      - name: Upload to codecov.io\n        uses: codecov/codecov-action@v3\n        with:\n          token: ${{ secrets.CODECOV_TOKEN }}\n          fail_ci_if_error: true\n          verbose: true\n"
  },
  {
    "path": ".github/workflows/doc.yml",
    "content": "name: Build and host documentation on GH pages\non:\n  push:\n    branches:\n      - main\n\njobs:\n  build:\n    name: Build documentation\n    runs-on: ubuntu-latest\n    steps:\n    - name: Checkout \n      uses: actions/checkout@v4\n    - uses: ./.github/actions/install-rust\n      with:\n        toolchain: nightly\n        AWS_ACCESS_KEY_ID: ${{ secrets.ROBOT_AWS_ACCESS_KEY_ID}}\n        AWS_SECRET_ACCESS_KEY: ${{ secrets.ROBOT_AWS_SECRET_ACCESS_KEY}}\n\n    - name: Build Documentation\n      run: cargo +nightly doc --no-deps --all --all-features\n\n    - name: Add index file\n      run: |\n        mkdir host-docs\n        echo \"<meta http-equiv=\\\"refresh\\\" content=\\\"0; url=topos\\\">\" > target/doc/index.html\n        cp -r target/doc/* ./host-docs\n    - name: Upload documentation\n      uses: actions/upload-pages-artifact@v2.0.0\n      with:\n        path: \"host-docs/\"\n\n  deploy:\n    name: Deploy documentation\n    needs: build\n    permissions:\n      pages: write\n      id-token: write\n\n    environment:\n      name: github-pages\n      url: ${{ steps.deployment.outputs.page_url }}\n    runs-on: ubuntu-latest\n    steps:\n      - name: Deploy to GitHub Pages\n        id: deployment\n        uses: actions/deploy-pages@v2\n"
  },
  {
    "path": ".github/workflows/docker_build_push.yml",
    "content": "name: Docker build and push\n\non:\n  push:\n    branches: [main, debug/**]\n  pull_request:\n    types: [opened, synchronize, reopened, ready_for_review]\n  release:\n    types: [created]\n\nconcurrency:\n  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}\n  cancel-in-progress: true\n\njobs:\n  docker:\n    uses: ./.github/workflows/docker_utils.yml\n    secrets: inherit\n\n  integration-erc20-e2e:\n    runs-on: ubuntu-latest\n    needs: docker\n    if: ${{ github.event_name == 'pull_request' }}\n    steps:\n      - name: Set environment\n        run: |\n          # It's fine to assume a single tag. Our tagging strategy follows a 1:1 mapping of image:tag\n          tags=${{ needs.docker.outputs.tags }}\n          echo \"docker_tag=${tags#*:}\" >> $GITHUB_ENV\n        shell: bash\n\n      - uses: convictional/trigger-workflow-and-wait@v1.6.1\n        with:\n          owner: topos-protocol\n          repo: e2e-tests\n          github_token: ${{ secrets.ROBOT_PAT_TRIGGER_E2E_WORKFLOWS }}\n          workflow_file_name: topos:integration-tests.yml\n          ref: main\n          wait_interval: 60\n          client_payload: '{ \"topos-docker-tag\": \"${{ env.docker_tag }}\" }'\n\n  frontend-erc20-e2e:\n    runs-on: ubuntu-latest\n    needs: docker\n    if: ${{ github.event_name == 'pull_request' }}\n    steps:\n      - name: Set environment\n        run: |\n          # It's fine to assume a single tag. 
Our tagging strategy follows a 1:1 mapping of image:tag\n          tags=${{ needs.docker.outputs.tags }}\n          echo \"docker_tag=${tags#*:}\" >> $GITHUB_ENV\n        shell: bash\n\n      - uses: convictional/trigger-workflow-and-wait@v1.6.1\n        with:\n          owner: topos-protocol\n          repo: e2e-tests\n          github_token: ${{ secrets.ROBOT_PAT_TRIGGER_E2E_WORKFLOWS }}\n          workflow_file_name: frontend:erc20-messaging.yml\n          ref: main\n          wait_interval: 60\n          client_payload: '{ \"topos-docker-tag\": \"${{ env.docker_tag }}\" }'\n"
  },
  {
    "path": ".github/workflows/docker_utils.yml",
    "content": "name: template - docker\n\nenv:\n  REGISTRY: ghcr.io\n  IMAGE_NAME: ${{ github.repository }}\n  AWS_SHARED_CREDENTIALS_FILE: \"${{ github.workspace }}/.aws/credentials\"\n\non:\n  workflow_call:\n    inputs:\n      # Docker target (test | fmt | lint | topos | etc)\n      target:\n        required: false\n        type: string\n        default: topos\n      # Rust toolchain version (stable | nightly)\n      toolchain_version:\n        required: false\n        type: string\n        default: stable\n    outputs:\n      tags:\n        description: \"Docker tags\"\n        value: ${{ jobs.docker.outputs.tags }}\n\njobs:\n  docker:\n    name: Build and push docker image to GitHub Container Registry\n    runs-on: ubuntu-latest-16-core\n    outputs:\n      tags: ${{ steps.meta.outputs.tags }}\n    steps:\n      - name: Checkout\n        uses: actions/checkout@v4\n\n      - name: Set up QEMU\n        uses: docker/setup-qemu-action@v2\n\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@v2\n\n      - name: Inject slug/short variables\n        uses: rlespinasse/github-slug-action@v4\n        with:\n          short-length: 7\n\n      - name: Login to GitHub Container Registry\n        uses: docker/login-action@v2\n        with:\n          registry: ${{ env.REGISTRY }}\n          username: ${{ github.actor }}\n          password: ${{ secrets.GITHUB_TOKEN }}\n\n      - name: Configure AWS credentials for cicd-devnet-1 account\n        uses: aws-actions/configure-aws-credentials@v2\n        with:\n          aws-access-key-id: ${{ secrets.ROBOT_AWS_ACCESS_KEY_ID }}\n          aws-secret-access-key: ${{ secrets.ROBOT_AWS_SECRET_ACCESS_KEY }}\n          role-to-assume: arn:aws:iam::367397670706:role/CacheBucketAccessRole\n          role-session-name: RobotToposware-session\n          aws-region: us-east-1\n          role-skip-session-tagging: true\n          role-duration-seconds: 3600\n\n      - name: Add profile credentials to 
.aws/credentials\n        run: |\n          aws configure set aws_access_key_id ${{ env.AWS_ACCESS_KEY_ID }} --profile default\n          aws configure set aws_secret_access_key ${{ env.AWS_SECRET_ACCESS_KEY }} --profile default\n          aws configure set aws_session_token ${{ env.AWS_SESSION_TOKEN }} --profile default\n\n      - name: Extract metadata (tags, labels) for Docker\n        id: meta\n        uses: docker/metadata-action@v4\n        with:\n          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}\n          tags: |\n            type=ref,event=branch\n            type=ref,event=pr\n            type=semver,pattern={{version}}\n            type=semver,pattern={{major}}.{{minor}}\n\n      - name: Push to GitHub Container Registry\n        uses: docker/build-push-action@v3\n        with:\n          context: .\n          platforms: linux/amd64,linux/arm64\n          # push only images targeting topos (e.g.: exclude test, lint, etc.)\n          push: ${{ inputs.target == 'topos' }}\n          target: ${{ inputs.target }}\n          tags: ${{ steps.meta.outputs.tags }}\n          labels: ${{ steps.meta.outputs.labels }}\n          secret-files: |\n            \"aws=${{ github.workspace }}/.aws/credentials\"\n          build-args: |\n            RUSTUP_TOOLCHAIN=${{ inputs.toolchain_version }}\n            SCCACHE_S3_KEY_PREFIX=${{ inputs.target }}\n            SCCACHE_BUCKET=cicd-devnet-1-sccache\n            SCCACHE_REGION=us-east-1\n            RUSTC_WRAPPER=/usr/local/cargo/bin/sccache\n"
  },
  {
    "path": ".github/workflows/pr-checking.yml",
    "content": "name: Checking PR semantic\n\non:\n  pull_request_target:\n    types:\n      - opened\n      - edited\n      - synchronize\n\njobs:\n  title:\n    name: Validate PR title\n    runs-on: ubuntu-latest\n    steps:\n      - uses: amannn/action-semantic-pull-request@v5\n        env:\n          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n        with:\n          ignoreLabels: |\n            release\n\n  commits:\n    name: Validate PR commits\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@v4\n        with:\n          fetch-depth: 0\n      - uses: wagoid/commitlint-github-action@v5\n"
  },
  {
    "path": ".github/workflows/quality.yml",
    "content": "name: Quality\n\non:\n  push:\n    branches:\n      - main\n  pull_request:\n    types: [opened, synchronize, reopened, ready_for_review]\n  workflow_dispatch:\n\njobs:\n\n  readme:\n    name: Readme - checking readme compatibility\n    runs-on: ubuntu-latest\n    steps:\n      - name: Checkout\n        uses: actions/checkout@v4\n\n      - uses: ./.github/actions/install-rust\n        with:\n          with_cache: false\n          tools: cargo-readme\n          AWS_ACCESS_KEY_ID: ${{ secrets.ROBOT_AWS_ACCESS_KEY_ID}}\n          AWS_SECRET_ACCESS_KEY: ${{ secrets.ROBOT_AWS_SECRET_ACCESS_KEY}}\n\n      - name: checking readme\n        run: ./scripts/check_readme.sh\n\n\n  audit:\n    name: Audit - crate security vulnerabilities\n    runs-on: ubuntu-latest\n    steps:\n      - name: Checkout\n        uses: actions/checkout@v4\n\n      - uses: ./.github/actions/install-rust\n        with:\n          with_cache: false\n          tools: cargo-audit\n          AWS_ACCESS_KEY_ID: ${{ secrets.ROBOT_AWS_ACCESS_KEY_ID}}\n          AWS_SECRET_ACCESS_KEY: ${{ secrets.ROBOT_AWS_SECRET_ACCESS_KEY}}\n\n      - name: Cargo audit\n        run: cargo audit\n\n  lint:\n    name: Lint - Clippy\n    runs-on: ubuntu-latest-16-core\n    env:\n      CARGO_TERM_COLOR: always\n      RUSTFLAGS: -Dwarnings\n      RUST_BACKTRACE: 1\n    steps:\n      - name: Checkout topos repo\n        uses: actions/checkout@v4\n\n      - name: Install Rust\n        uses: ./.github/actions/install-rust\n        with:\n          components: clippy\n          AWS_ACCESS_KEY_ID: ${{ secrets.ROBOT_AWS_ACCESS_KEY_ID}}\n          AWS_SECRET_ACCESS_KEY: ${{ secrets.ROBOT_AWS_SECRET_ACCESS_KEY}}\n\n      - name: Install Protoc\n        uses: arduino/setup-protoc@v1\n\n      - name: Checkout topos-smart-contracts repo\n        uses: actions/checkout@v4\n        with:\n          repository: topos-protocol/topos-smart-contracts\n          ref: ${{ env.CONTRACTS_REF }}\n          path: contracts\n\n      - 
name: Set up NodeJS\n        uses: actions/setup-node@v3\n        with:\n          node-version: 16\n          cache: \"npm\"\n          cache-dependency-path: contracts/package-lock.json\n\n      - name: Install dependencies\n        working-directory: contracts\n        run: npm ci\n\n      - name: Build contracts\n        working-directory: contracts\n        run: npm run build\n\n      - name: Move contract artifacts\n        run: mv contracts/artifacts ./\n\n      - name: Cargo xclippy\n        run: cargo xclippy\n\n  fmt:\n    name: Check - Format\n    runs-on: ubuntu-latest\n    steps:\n      - name: Checkout\n        uses: actions/checkout@v4\n\n      - uses: ./.github/actions/install-rust\n        with:\n          with_cache: false\n          toolchain: nightly\n          components: rustfmt\n          AWS_ACCESS_KEY_ID: ${{ secrets.ROBOT_AWS_ACCESS_KEY_ID}}\n          AWS_SECRET_ACCESS_KEY: ${{ secrets.ROBOT_AWS_SECRET_ACCESS_KEY}}\n\n      - name: Cargo fmt\n        run: cargo +nightly fmt --all -- --check\n\n  msrv:\n    name: Check - MSRV\n    runs-on: ubuntu-latest-16-core\n    steps:\n      - name: Checkout\n        uses: actions/checkout@v4\n\n      - uses: ./.github/actions/install-rust\n        with:\n          msrv: true\n          AWS_ACCESS_KEY_ID: ${{ secrets.ROBOT_AWS_ACCESS_KEY_ID}}\n          AWS_SECRET_ACCESS_KEY: ${{ secrets.ROBOT_AWS_SECRET_ACCESS_KEY}}\n\n      - name: Cargo check\n        run: cargo check --workspace --all-features --locked\n"
  },
  {
    "path": ".github/workflows/release.yml",
    "content": "name: Release\n\non:\n  release:\n    types: [published]\n\nenv:\n  CARGO_TERM_COLOR: always\n  CARGO_INCREMENTAL: 0\n  CARGO_NET_RETRY: 10\n  RUSTUP_MAX_RETRIES: 10\n  RUST_BACKTRACE: short\n  CARGO: cargo\n  CROSS_VERSION: v0.2.5\n\njobs:\n  release-build:\n    timeout-minutes: 30\n    runs-on: ${{ matrix.os }}\n    strategy:\n      matrix:\n        include:\n        - build: stable-x86\n          rust: stable\n          os: ubuntu-latest-16-core\n          target: x86_64-unknown-linux-gnu\n\n        - build: linux\n          rust: stable\n          os: ubuntu-latest-16-core\n          target: x86_64-unknown-linux-musl\n\n        - build: stable-aarch64\n          rust: stable\n          os: ubuntu-latest-16-core\n          target: aarch64-unknown-linux-gnu\n\n    steps:\n      - uses: actions/checkout@v4\n      - uses: ./.github/actions/install-rust\n        with:\n          toolchain: ${{ matrix.rust }}\n          target: ${{ matrix.target }}\n          with_cache: false\n\n      - name: Use Cross\n        if: matrix.os == 'ubuntu-latest-16-core' && matrix.target != ''\n        shell: bash\n        run: |\n          dir=\"$RUNNER_TEMP/cross-download\"\n          mkdir \"$dir\"\n          echo \"$dir\" >> $GITHUB_PATH\n          cd \"$dir\"\n          curl -LO \"https://github.com/cross-rs/cross/releases/download/$CROSS_VERSION/cross-x86_64-unknown-linux-musl.tar.gz\"\n          tar xf cross-x86_64-unknown-linux-musl.tar.gz\n          echo \"CARGO=cross\" >> $GITHUB_ENV\n\n      - name: Set target variables\n        shell: bash\n        run: |\n          echo \"TARGET_FLAGS=--target ${{ matrix.target }}\" >> $GITHUB_ENV\n          echo \"TARGET_DIR=./target/${{ matrix.target }}\" >> $GITHUB_ENV\n\n      - name: Show command used for Cargo\n        shell: bash\n        run: |\n          echo \"cargo command is: ${{ env.CARGO }}\"\n          echo \"target flag is: ${{ env.TARGET_FLAGS }}\"\n          echo \"target dir is: ${{ env.TARGET_DIR 
}}\"\n\n      - name: Build release binary\n        shell: bash\n        run: |\n          ${{ env.CARGO }} build --release ${{ env.TARGET_FLAGS }}\n          bin=\"target/${{ matrix.target }}/release/topos\"\n          echo \"BIN=$bin\" >> $GITHUB_ENV\n\n      - name: Rename binary\n        shell: bash\n        run: |\n          export arch=$(echo ${{ matrix.target }} | cut -d- -f1)\n          export version=${GITHUB_REF#refs/*/}\n          echo \"arch=${arch}\" >> $GITHUB_ENV\n          echo \"version=${version}\" >> $GITHUB_ENV\n          mv $BIN topos-${version}-${arch}\n          tar -czvf topos-${version}-${arch}.tgz topos-${version}-${arch}\n\n      - name: Upload release artifacts for ${{ matrix.target }} architecture\n        uses: actions/upload-artifact@v3\n        with:\n          name: topos-${{ matrix.target }}\n          if-no-files-found: error\n          path: |\n            topos-${{ env.version }}-${{ env.arch }}.tgz\n\n      - name: Publish binaries for ${{ matrix.target }} release\n        uses: softprops/action-gh-release@v1\n        with:\n          files: |\n            topos-${{ env.version }}-${{ env.arch }}.tgz\n\n  notify-release:\n    needs: release-build\n    runs-on: ubuntu-latest\n    if: ${{ github.event_name == 'release' && !github.event.release.prerelease }}\n    steps:\n      - name: Send Slack notification\n        uses: slackapi/slack-github-action@v1.24.0\n        with:\n          payload: |\n            {\n              \"repository\": \"${{ github.repository }}\",\n              \"version\": \"${{ github.ref }}\"\n            }\n        env:\n          SLACK_WEBHOOK_URL: ${{ vars.RELEASE_PIPELINE_SLACK_WEBHOOK_URL }}\n"
  },
  {
    "path": ".github/workflows/sequencer_topos_core_contract_test.yml",
    "content": "name: Sequencer Topos Core Contract interaction test\n\non:\n  push:\n    branches: [main]\n  pull_request:\n    types: [opened, synchronize, reopened, ready_for_review]\n  workflow_dispatch:\n\njobs:\n  sequencer-contracts-e2e:\n    runs-on: ubuntu-latest-16-core\n    steps:\n      - uses: convictional/trigger-workflow-and-wait@v1.6.1\n        with:\n          owner: topos-protocol\n          repo: e2e-tests\n          github_token: ${{ secrets.ROBOT_PAT_TRIGGER_E2E_WORKFLOWS }}\n          workflow_file_name: topos:sequencer-contracts.yml\n          ref: main\n          wait_interval: 60\n          client_payload: '{ \"topos-ref\": \"${{ github.head_ref }}\" }'\n"
  },
  {
    "path": ".github/workflows/test.yml",
    "content": "name: Test\n\non:\n  push:\n    branches:\n      - main\n  pull_request:\n    types: [opened, synchronize, reopened, ready_for_review]\n\n  workflow_dispatch:\n\nconcurrency:\n  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}\n  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}\n\njobs:\n  test-doc:\n    name: Test documentation\n    runs-on: ubuntu-latest\n    steps:\n    - name: Checkout \n      uses: actions/checkout@v4\n    - uses: ./.github/actions/install-rust\n      with:\n        toolchain: nightly\n        AWS_ACCESS_KEY_ID: ${{ secrets.ROBOT_AWS_ACCESS_KEY_ID}}\n        AWS_SECRET_ACCESS_KEY: ${{ secrets.ROBOT_AWS_SECRET_ACCESS_KEY}}\n\n    - name: Build Documentation\n      run: cargo +nightly doc --no-deps --all --all-features\n\n  test_stable:\n    runs-on: ubuntu-latest-16-core\n    strategy:\n      fail-fast: false\n    name: stable - Test\n    steps:\n      - uses: actions/checkout@v4\n      - uses: ./.github/actions/install-rust\n        with:\n          AWS_ACCESS_KEY_ID: ${{ secrets.ROBOT_AWS_ACCESS_KEY_ID}}\n          AWS_SECRET_ACCESS_KEY: ${{ secrets.ROBOT_AWS_SECRET_ACCESS_KEY}}\n\n      - run: cargo nextest run --workspace --exclude topos-sequencer-subnet-runtime && cargo test --doc --workspace\n        env:\n          RUST_LOG: warn,topos=info\n\n  test_nightly:\n    runs-on: ubuntu-latest-16-core\n    strategy:\n      fail-fast: false\n    name: nightly - Test\n    steps:\n      - uses: actions/checkout@v4\n      - uses: ./.github/actions/install-rust\n        with:\n          toolchain: nightly\n          AWS_ACCESS_KEY_ID: ${{ secrets.ROBOT_AWS_ACCESS_KEY_ID}}\n          AWS_SECRET_ACCESS_KEY: ${{ secrets.ROBOT_AWS_SECRET_ACCESS_KEY}}\n\n      - run: cargo nextest run --workspace --exclude topos-sequencer-subnet-runtime && cargo test --doc --workspace\n        env:\n          RUST_LOG: topos=warn\n\n  cert_delivery:\n    runs-on: ubuntu-latest-16-core\n    needs: 
[test_stable]\n    strategy:\n      fail-fast: true\n      matrix:\n        value: [\"first\", \"second\", \"third\"]\n    steps:\n      - uses: actions/checkout@v4\n      - uses: ./.github/actions/install-rust\n        with:\n          AWS_ACCESS_KEY_ID: ${{ secrets.ROBOT_AWS_ACCESS_KEY_ID}}\n          AWS_SECRET_ACCESS_KEY: ${{ secrets.ROBOT_AWS_SECRET_ACCESS_KEY}}\n      - run: cargo nextest run cert_delivery --locked --no-default-features\n        env:\n          RUST_LOG: topos=warn\n"
  },
  {
    "path": ".gitignore",
    "content": "# Generated by Cargo\n# will have compiled files and executables\n/target/\n*~\n\n# testing database\n/default_db/\n/db*/\n\n# These are backup files generated by rustfmt\n**/*.rs.bk\n\n# IntelliJ files\n.idea\n**/*.iml\n\n# VSCode\n.vscode\n**/tests/databases\n\n# ~~~ START TERRAFORM\n# Local .terraform directories\n**/.terraform/*\n\n# .tfstate files\n*.tfstate\n*.tfstate.*\n\n# Crash log files\ncrash.log\ncrash.*.log\n\n# Exclude all .tfvars files, which are likely to contain sensitive data, such as\n# password, private keys, and other secrets. These should not be part of version\n# control as they are data points which are potentially sensitive and subject\n# to change depending on the environment.\n*.tfvars\n*.tfvars.json\n\n# Ignore override files as they are usually used to override resources locally and so\n# are not checked in\noverride.tf\noverride.tf.json\n*_override.tf\n*_override.tf.json\n\n# Include override files you do wish to add to version control using negated pattern\n# !example_override.tf\n\n# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan\n# example: *tfplan*\n\n# Ignore CLI configuration files\n.terraformrc\nterraform.rc\n\n#k8s\n**/kubeconfig\n# ~~~ END TERRAFORM\n\n# Subnet integration tests\ncrates/topos-sequencer-subnet-runtime/tests/temp\n\n# Node modules\n**/node_modules\n\nartifacts/\n\npolygon-edge\n\n# macOS directory attributes\n**/.DS_Store\n\ntools/node_config/node/test/libp2p/\ntools/node_config/node/test/consensus/\n"
  },
  {
    "path": "CHANGELOG.md",
    "content": "![topos](./.github/assets/topos_logo_dark.png)\n## [0.1.0](https://github.com/topos-protocol/topos/compare/v0.0.11..0.1.0) - 2024-03-25\n\n### ⛰️  Features\n\n- Update smart-contracts to 3.4.0 stable - ([a714493](https://github.com/topos-protocol/topos/commit/a714493dd5aaf235d99f4b903f49a355e0c38d14))\n- Add p2p layer health check ([#464](https://github.com/topos-protocol/topos/issues/464)) - ([d2ec941](https://github.com/topos-protocol/topos/commit/d2ec941ec24d3cbc27c11d4fcf6b0495911f85e2))\n- Terminate stream if client is dropping the connection ([#463](https://github.com/topos-protocol/topos/issues/463)) - ([2c73f0b](https://github.com/topos-protocol/topos/commit/2c73f0bae1dc25aad504d45dcc789360cce7dbaa))\n- Introduce topos-node crate ([#459](https://github.com/topos-protocol/topos/issues/459)) - ([d8db631](https://github.com/topos-protocol/topos/commit/d8db631d970d6b855e5a47f0c561abe8bab9832d))\n- Add proper error handling to setup command ([#452](https://github.com/topos-protocol/topos/issues/452)) - ([3335846](https://github.com/topos-protocol/topos/commit/3335846327c9c0f32e85694861f30520a9f2a6c5))\n- Remove dht publication ([#449](https://github.com/topos-protocol/topos/issues/449)) - ([7030341](https://github.com/topos-protocol/topos/commit/70303412f139c7fe5ca0d4775f367d27543ac791))\n- Add benchmark dns option to spam subcommand ([#448](https://github.com/topos-protocol/topos/issues/448)) - ([90405f3](https://github.com/topos-protocol/topos/commit/90405f3f4bd468c33685158ddddb793b109e3f22))\n- Move telemetry-otlp setup into telemetry crate ([#446](https://github.com/topos-protocol/topos/issues/446)) - ([8a15fc4](https://github.com/topos-protocol/topos/commit/8a15fc4c0aa07ba71ca8376f21056949d71e92c5))\n\n### 🐛 Bug Fixes\n\n- *(config)* Fix the parse of edge_path ENV var ([#482](https://github.com/topos-protocol/topos/issues/482)) - ([b2a1af0](https://github.com/topos-protocol/topos/commit/b2a1af06dfa6987261a08ccbc8f05ed1bdc0d0b8))\n- *(p2p)* 
Accept listener connection during bootstrap ([#484](https://github.com/topos-protocol/topos/issues/484)) - ([b8cd730](https://github.com/topos-protocol/topos/commit/b8cd730c2e2a6d2799a5c741b026cf03d9eadd33))\n- *(p2p)* Rework ticks of bootstrap query interval ([#483](https://github.com/topos-protocol/topos/issues/483)) - ([5b6ddb8](https://github.com/topos-protocol/topos/commit/5b6ddb80ded50525a27617ca5f7c911525752619))\n- Bump smart contract version ([#478](https://github.com/topos-protocol/topos/issues/478)) - ([642203c](https://github.com/topos-protocol/topos/commit/642203c962ede91d821af2b54e5f7bc0d845d407))\n- Concurrency insert between pending and delivered ([#467](https://github.com/topos-protocol/topos/issues/467)) - ([bd5e3f5](https://github.com/topos-protocol/topos/commit/bd5e3f52ba00bafa25e0f3ce8b42b326e4fb5ef0))\n- Block handling during certificate generation ([#471](https://github.com/topos-protocol/topos/issues/471)) - ([a5299c8](https://github.com/topos-protocol/topos/commit/a5299c80068d6612d1aba162556f9ccd3dd3d0a8))\n- Update mio ([#473](https://github.com/topos-protocol/topos/issues/473)) - ([8291740](https://github.com/topos-protocol/topos/commit/82917405e7bf102c06194caadc973f57ac735649))\n- Revert update smart contract event ([#470](https://github.com/topos-protocol/topos/issues/470)) - ([c41a51a](https://github.com/topos-protocol/topos/commit/c41a51a2ff86198f44dd6b24ff534507a17cf519))\n- Update smart contract event ([#462](https://github.com/topos-protocol/topos/issues/462)) - ([f995859](https://github.com/topos-protocol/topos/commit/f9958599a1da31d7c92a8e8a0e925e79ce6140cb))\n- Remove duplicated certificate push on gossipsub ([#458](https://github.com/topos-protocol/topos/issues/458)) - ([b0e88dc](https://github.com/topos-protocol/topos/commit/b0e88dce2b7ea060ce34377b40a10e54edd16e02))\n- Add next_pending_certificate on end task ([#455](https://github.com/topos-protocol/topos/issues/455)) - 
([2aaa500](https://github.com/topos-protocol/topos/commit/2aaa50071ca14415bb0284930c404659b6c463d8))\n\n### 🚜 Refactor\n\n- Improve delivery timing ([#466](https://github.com/topos-protocol/topos/issues/466)) - ([96e862f](https://github.com/topos-protocol/topos/commit/96e862f5b886a38a5c67590d1e152ab9894d6f15))\n- Store instantiation ([#461](https://github.com/topos-protocol/topos/issues/461)) - ([213b8d4](https://github.com/topos-protocol/topos/commit/213b8d482cf6e08ec0f1cae0e9dfd981b156a98d))\n- Update error management and config/process ([#460](https://github.com/topos-protocol/topos/issues/460)) - ([cc0c7b5](https://github.com/topos-protocol/topos/commit/cc0c7b538d9f6b91c184db10eedd9d94c4f368fb))\n- Move edge config to config crate ([#445](https://github.com/topos-protocol/topos/issues/445)) - ([23cc558](https://github.com/topos-protocol/topos/commit/23cc55887703bac01b7ec26486f47b03316046c1))\n- Tce-broadcast config ([#444](https://github.com/topos-protocol/topos/issues/444)) - ([10c3879](https://github.com/topos-protocol/topos/commit/10c3879cd30bf0172996cfbf48ab5c991e767eaf))\n\n### ⚙️ Miscellaneous Tasks\n\n- Update changelog for 0.1.0 - ([65fc8cd](https://github.com/topos-protocol/topos/commit/65fc8cd05d1fdaecd809e92a0643dc02557ad460))\n- Update changelog for 0.1.0 - ([a82617a](https://github.com/topos-protocol/topos/commit/a82617a6c653f02a00fc9565f2c5abb42c9b6c26))\n- Disable coverage report on release branch (push) - ([09f3663](https://github.com/topos-protocol/topos/commit/09f36639ef62a02a2a84bde8f36a98ce6274ea6f))\n- Disable coverage report on release branch (push) - ([e909e22](https://github.com/topos-protocol/topos/commit/e909e22d6dac251e4026816cd8dd5c84851e9db5))\n- Disable coverage report on release branch ([#481](https://github.com/topos-protocol/topos/issues/481)) - ([8f10090](https://github.com/topos-protocol/topos/commit/8f10090094bf110670137f73a115bda54f64aba5))\n- Update changelog for 0.1.0 - 
([c68798e](https://github.com/topos-protocol/topos/commit/c68798eeed366a421a076cc1908aaca8013d80cf))\n- Creating CHANGELOG.md for 0.0.11 - ([463f52f](https://github.com/topos-protocol/topos/commit/463f52feb73f10d2a194cf44863842a9f0cf13a0))\n- Bumping version 0.1.0 - ([16de6a6](https://github.com/topos-protocol/topos/commit/16de6a675b0fe44afd20526202a2e5178b40994d))\n- Update deps ([#474](https://github.com/topos-protocol/topos/issues/474)) - ([264c569](https://github.com/topos-protocol/topos/commit/264c5694980fded79ea0749d03f54a345d90c741))\n- Refactor logs and fix typo ([#465](https://github.com/topos-protocol/topos/issues/465)) - ([8044310](https://github.com/topos-protocol/topos/commit/8044310b8ee330d5a14d509137dc4243cb2c2372))\n- Removing cache_size ([#472](https://github.com/topos-protocol/topos/issues/472)) - ([b2e4cf8](https://github.com/topos-protocol/topos/commit/b2e4cf88ac0c0b2ee92b7ef120a4c4e97493150c))\n- Backport fix of 0.0.11 ([#453](https://github.com/topos-protocol/topos/issues/453)) - ([53328ac](https://github.com/topos-protocol/topos/commit/53328acc813816757c57f3279cbd5f2aa738d2f0))\n\n### Build\n\n- Ignore pr checking name for release ([#480](https://github.com/topos-protocol/topos/issues/480)) - ([cfd8890](https://github.com/topos-protocol/topos/commit/cfd8890a0cb03f25fdaae8b181ab9c33f785e34e))\n\n## [0.0.11](https://github.com/topos-protocol/topos/compare/v0.0.10..v0.0.11) - 2024-02-08\n\n### ⛰️  Features\n\n- Introduce topos-config crate ([#443](https://github.com/topos-protocol/topos/issues/443)) - ([4ff2a23](https://github.com/topos-protocol/topos/commit/4ff2a23e3a05ea3e950763bd4bde3d3ef6ef891b))\n- Adding positions to certificate ([#440](https://github.com/topos-protocol/topos/issues/440)) - ([5315710](https://github.com/topos-protocol/topos/commit/531571025a4d81f9d9aa713ca12594756ca56a7e))\n- Improve sequencer error handling and shutdown/restart sequence ([#428](https://github.com/topos-protocol/topos/issues/428)) - 
([ab8bb9e](https://github.com/topos-protocol/topos/commit/ab8bb9e83afee545c3730f974ae8591c7fc70f3d))\n- Update double echo to use pending CF ([#418](https://github.com/topos-protocol/topos/issues/418)) - ([8fb4003](https://github.com/topos-protocol/topos/commit/8fb4003d5579a8fee6d81c463707131959f076c3))\n- Use anvil for sequencer tests ([#427](https://github.com/topos-protocol/topos/issues/427)) - ([5b0257b](https://github.com/topos-protocol/topos/commit/5b0257bed685c064c3eafbea2b5c77125e6c9041))\n- Update tce config addresses ([#415](https://github.com/topos-protocol/topos/issues/415)) - ([476948f](https://github.com/topos-protocol/topos/commit/476948fa671b431bfa797aabf6b96949ac734db6))\n- Remove the register commands macro ([#426](https://github.com/topos-protocol/topos/issues/426)) - ([985d0be](https://github.com/topos-protocol/topos/commit/985d0be0c75ddd1d41e94a172824b706ca0f9c5f))\n- Run e2e topos integration workflow ([#408](https://github.com/topos-protocol/topos/issues/408)) - ([f0b7637](https://github.com/topos-protocol/topos/commit/f0b763786aa869454c9e30076ed08d0a456ed319))\n- Adding filter on message when non validator ([#405](https://github.com/topos-protocol/topos/issues/405)) - ([b096482](https://github.com/topos-protocol/topos/commit/b0964825a5f386d75507482ee9068e26c9d74fe0))\n- Add no-edge-process flag to node init ([#401](https://github.com/topos-protocol/topos/issues/401)) - ([28a553b](https://github.com/topos-protocol/topos/commit/28a553b6d17933bfbcca835640cc0c165bcd0124))\n- Refactor peer selection for synchronization ([#382](https://github.com/topos-protocol/topos/issues/382)) - ([6982d33](https://github.com/topos-protocol/topos/commit/6982d336296a9b9ec5eacb025d938b6cb47b6e0a))\n- Remove task manager channels ([#391](https://github.com/topos-protocol/topos/issues/391)) - ([f5fa427](https://github.com/topos-protocol/topos/commit/f5fa4276d8a524fd04bbf2a0d1d036d7f5af34bb))\n- Add batch message and update double echo 
([#383](https://github.com/topos-protocol/topos/issues/383)) - ([f0bc90c](https://github.com/topos-protocol/topos/commit/f0bc90c7480a84c0c12016e748f2a002477f4417))\n\n### 🐛 Bug Fixes\n\n- Fixing wrong use of IntCounterVec ([#442](https://github.com/topos-protocol/topos/issues/442)) - ([fe062a5](https://github.com/topos-protocol/topos/commit/fe062a5bdfa9ade2b94de88cbd6b3946b72a94c3))\n- Clippy unused ([#434](https://github.com/topos-protocol/topos/issues/434)) - ([4aa6a9e](https://github.com/topos-protocol/topos/commit/4aa6a9e4723b8aaa2c2da0fdf32d8c8c926f7764))\n- Remove an unused channel that was locking the broadcast ([#433](https://github.com/topos-protocol/topos/issues/433)) - ([43c6fe5](https://github.com/topos-protocol/topos/commit/43c6fe5caffd35103b20ec11d7ae2f35b1af98b9))\n- RUSTSEC-2024-0003 ([#431](https://github.com/topos-protocol/topos/issues/431)) - ([cad4d76](https://github.com/topos-protocol/topos/commit/cad4d76ccf88be3f1399d371cf9dbdd7211343bc))\n- RUSTSEC-2023-0078 ([#429](https://github.com/topos-protocol/topos/issues/429)) - ([35f8930](https://github.com/topos-protocol/topos/commit/35f8930056f641b33c56b4799b8c0cbe6f0a5eda))\n- Fixing release notification ([#423](https://github.com/topos-protocol/topos/issues/423)) - ([7503fa7](https://github.com/topos-protocol/topos/commit/7503fa7f2385294f2a12a86eaa521af61fb9bc95))\n- Move test abi generation to separate module ([#424](https://github.com/topos-protocol/topos/issues/424)) - ([d4ff358](https://github.com/topos-protocol/topos/commit/d4ff3581d43eabb14c3d977f34b69aee396e98d4))\n- Return error from subprocesses ([#422](https://github.com/topos-protocol/topos/issues/422)) - ([53b3229](https://github.com/topos-protocol/topos/commit/53b3229b7e26b7bb9b3a8d97725e7cc86174df9b))\n\n### 🚜 Refactor\n\n- Graphql types to differentiate inputs ([#435](https://github.com/topos-protocol/topos/issues/435)) - ([4b0ec9b](https://github.com/topos-protocol/topos/commit/4b0ec9b2b3b6ab075d1b9bfde54dc8bb179bbde8))\n\n### ⚙️ 
Miscellaneous Tasks\n\n- Debug 0.0.11 synchronization ([#447](https://github.com/topos-protocol/topos/issues/447)) - ([edf86ee](https://github.com/topos-protocol/topos/commit/edf86ee32f8b34c5b11eecd5b0ed6fe5bdd0191a))\n- Update dependencies ([#450](https://github.com/topos-protocol/topos/issues/450)) - ([62126e0](https://github.com/topos-protocol/topos/commit/62126e0417d8d4225eb8bb6eebb7fc0a0f526cc6))\n- Remove mention of topos-api from coverage YAML ([#438](https://github.com/topos-protocol/topos/issues/438)) - ([6c7e342](https://github.com/topos-protocol/topos/commit/6c7e342715d86bcbd75e1bcd4054eb4cbeddf5cf))\n- Bump version for topos to 0.0.11 ([#439](https://github.com/topos-protocol/topos/issues/439)) - ([917eaf9](https://github.com/topos-protocol/topos/commit/917eaf993336780a373dbcdfea4dd0d8b3e81f50))\n- Refactor struct in topos-core ([#437](https://github.com/topos-protocol/topos/issues/437)) - ([acedac7](https://github.com/topos-protocol/topos/commit/acedac7e09094364b5406ee72f9a65241784c478))\n- Update crates structure for api/uci and core ([#436](https://github.com/topos-protocol/topos/issues/436)) - ([355b08a](https://github.com/topos-protocol/topos/commit/355b08acf91052564dae65872904950d20b72ebd))\n- Fix logs for pending ([#432](https://github.com/topos-protocol/topos/issues/432)) - ([342b2c7](https://github.com/topos-protocol/topos/commit/342b2c71ef0621a93b5f4460abd313f1c8b4c62b))\n- Updating double echo for devnet ([#416](https://github.com/topos-protocol/topos/issues/416)) - ([1e91086](https://github.com/topos-protocol/topos/commit/1e91086a68ec01d304c5d8867e8fbcd671798599))\n- Fixing clippy warning for 1.75.0 ([#425](https://github.com/topos-protocol/topos/issues/425)) - ([22a3745](https://github.com/topos-protocol/topos/commit/22a374506e087ab9cba68f9e0ed00682df6be6df))\n- Refactor push certificate tests and cleanup regtest ([#399](https://github.com/topos-protocol/topos/issues/399)) - 
([c60170d](https://github.com/topos-protocol/topos/commit/c60170dc19a9add84648afaf7962252ec77c160f))\n- Update signal handle by tce ([#417](https://github.com/topos-protocol/topos/issues/417)) - ([beca28b](https://github.com/topos-protocol/topos/commit/beca28ba224b4a217a147f88142692acd1613a32))\n- Cleanup topos tools ([#397](https://github.com/topos-protocol/topos/issues/397)) - ([0820306](https://github.com/topos-protocol/topos/commit/08203062a1cada7470d8b8207539d7a660ece466))\n- Adding context on connection to self ([#413](https://github.com/topos-protocol/topos/issues/413)) - ([6e72999](https://github.com/topos-protocol/topos/commit/6e729992202ed4c56a1564cc39755ff1e0766ff8))\n- Adding context on connection to self ([#411](https://github.com/topos-protocol/topos/issues/411)) - ([3a799ac](https://github.com/topos-protocol/topos/commit/3a799ac41542bd216a0512d9cf3a634a6ed2af07))\n- Adding context to p2p msg received ([#410](https://github.com/topos-protocol/topos/issues/410)) - ([e1b2ccf](https://github.com/topos-protocol/topos/commit/e1b2ccf99a114a60e04c8ed187a8c9bd2cf066e4))\n- Adding context to certificate broadcast ([#409](https://github.com/topos-protocol/topos/issues/409)) - ([d170b6b](https://github.com/topos-protocol/topos/commit/d170b6b386005d44457979bcd0a0f2435153f3f2))\n- Adding storage context on startup ([#404](https://github.com/topos-protocol/topos/issues/404)) - ([ffae4c6](https://github.com/topos-protocol/topos/commit/ffae4c63d00bb099a49216c2af560f6b59601133))\n- Remove audit ignore report ([#407](https://github.com/topos-protocol/topos/issues/407)) - ([70bce47](https://github.com/topos-protocol/topos/commit/70bce479d16c750ff1a322db1e88bfae08303855))\n- Update SynchronizerService to remove unwrap ([#403](https://github.com/topos-protocol/topos/issues/403)) - ([b424aa9](https://github.com/topos-protocol/topos/commit/b424aa91f68197134faec742a23eec513cde837f))\n- Adding no-color option to CLI ([#402](https://github.com/topos-protocol/topos/issues/402)) 
- ([4989936](https://github.com/topos-protocol/topos/commit/4989936ae000a85897d805453c91a53f4edd6580))\n- Adding cross compilation ([#400](https://github.com/topos-protocol/topos/issues/400)) - ([887762f](https://github.com/topos-protocol/topos/commit/887762f740241d49d2886cd55d6a9da1d3d83e7e))\n- Adding openssl as dependency ([#396](https://github.com/topos-protocol/topos/issues/396)) - ([60f873c](https://github.com/topos-protocol/topos/commit/60f873c619b8132a2276634205e3c39561eb3faf))\n- Update release action target ([#395](https://github.com/topos-protocol/topos/issues/395)) - ([54db400](https://github.com/topos-protocol/topos/commit/54db40020e8e22d7b100b0b6944a45485e2939a4))\n- Update release action target ([#394](https://github.com/topos-protocol/topos/issues/394)) - ([f0f28c3](https://github.com/topos-protocol/topos/commit/f0f28c33a5332232b2921b13051ae5de714cf58b))\n- Adding aarch64 image ([#393](https://github.com/topos-protocol/topos/issues/393)) - ([9f48dc8](https://github.com/topos-protocol/topos/commit/9f48dc88582bfbc71cabb2c6bbc27297cc9b87ee))\n- Cleanup topos test network setup ([#390](https://github.com/topos-protocol/topos/issues/390)) - ([2820664](https://github.com/topos-protocol/topos/commit/2820664a66bdfe039f1aaa80d79f4ff339c8a65c))\n\n\n"
  },
  {
    "path": "Cargo.toml",
    "content": "[workspace]\nresolver = \"2\"\ndefault-members = [\"crates/topos\", \"crates/topos-node\"]\nmembers = [\n    \"crates/*\"\n]\n\n[workspace.package]\nversion = \"0.1.0\"\n\n[workspace.lints.rust]\n# Deny\nmissing_docs = \"allow\"\n\n# Warn\ndeprecated-in-future = \"warn\"\n\n[profile.release]\nstrip = true\n\n[workspace.dependencies]\ntopos-core = { path = \"./crates/topos-core\", default-features = false }\ntopos-crypto = { path = \"./crates/topos-crypto\", default-features = false }\ntopos-metrics = { path = \"./crates/topos-metrics/\", default-features = false }\n\n# Various utility crates\nclap = { version = \"4.0\", features = [\"derive\", \"env\", \"string\"] }\nlazy_static = \"1\"\nrand = { version = \"0.8\", default-features = false }\nrand_core = { version = \"0.6\", default-features = false }\nrand_distr = { version = \"0.4\", default-features = false }\n\n# Async & Tokio related\nasync-stream = { version = \"0.3\", default-features = false }\nasync-trait = { version = \"0.1\", default-features = false }\nfutures = { version = \"0.3\" }\ntokio = { version = \"1.24\", default-features = false }\ntokio-util = { version = \"0.7.8\" }\ntokio-stream = { version = \"0.1\", default-features = false }\ntower = \"0.4\"\n\n# Blockchain\nethereum-types = { version = \"0.13.1\"}\nsecp256k1 = {version = \"0.27\", features = [\"recovery\"]}\ntiny-keccak = {version = \"1.5\"}\nethers = {version = \"2.0.9\", features = [\"legacy\", \"abigen-online\"]}\n\n# Log, Tracing & telemetry\nopentelemetry = { version = \"0.22\", features = [\"metrics\"] }\nopentelemetry-otlp = { version = \"0.15\", features = [\"grpc-tonic\", \"metrics\", \"tls-roots\"] }\nopentelemetry_sdk = { version = \"0.22\" }\n\nprometheus = \"0.13.3\"\nprometheus-client = \"0.22\"\ntracing = { version = \"0.1\", default-features = false }\ntracing-attributes = \"0.1\"\ntracing-opentelemetry = \"0.23\"\ntracing-subscriber = { version = \"0.3\", default-features = false }\n\n# gRPC\nprost = 
{version = \"0.12\"}\ntonic = { version = \"0.11\", default-features = false }\ntonic-build = { version = \"0.11\", default-features = false, features = [\n    \"prost\", \"transport\"\n] }\n\n# Axum server (GraphQL + Metrics)\naxum = \"0.6\"\nasync-graphql = \"6\"\nasync-graphql-axum = \"6\"\nhttp = \"0.2.9\"\ntower-http = { version = \"0.4\", features = [\"cors\"] }\n\n# P2P related\nlibp2p = { version = \"0.53\", default-features = false, features = [\"noise\"]}\n\n# Serialization & Deserialization\nbincode = { version = \"1.3\", default-features = false }\nbyteorder = { version = \"1.4\", default-features = false }\nbytes = { version = \"1.3\", default-features = false }\nhex = { version = \"0.4\", default-features = false }\nserde = { version = \"1.0\", default-features = false }\nserde_json = { version = \"1.0\", default-features = false }\nthiserror = { version = \"1.0\", default-features = false }\nuuid = { version = \"1.1.2\", default-features = false, features = [\"v4\"] }\nbase64ct = { version = \"1\", features = [\"alloc\"] }\n\n# Network related\nbackoff = { version = \"0.4\", features = [\"tokio\", \"futures\"] }\nhyper = { version = \"0.14.26\", features = [\"full\"] }\nreqwest = { version = \"0.11\", features = [\"json\"] }\n\n# Tests\nrstest = { version = \"0.17.0\", default-features = false }\ntest-log = { version = \"0.2\", features = [\"trace\"] }\nenv_logger = { version = \"0.10.0\"} # Needed by test-log to print traces in tests\nserial_test = {version = \"0.9.0\"}\n"
  },
  {
    "path": "Cross.toml",
    "content": "[build]\npre-build = [\n    \"apt update && apt install -y unzip\",\n    \"curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v25.1/protoc-25.1-linux-x86_64.zip && unzip protoc-25.1-linux-x86_64.zip -d /usr/ && chmod 755 /usr/bin/protoc\"\n]\n\n[target.x86_64-unknown-linux-gnu]\nimage = \"ghcr.io/cross-rs/x86_64-unknown-linux-gnu:main\"\n\n[target.aarch64-unknown-linux-gnu]\nimage = \"ghcr.io/cross-rs/aarch64-unknown-linux-gnu:main\"\n"
  },
  {
    "path": "Dockerfile",
    "content": "ARG RUSTUP_TOOLCHAIN=stable\nFROM --platform=${BUILDPLATFORM:-linux/amd64} ghcr.io/topos-protocol/rust_builder:bullseye-${RUSTUP_TOOLCHAIN} AS base\n\nARG FEATURES\n# Rust cache\nARG SCCACHE_S3_KEY_PREFIX\nARG SCCACHE_BUCKET\nARG SCCACHE_REGION\nARG RUSTC_WRAPPER\nARG PROTOC_VERSION=22.2\n\nWORKDIR /usr/src/app\n\nFROM --platform=${BUILDPLATFORM:-linux/amd64} base AS build\nCOPY . .\nRUN --mount=type=secret,id=aws,target=/root/.aws/credentials \\\n    --mount=type=cache,id=sccache,target=/root/.cache/sccache \\\n  cargo build --release --no-default-features --features=${FEATURES} \\\n  && sccache --show-stats\n\nFROM --platform=${BUILDPLATFORM:-linux/amd64} debian:bullseye-slim AS topos\n\nENV TCE_PORT=9090\nENV USER=topos\nENV UID=10001\nENV PATH=\"${PATH}:/usr/src/app\"\n\nWORKDIR /usr/src/app\n\nCOPY --from=build /usr/src/app/target/release/topos .\n\nRUN apt-get update && apt-get install -y \\\n    ca-certificates \\\n    jq \\\n  && apt-get clean \\\n  && rm -rf /var/lib/apt/lists/*\n\nRUN mkdir /tmp/node_config\nRUN mkdir /tmp/shared\n\nENTRYPOINT [\"./topos\"]\n"
  },
  {
    "path": "LICENSE",
    "content": "The Transmission Control Engine (TCE) and Sequencer are licensed under BSL-1.1.\n\n-----------------------------------------------------------------------------\n\nBusiness Source License 1.1\n\nLicense text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved.\n\"Business Source License\" is a trademark of MariaDB Corporation Ab.\n\nParameters\n\nLicensor:             zk Foundation\n\nLicensed Work:        Code contained within the \"topos\" repository\n                      Transmission Control Engine (TCE) and Sequencer\n                      The Licensed Work is (c) zk Foundation\n\nAdditional Use Grant:\n\nYou may use the Licensed Work in a production environment in the following circumstances:\n\n(1) solely for non-malicious use of the Topos Protocol via zk Foundation\npromulgated networks for the operation of a node on a network or for the operation\nof software in support of connecting a Subnet to a network; and\n\n(2) for non-commercial academic research purposes only.\n\n“Topos Protocol” as used herein means the generalized implementation of “Topos:\nA Secure, Trustless, and Decentralized Interoperability Protocol”\n(https://arxiv.org/pdf/2206.03481.pdf) distributed by zk Foundation.\nA “Subnet” as used herein means a process, set of processes, computer system,\ndatabase, or network of systems and/or processes as defined by the Topos Protocol.\n\nFor any additional licensing arrangements, please contact zk Foundation at:\ninfo@zkfoundation.io.\n\nChange Date:          July 1, 2025\n\nChange License:       GPL v3.0\n\n-----------------------------------------------------------------------------\n\nTerms\n\nThe Licensor hereby grants you the right to copy, modify, create derivative\nworks, redistribute, and make non-production use of the Licensed Work. 
The\nLicensor may make an Additional Use Grant, above, permitting limited\nproduction use.\n\nEffective on the Change Date, or the fourth anniversary of the first publicly\navailable distribution of a specific version of the Licensed Work under this\nLicense, whichever comes first, the Licensor hereby grants you rights under\nthe terms of the Change License, and the rights granted in the paragraph\nabove terminate.\n\nIf your use of the Licensed Work does not comply with the requirements\ncurrently in effect as described in this License, you must purchase a\ncommercial license from the Licensor, its affiliated entities, or authorized\nresellers, or you must refrain from using the Licensed Work.\n\nAll copies of the original and modified Licensed Work, and derivative works\nof the Licensed Work, are subject to this License. This License applies\nseparately for each version of the Licensed Work and the Change Date may vary\nfor each version of the Licensed Work released by Licensor.\n\nYou must conspicuously display this License on each original or modified copy\nof the Licensed Work. If you receive the Licensed Work in original or\nmodified form from a third party, the terms and conditions set forth in this\nLicense apply to your use of that work.\n\nAny use of the Licensed Work in violation of this License will automatically\nterminate your rights under this License for the current and all other\nversions of the Licensed Work.\n\nThis License does not grant you any right in any trademark or logo of\nLicensor or its affiliates (provided that you may use a trademark or logo of\nLicensor as expressly required by this License).\n\nTO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON\nAN “AS IS” BASIS. 
LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,\nEXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND\nTITLE.\n\nMariaDB hereby grants you permission to use this License’s text to license\nyour works, and to refer to it using the trademark “Business Source License”,\nas long as you comply with the Covenants of Licensor below.\n\nCovenants of Licensor\n\nIn consideration of the right to use this License’s text and the “Business\nSource License” name and trademark, Licensor covenants to MariaDB, and to all\nother recipients of the licensed work to be provided by Licensor:\n\n1. To specify as the Change License the GPL Version 2.0 or any later version,\n   or a license that is compatible with GPL Version 2.0 or a later version,\n   where “compatible” means that software provided under the Change License can\n   be included in a program with software provided under GPL Version 2.0 or a\n   later version. Licensor may specify additional Change Licenses without\n   limitation.\n\n2. To either: (a) specify an additional grant of rights to use that does not\n   impose any additional restriction on the right granted in this License, as\n   the Additional Use Grant; or (b) insert the text “None”.\n\n3. To specify a Change Date.\n\n4. Not to modify this License in any other way.\n\n-----------------------------------------------------------------------------\n\nNotice\n\nThe Business Source License (this document, or the \"License\") is not an Open\nSource license. However, the Licensed Work will eventually be made available\nunder an Open Source License, as stated in this License.\n\n"
  },
  {
    "path": "README.md",
    "content": "<div id=\"top\"></div>\n<!-- PROJECT LOGO -->\n<br />\n<div align=\"center\">\n\n  <img src=\"./.github/assets/topos_logo.png#gh-light-mode-only\" alt=\"Logo\" width=\"200\">\n  <img src=\"./.github/assets/topos_logo_dark.png#gh-dark-mode-only\" alt=\"Logo\" width=\"200\">\n\n<br />\n\n<p align=\"center\">\n<b>topos</b> is the unified command line interface to the <a href=\"https://docs.topos.technology/\">Topos</a> protocol.\n</p>\n</div>\n\n<br />\n\n<div align=\"center\">\n\n![Test workflow](https://github.com/topos-protocol/topos/actions/workflows/test.yml/badge.svg)\n![Quality workflow](https://github.com/topos-protocol/topos/actions/workflows/quality.yml/badge.svg)\n[![codecov](https://codecov.io/gh/topos-protocol/topos/branch/main/graph/badge.svg?token=FOH2B2GRL9)](https://codecov.io/gh/topos-protocol/topos)\n![MSRV](https://img.shields.io/badge/MSRV-1.71.1-blue?labelColor=1C2C2E&logo=Rust)\n[![](https://dcbadge.vercel.app/api/server/7HZ8F8ykBT?style=flat)](https://discord.gg/7HZ8F8ykBT)\n\n</div>\n\n## Getting Started\n\n**Install Rust**\n\nThe first step is to install Rust along with `cargo` by following the instructions [here](https://doc.rust-lang.org/book/ch01-01-installation.html#installing-rustup-on-linux-or-macos).\n\n**Install `topos`**\n\n```\ncargo install topos --git https://github.com/topos-protocol/topos\n```\n\n**Try out `topos`!**\n\n```\ntopos --help\n```\n\nFind more about how topos works in the [documentation](https://docs.topos.technology/).\n\n### Topos Docker image\n\nThe docker images use `stable` Rust toolchain by default. You can use a different one by defining `RUSTUP_TOOLCHAIN` argument, the list of available toolchain is [here](https://github.com/topos-protocol/topos-ci-docker/pkgs/container/rust_builder)\n\nBuild Topos docker image:\n\n```\nDOCKER_BUILDKIT=1 docker build . --build-arg RUSTUP_TOOLCHAIN=[...] 
-t topos:latest\n```\nRun the Topos docker image:\n\n```\ndocker run -it --rm topos:latest --help\n```\n\n\n## Development\n\nContributions are very welcome; the guidelines are outlined in [`CONTRIBUTING.md`](https://github.com/topos-protocol/.github/blob/main/CONTRIBUTING.md).<br />\n\n## Support\n\nFeel free to [open an issue](https://github.com/topos-protocol/topos/issues/new) if you have a feature request or bug report.<br />\nIf you have any questions, do not hesitate to reach out to us on [Discord](https://discord.gg/7HZ8F8ykBT)!\n\n## Resources\n\n- Website: <https://toposware.com>\n- Technical Documentation: <https://docs.topos.technology/>\n- Medium: <https://toposware.medium.com>\n- Whitepaper: [Topos: A Secure, Trustless, and Decentralized\n  Interoperability Protocol](https://arxiv.org/pdf/2206.03481.pdf)\n\n## License\n\nThis project is released under the terms specified in the [LICENSE](LICENSE) file.\n"
  },
  {
    "path": "cliff.toml",
    "content": "# git-cliff ~ configuration file\n# https://git-cliff.org/docs/configuration\n#\n# Lines starting with \"#\" are comments.\n# Configuration options are organized into tables and keys.\n# See documentation for more information on available options.\n\n[changelog]\n# changelog header\nheader = \"\"\"\n![topos](./.github/assets/topos_logo_dark.png)\n\"\"\"\n# template for the changelog body\n# https://keats.github.io/tera/docs/#introduction\nbody = \"\"\"\n{%- macro remote_url() -%}\n  https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }}\n{%- endmacro -%}\n\n{% macro print_commit(commit) -%}\n    - {% if commit.scope %}*({{ commit.scope }})* {% endif %}\\\n        {% if commit.breaking %}[**breaking**] {% endif %}\\\n        {{ commit.message | upper_first }} - \\\n        ([{{ commit.id | truncate(length=7, end=\"\") }}]({{ self::remote_url() }}/commit/{{ commit.id }}))\\\n{% endmacro -%}\n\n{% if version %}\\\n    {% if previous.version %}\\\n        ## [{{ version | trim_start_matches(pat=\"v\") }}]\\\n          ({{ self::remote_url() }}/compare/{{ previous.version }}..{{ version }}) - {{ timestamp | date(format=\"%Y-%m-%d\") }}\n    {% else %}\\\n        ## [{{ version | trim_start_matches(pat=\"v\") }}] - {{ timestamp | date(format=\"%Y-%m-%d\") }}\n    {% endif %}\\\n{% else %}\\\n    ## [unreleased]\n{% endif %}\\\n\n{% for group, commits in commits | group_by(attribute=\"group\") %}\n    ### {{ group | striptags | trim | upper_first }}\n    {% for commit in commits\n    | filter(attribute=\"scope\")\n    | sort(attribute=\"scope\") %}\n        {{ self::print_commit(commit=commit) }}\n    {%- endfor -%}\n    {% raw %}\\n{% endraw %}\\\n    {%- for commit in commits %}\n        {%- if not commit.scope -%}\n            {{ self::print_commit(commit=commit) }}\n        {% endif -%}\n    {% endfor -%}\n{% endfor %}\\n\n\"\"\"\n# template for the changelog footer\nfooter = \"\"\"\n\"\"\"\n# remove the leading and trailing whitespace 
from the templates\ntrim = true\n# postprocessors\npostprocessors = [\n  { pattern = '<REPO>', replace = \"https://github.com/topos-protocol/topos\" }, # replace repository URL\n]\n\n[git]\n# parse the commits based on https://www.conventionalcommits.org\nconventional_commits = true\n# filter out the commits that are not conventional\nfilter_unconventional = true\n# process each line of a commit as an individual commit\nsplit_commits = false\n# regex for preprocessing the commit messages\ncommit_preprocessors = [\n  { pattern = '\\((\\w+\\s)?#([0-9]+)\\)', replace = \"([#${2}](<REPO>/issues/${2}))\" },\n  # Check spelling of the commit with https://github.com/crate-ci/typos\n  # If the spelling is incorrect, it will be automatically fixed.\n  { pattern = '.*', replace_command = 'typos --write-changes -' },\n]\n# regex for parsing and grouping commits\ncommit_parsers = [\n  { message = \"^feat\", group = \"<!-- 0 -->⛰️  Features\" },\n  { message = \"^fix\", group = \"<!-- 1 -->🐛 Bug Fixes\" },\n  { message = \"^doc\", group = \"<!-- 3 -->📚 Documentation\" },\n  { message = \"^perf\", group = \"<!-- 4 -->⚡ Performance\" },\n  { message = \"^refactor\\\\(clippy\\\\)\", skip = true },\n  { message = \"^refactor\", group = \"<!-- 2 -->🚜 Refactor\" },\n  { message = \"^style\", group = \"<!-- 5 -->🎨 Styling\" },\n  { message = \"^test\", group = \"<!-- 6 -->🧪 Testing\" },\n  { message = \"^chore\\\\(release\\\\): prepare for\", skip = true },\n  { message = \"^chore\\\\(deps.*\\\\)\", skip = true },\n  { message = \"^chore\\\\(pr\\\\)\", skip = true },\n  { message = \"^chore\\\\(pull\\\\)\", skip = true },\n  { message = \"^chore|^ci\", group = \"<!-- 7 -->⚙️ Miscellaneous Tasks\" },\n  { body = \".*security\", group = \"<!-- 8 -->🛡️ Security\" },\n  { message = \"^revert\", group = \"<!-- 9 -->◀️ Revert\" },\n]\n# protect breaking changes from being skipped due to matching a skipping commit_parser\nprotect_breaking_commits = false\n# filter out the commits that are 
not matched by commit parsers\nfilter_commits = false\n# regex for matching git tags\ntag_pattern = \"v[0-9].*\"\n# regex for skipping tags\nskip_tags = \"beta|alpha\"\n# regex for ignoring tags\nignore_tags = \"rc|v2.1.0|v2.1.1\"\n# sort the tags topologically\ntopo_order = false\n# sort the commits inside sections by oldest/newest order\nsort_commits = \"newest\"\n"
  },
  {
    "path": "crates/topos/Cargo.toml",
    "content": "[package]\nname = \"topos\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n[lints]\nworkspace = true\n\n[dependencies]\ntopos-node = { path = \"../topos-node/\" }\ntopos-config = { path = \"../topos-config/\" }\ntopos-tce = { path = \"../topos-tce/\" }\ntopos-p2p = { path = \"../topos-p2p\" }\ntopos-sequencer = { path = \"../topos-sequencer\" }\ntopos-core = { workspace = true, features = [\"api\"] }\ntopos-certificate-spammer = { path = \"../topos-certificate-spammer\" }\ntopos-tce-broadcast = { path = \"../topos-tce-broadcast\", optional = true }\ntopos-wallet = { path = \"../topos-wallet\" }\ntopos-telemetry = { path = \"../topos-telemetry/\", features = [\"tracing\"] }\n\nasync-stream.workspace = true\nasync-trait.workspace = true\nclap.workspace = true\nhex.workspace = true\nfutures.workspace = true\nopentelemetry.workspace = true\nserde.workspace = true\nserde_json.workspace = true\ntokio = { workspace = true, features = [\"full\"] }\ntokio-util.workspace = true\ntonic.workspace = true\ntower.workspace = true\ntracing = { workspace = true, features = [\"log\"] }\ntracing-opentelemetry.workspace = true\ntracing-subscriber = { workspace = true, features = [\"env-filter\", \"json\", \"ansi\", \"fmt\"] }\nuuid.workspace = true\nrand.workspace = true\nreqwest.workspace = true\nthiserror.workspace = true\nopentelemetry-otlp = { workspace = true, features = [\"grpc-tonic\", \"metrics\", \"tls-roots\"] }\ndirs = \"5.0\"\ntracing-log = { version = \"0.1.3\", features = [\"env_logger\"] }\ntar = \"0.4.38\"\nflate2 =\"1.0.26\"\nurl = \"2.3.1\"\nonce_cell = \"1.17.1\"\nregex = \"1\"\nrlp = \"0.5.1\"\nopenssl = { version = \"0.10.61\", features = [\"vendored\"] }\n\n[dev-dependencies]\ntoml = \"0.7.4\"\ntopos-tce-broadcast = { path = \"../topos-tce-broadcast\" }\ntopos-tce-synchronizer = { path = \"../topos-tce-synchronizer\" }\ntopos-tce-gatekeeper = { path = \"../topos-tce-gatekeeper\" }\ntopos-tce-api = { path = \"../topos-tce-api\" }\ntopos-tce-storage = 
{ path = \"../topos-tce-storage\" }\ntopos-test-sdk = { path = \"../topos-test-sdk\" }\nserde.workspace = true\nserde_json.workspace = true\ntest-log.workspace = true\nenv_logger.workspace = true\nrand.workspace = true\nfutures.workspace = true\nlibp2p = { workspace = true, features = [\"identify\"] }\nassert_cmd = \"2.0.6\"\ninsta = { version = \"1.21\", features = [\"json\", \"redactions\"] }\nrstest = { workspace = true, features = [\"async-timeout\"] }\ntempfile = \"3.8.0\"\npredicates = \"3.0.3\"\nsysinfo = \"0.29.11\"\nserial_test = {version = \"0.9.0\"}\n\n[features]\ndefault = []\n"
  },
  {
    "path": "crates/topos/build.rs",
    "content": "use std::process::Command;\n\nconst DEFAULT_VERSION: &str = \"detached\";\n\nfn main() {\n    // Set TOPOS_VERSION to HEAD short commit hash if None\n    if std::option_env!(\"TOPOS_VERSION\").is_none() {\n        let output = Command::new(\"git\")\n            .args([\"rev-parse\", \"--short\", \"HEAD\"])\n            .output()\n            .expect(\"failed to access the HEAD commit hash\");\n\n        let git_hash = String::from_utf8(output.stdout).unwrap();\n\n        let topos_version = if git_hash.is_empty() {\n            DEFAULT_VERSION\n        } else {\n            git_hash.as_str()\n        };\n\n        println!(\"cargo:rustc-env=TOPOS_VERSION={topos_version}\");\n    }\n}\n"
  },
  {
    "path": "crates/topos/src/components/mod.rs",
    "content": "pub(crate) mod node;\npub(crate) mod regtest;\npub(crate) mod setup;\n"
  },
  {
    "path": "crates/topos/src/components/node/commands/init.rs",
    "content": "use std::path::PathBuf;\n\nuse clap::Args;\nuse serde::Serialize;\nuse topos_config::node::NodeRole;\n\n#[derive(Args, Debug, Serialize)]\n#[command(about = \"Setup your node\", trailing_var_arg = true)]\n#[serde(rename_all = \"kebab-case\")]\npub struct Init {\n    /// Name to identify your node\n    #[arg(long, env = \"TOPOS_NODE_NAME\", default_value = \"default\")]\n    pub name: Option<String>,\n\n    /// Role of your node\n    #[arg(long, value_enum, env = \"TOPOS_NODE_ROLE\", default_value_t = NodeRole::Validator)]\n    pub role: NodeRole,\n\n    /// Subnet of your node\n    #[arg(long, env = \"TOPOS_NODE_SUBNET\", default_value = \"topos\")]\n    pub subnet: Option<String>,\n\n    /// The path to the SecretsManager config file. Used for Hashicorp Vault.\n    /// If omitted, the local FS secrets manager is used\n    #[arg(long, env = \"TOPOS_SECRETS_MANAGER\")]\n    pub secrets_config: Option<String>,\n\n    /// For certain use cases, we manually provide private keys to a running node, and don't want to\n    /// rely on polygon-edge during runtime. Example: A sequencer which runs for an external EVM chain\n    #[arg(long, env = \"TOPOS_NO_EDGE_PROCESS\", action)]\n    pub no_edge_process: bool,\n\n    /// Installation directory path for Polygon Edge binary\n    #[clap(from_global)]\n    pub(crate) edge_path: PathBuf,\n}\n"
  },
  {
    "path": "crates/topos/src/components/node/commands/status.rs",
    "content": "use super::NodeArgument;\nuse clap::Args;\nuse serde::Serialize;\n\n#[derive(Args, Debug, Serialize)]\n#[command(about = \"Get node status\")]\npub(crate) struct Status {\n    #[command(flatten)]\n    pub(crate) node_args: NodeArgument,\n\n    #[arg(long)]\n    pub(crate) sample: bool,\n}\n"
  },
  {
    "path": "crates/topos/src/components/node/commands/up.rs",
    "content": "use std::path::PathBuf;\n\nuse clap::Args;\nuse serde::Serialize;\n\n#[derive(Args, Clone, Debug, Serialize)]\n#[command(about = \"Spawn your node\")]\n#[serde(rename_all = \"kebab-case\")]\npub struct Up {\n    /// Name to identify your node\n    #[arg(long, env = \"TOPOS_NODE_NAME\", default_value = \"default\")]\n    pub name: Option<String>,\n\n    /// The path to the SecretsManager config file. Used for Hashicorp Vault.\n    /// If omitted, the local FS secrets manager is used\n    #[arg(long, env = \"TOPOS_SECRETS_MANAGER\")]\n    pub secrets_config: Option<String>,\n\n    /// Defines that an external edge node will be use, replacing the one normally run by the node.\n    /// Usable for cases where edge endpoint is available as infura (or similar cloud provider) endpoint\n    #[arg(long, env = \"TOPOS_NO_EDGE_PROCESS\", action)]\n    pub no_edge_process: bool,\n    /// Socket of the opentelemetry agent endpoint.\n    /// If not provided open telemetry will not be used\n    #[arg(long, env = \"TOPOS_OTLP_AGENT\")]\n    pub otlp_agent: Option<String>,\n    /// Otlp service name.\n    /// If not provided open telemetry will not be used\n    #[arg(long, env = \"TOPOS_OTLP_SERVICE_NAME\")]\n    pub otlp_service_name: Option<String>,\n\n    /// Installation directory path for Polygon Edge binary\n    #[clap(from_global)]\n    pub(crate) edge_path: PathBuf,\n}\n"
  },
  {
    "path": "crates/topos/src/components/node/commands.rs",
    "content": "use std::path::PathBuf;\n\nuse clap::{Args, Subcommand};\nuse serde::Serialize;\n\nmod init;\nmod status;\nmod up;\n\npub(crate) use init::Init;\npub(crate) use status::Status;\npub(crate) use up::Up;\n\n#[derive(Args, Debug, Serialize)]\npub(crate) struct NodeArgument {\n    #[clap(short, long, default_value = \"http://[::1]:1340\")]\n    pub(crate) node: String,\n}\n\n/// Utility to manage your nodes in the Topos network\n#[derive(Args, Debug)]\npub(crate) struct NodeCommand {\n    #[clap(from_global)]\n    pub(crate) verbose: u8,\n\n    #[clap(from_global)]\n    pub(crate) no_color: bool,\n\n    #[clap(from_global)]\n    pub(crate) home: PathBuf,\n\n    /// Installation directory path for Polygon Edge binary\n    #[arg(\n        global = true,\n        long,\n        env = \"TOPOS_POLYGON_EDGE_BIN_PATH\",\n        default_value = \".\"\n    )]\n    pub(crate) edge_path: PathBuf,\n\n    #[clap(subcommand)]\n    pub(crate) subcommands: Option<NodeCommands>,\n}\n\n#[derive(Subcommand, Debug, Serialize)]\npub(crate) enum NodeCommands {\n    Up(Box<Up>),\n    Init(Box<Init>),\n    Status(Status),\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn test_run() {\n        assert!(NodeCommands::has_subcommand(\"up\"));\n        assert!(NodeCommands::has_subcommand(\"init\"));\n    }\n}\n"
  },
  {
    "path": "crates/topos/src/components/node/mod.rs",
    "content": "use std::{\n    fs::{create_dir_all, remove_dir_all, OpenOptions},\n    io::Write,\n};\nuse std::{path::Path, sync::Arc};\nuse tokio::sync::Mutex;\nuse tonic::transport::{Channel, Endpoint};\nuse topos_telemetry::tracing::setup_tracing;\nuse tower::Service;\nuse tracing::error;\n\nuse self::commands::{NodeCommand, NodeCommands};\nuse topos_config::node::NodeConfig;\nuse topos_config::{edge::command::BINARY_NAME, Config};\nuse topos_core::api::grpc::tce::v1::console_service_client::ConsoleServiceClient;\n\npub(crate) mod commands;\npub(crate) mod services;\n\npub(crate) struct NodeService {\n    pub(crate) console_client: Arc<Mutex<ConsoleServiceClient<Channel>>>,\n}\n\nimpl NodeService {\n    pub(crate) fn with_grpc_endpoint(endpoint: &str) -> Self {\n        Self {\n            console_client: setup_console_tce_grpc(endpoint),\n        }\n    }\n}\n\npub(crate) async fn handle_command(\n    NodeCommand {\n        subcommands,\n        verbose,\n        no_color,\n        home,\n        edge_path,\n    }: NodeCommand,\n) -> Result<(), Box<dyn std::error::Error>> {\n    match subcommands {\n        Some(NodeCommands::Init(cmd)) => {\n            let cmd = *cmd;\n            let name = cmd.name.clone().expect(\"No name or default was given\");\n\n            _ = setup_tracing(verbose, no_color, None, None, env!(\"TOPOS_VERSION\"));\n            // Construct path to node config\n            // will be $TOPOS_HOME/node/default/ with no given name\n            // and $TOPOS_HOME/node/<name>/ with a given name\n            let node_path = home.join(\"node\").join(&name);\n\n            // If the folders don't exist yet, create it\n            create_dir_all(&node_path).expect(\"failed to create node folder\");\n\n            // Check if the config file exists\n            let config_path = node_path.join(\"config.toml\");\n\n            if Path::new(&config_path).exists() {\n                println!(\"Config file: {} already exists\", 
config_path.display());\n                std::process::exit(1);\n            }\n\n            if cmd.no_edge_process {\n                println!(\"Init the node without polygon-edge process...\");\n            } else {\n                // Generate the Edge configuration\n                match topos_config::edge::generate_edge_config(\n                    edge_path.join(BINARY_NAME),\n                    node_path.clone(),\n                )\n                .await\n                {\n                    Ok(Ok(status)) => {\n                        if let Some(0) = status.code() {\n                            println!(\"Edge configuration successfully generated\");\n                        } else {\n                            println!(\n                                \"Edge configuration generation terminated with error status: {:?}\",\n                                status\n                            );\n                            remove_dir_all(node_path).expect(\"failed to remove config folder\");\n                            std::process::exit(1);\n                        }\n                    }\n                    Ok(Err(e)) => {\n                        println!(\"Failed to generate edge config with error {e}\");\n                        remove_dir_all(node_path).expect(\"failed to remove config folder\");\n                        std::process::exit(1);\n                    }\n                    Err(_) => {\n                        println!(\"Failed to generate edge config\");\n                        remove_dir_all(node_path).expect(\"failed to remove config folder\");\n                        std::process::exit(1);\n                    }\n                }\n            }\n\n            let node_config = NodeConfig::create(&home, &name, Some(&cmd));\n\n            // Creating the TOML output\n            let config_toml = match node_config.to_toml() {\n                Ok(config) => config,\n                Err(error) => {\n                    
println!(\"Failed to generate TOML config: {error}\");\n                    remove_dir_all(node_path).expect(\"failed to remove config folder\");\n                    std::process::exit(1);\n                }\n            };\n\n            let config_path = node_path.join(\"config.toml\");\n            let mut node_config_file = OpenOptions::new()\n                .write(true)\n                .create(true)\n                .truncate(true)\n                .open(config_path)\n                .expect(\"failed to create default node file\");\n\n            node_config_file\n                .write_all(config_toml.to_string().as_bytes())\n                .expect(\"failed to write to default node file\");\n\n            println!(\n                \"Created node config file at {}/config.toml\",\n                node_path.display()\n            );\n\n            Ok(())\n        }\n        Some(NodeCommands::Up(cmd)) => {\n            let cmd_cloned = cmd.clone();\n            let command = *cmd;\n\n            let name = cmd_cloned\n                .name\n                .as_ref()\n                .expect(\"No name or default was given for node\");\n\n            let config = NodeConfig::try_from(&home, name, Some(&command))?;\n\n            topos_node::start(\n                verbose,\n                no_color,\n                cmd_cloned.otlp_agent,\n                cmd_cloned.otlp_service_name,\n                cmd_cloned.no_edge_process,\n                config,\n            )\n            .await?;\n\n            Ok(())\n        }\n        Some(NodeCommands::Status(status)) => {\n            let mut node_service = NodeService::with_grpc_endpoint(&status.node_args.node);\n            let exit_code = i32::from(!(node_service.call(status).await?));\n            std::process::exit(exit_code);\n        }\n        None => Ok(()),\n    }\n}\n\nfn setup_console_tce_grpc(endpoint: &str) -> Arc<Mutex<ConsoleServiceClient<Channel>>> {\n    match 
Endpoint::from_shared(endpoint.to_string()) {\n        Ok(endpoint) => Arc::new(Mutex::new(ConsoleServiceClient::new(\n            endpoint.connect_lazy(),\n        ))),\n        Err(e) => {\n            error!(\"Failure to setup the gRPC API endpoint on {endpoint}: {e}\");\n            std::process::exit(1);\n        }\n    }\n}\n"
  },
  {
    "path": "crates/topos/src/components/node/services/status.rs",
    "content": "use std::{\n    future::Future,\n    io::Error,\n    pin::Pin,\n    task::{Context, Poll},\n};\n\nuse futures::FutureExt;\nuse topos_core::api::grpc::tce::v1::StatusRequest;\nuse tower::Service;\nuse tracing::{debug, error};\n\nuse crate::components::node::{commands::Status, NodeService};\n\nimpl Service<Status> for NodeService {\n    type Response = bool;\n\n    type Error = std::io::Error;\n\n    type Future =\n        Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;\n\n    fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {\n        Poll::Ready(Ok(()))\n    }\n\n    fn call(&mut self, _: Status) -> Self::Future {\n        let client = self.console_client.clone();\n\n        async move {\n            debug!(\"Sending the request to the TCE server...\");\n            match client.lock().await.status(StatusRequest {}).await {\n                Ok(status_response) => {\n                    let status = status_response.into_inner();\n                    debug!(\"Successfully fetched the status {:?} from the TCE\", status);\n                    Ok(status.has_active_sample)\n                }\n                Err(err) => {\n                    error!(\"TCE server returned an error: {:?}\", err);\n                    Err(Error::new(std::io::ErrorKind::Other, err))\n                }\n            }\n        }\n        .boxed()\n    }\n}\n"
  },
  {
    "path": "crates/topos/src/components/node/services.rs",
    "content": "pub(crate) mod status;\n"
  },
  {
    "path": "crates/topos/src/components/regtest/commands/spam.rs",
    "content": "use clap::Args;\n\n#[derive(Args, Debug)]\n#[command(\n    about = \"Run a test topos certificate spammer to send test certificates to the network, \\\n             generating randomly among the `nb_subnets` subnets the batch of `cert_per_batch` \\\n             certificates at every `batch-interval`\"\n)]\npub struct Spam {\n    /// The target node api endpoint.\n    /// Multiple nodes could be specified as comma separated list\n    /// e.g. `--target-nodes=http://[::1]:1340,http://[::1]:1341`\n    #[clap(\n        long,\n        env = \"TOPOS_NETWORK_SPAMMER_TARGET_NODES\",\n        value_delimiter = ','\n    )]\n    pub target_nodes: Option<Vec<String>>,\n    /// Path to json file with list of target nodes as alternative to `--target-nodes`\n    #[clap(long, env = \"TOPOS_NETWORK_SPAMMER_TARGET_NODES_PATH\")]\n    pub target_nodes_path: Option<String>,\n    /// Seed for generation of local private signing keys and corresponding subnet ids.\n    #[arg(\n        long,\n        env = \"TOPOS_NETWORK_SPAMMER_LOCAL_KEY_SEED\",\n        default_value = \"1\"\n    )]\n    pub local_key_seed: u64,\n    /// Certificates generated in one batch. Batch is generated every `batch-interval` milliseconds.\n    #[arg(\n        long,\n        env = \"TOPOS_NETWORK_SPAMMER_CERT_PER_BATCH\",\n        default_value = \"1\"\n    )]\n    pub cert_per_batch: u64,\n    /// Number of subnets to use for certificate generation. 
For every certificate subnet id will be picked randomly.\n    #[arg(\n        long,\n        env = \"TOPOS_NETWORK_SPAMMER_NUMBER_OF_SUBNETS\",\n        default_value = \"1\"\n    )]\n    pub nb_subnets: u8,\n    /// Number of batches to generate before finishing execution.\n    /// If not specified, batches will be generated indefinitely.\n    #[arg(long, env = \"TOPOS_NETWORK_SPAMMER_NUMBER_OF_BATCHES\")]\n    pub nb_batches: Option<u64>,\n    /// Time interval in milliseconds between generated batches of certificates\n    #[arg(\n        long,\n        env = \"TOPOS_NETWORK_SPAMMER_BATCH_INTERVAL\",\n        default_value = \"2000\"\n    )]\n    pub batch_interval: u64,\n    /// List of generated certificate target subnets. No target subnets by default.\n    /// For example `--target-subnets=0x3bc19e36ff1673910575b6727a974a9abd80c9a875d41ab3e2648dbfb9e4b518,0xa00d60b2b408c2a14c5d70cdd2c205db8985ef737a7e55ad20ea32cc9e7c417c`\n    #[arg(\n        long,\n        env = \"TOPOS_NETWORK_SPAMMER_TARGET_SUBNETS\",\n        value_delimiter = ','\n    )]\n    pub target_subnets: Option<Vec<String>>,\n    /// Socket of the opentelemetry agent endpoint.\n    /// If not provided open telemetry will not be used\n    #[arg(long, env = \"TOPOS_OTLP_AGENT\")]\n    pub otlp_agent: Option<String>,\n    /// Otlp service name.\n    /// If not provided open telemetry will not be used\n    #[arg(long, env = \"TOPOS_OTLP_SERVICE_NAME\")]\n    pub otlp_service_name: Option<String>,\n    /// Flag to indicate usage of Kubernetes.\n    #[arg(\n        long,\n        env = \"TOPOS_NETWORK_SPAMMER_BENCHMARK\",\n        requires = \"target_hosts\",\n        requires = \"number\"\n    )]\n    pub benchmark: bool,\n    /// Template for generating target node entrypoints.\n    /// e.g. 
`--hosts=\"http://validator-{N}:1340\"`\n    #[arg(\n        long,\n        env = \"TOPOS_NETWORK_SPAMMER_TARGET_HOSTS\",\n        requires = \"benchmark\"\n    )]\n    pub target_hosts: Option<String>,\n    /// Number of nodes to generate based on the DNS template.\n    #[arg(long, env = \"TOPOS_NETWORK_SPAMMER_NUMBER\", requires = \"benchmark\")]\n    pub number: Option<u32>,\n}\n\nimpl Spam {}\n"
  },
  {
    "path": "crates/topos/src/components/regtest/commands.rs",
    "content": "use clap::{Args, Subcommand};\n\nmod spam;\n\npub(crate) use spam::Spam;\n\n/// Run test commands (e.g., pushing a certificate to a TCE process)\n#[derive(Args, Debug)]\npub(crate) struct RegtestCommand {\n    #[clap(from_global)]\n    pub(crate) verbose: u8,\n\n    #[clap(subcommand)]\n    pub(crate) subcommands: Option<RegtestCommands>,\n}\n\n#[derive(Subcommand, Debug)]\npub(crate) enum RegtestCommands {\n    Spam(Box<Spam>),\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn test_run() {\n        assert!(RegtestCommands::has_subcommand(\"spam\"));\n    }\n}\n"
  },
  {
    "path": "crates/topos/src/components/regtest/mod.rs",
    "content": "use self::commands::{RegtestCommand, RegtestCommands};\n\nuse opentelemetry::global;\nuse tokio::{\n    spawn,\n    sync::{mpsc, oneshot},\n};\nuse topos_certificate_spammer::{error::Error, CertificateSpammerConfig};\nuse topos_telemetry::tracing::setup_tracing;\nuse tracing::{error, info};\n\npub(crate) mod commands;\n\npub(crate) async fn handle_command(\n    RegtestCommand {\n        verbose,\n        subcommands,\n    }: RegtestCommand,\n) -> Result<(), Box<dyn std::error::Error>> {\n    match subcommands {\n        Some(RegtestCommands::Spam(cmd)) => {\n            let config = CertificateSpammerConfig {\n                target_nodes: cmd.target_nodes,\n                target_nodes_path: cmd.target_nodes_path,\n                local_key_seed: cmd.local_key_seed,\n                cert_per_batch: cmd.cert_per_batch,\n                nb_subnets: cmd.nb_subnets,\n                nb_batches: cmd.nb_batches,\n                batch_interval: cmd.batch_interval,\n                target_subnets: cmd.target_subnets,\n                benchmark: cmd.benchmark,\n                target_hosts: cmd.target_hosts,\n                number: cmd.number,\n            };\n\n            // Setup instrumentation if both otlp agent and otlp service name\n            // are provided as arguments\n            setup_tracing(\n                verbose,\n                false,\n                cmd.otlp_agent,\n                cmd.otlp_service_name,\n                env!(\"TOPOS_VERSION\"),\n            )?;\n\n            let (shutdown_sender, shutdown_receiver) = mpsc::channel::<oneshot::Sender<()>>(1);\n            let mut runtime = spawn(topos_certificate_spammer::run(config, shutdown_receiver));\n\n            loop {\n                tokio::select! 
{\n                    _ = tokio::signal::ctrl_c() => {\n                        info!(\"Received ctrl_c, shutting down application...\");\n\n                        let (shutdown_finished_sender, shutdown_finished_receiver) = oneshot::channel::<()>();\n                        if let Err(e) = shutdown_sender.send(shutdown_finished_sender).await {\n                            error!(\"Error sending shutdown signal to Spammer application: {e}\");\n                        }\n                        if let Err(e) = shutdown_finished_receiver.await {\n                            error!(\"Error with shutdown receiver: {e}\");\n                        }\n                        info!(\"Shutdown procedure finished, exiting...\");\n                    }\n                    result = &mut runtime =>{\n                        global::shutdown_tracer_provider();\n\n                        if let Ok(Err(Error::BenchmarkConfig(ref msg))) = result {\n                            error!(\"Benchmark configuration error:\\n{}\", msg);\n                            std::process::exit(1);\n                        }\n\n                        if let Err(ref error) = result {\n\n                            error!(\"Unable to execute network spam command due to: {error}\");\n                            std::process::exit(1);\n                        }\n                        break;\n                    }\n                }\n            }\n\n            Ok(())\n        }\n        None => Ok(()),\n    }\n}\n"
  },
  {
    "path": "crates/topos/src/components/setup/commands/subnet.rs",
    "content": "use clap::Args;\n\nuse std::path::PathBuf;\n\n#[derive(Args, Debug)]\n#[command(about = \"Install Polygon Edge node binary\")]\npub struct Subnet {\n    /// Installation directory path for Polygon Edge binary.\n    /// If not provided, Polygon Edge binary will be installed to the current directory\n    #[clap(long, env = \"TOPOS_SETUP_POLYGON_EDGE_DIR\", default_value = \".\")]\n    pub path: PathBuf,\n    /// Polygon Edge release version. If not provided, latest release version will be installed\n    #[arg(long, env = \"TOPOS_SETUP_SUBNET_RELEASE\")]\n    pub release: Option<String>,\n    /// Polygon Edge Github repository\n    #[arg(\n        long,\n        env = \"TOPOS_SETUP_SUBNET_REPOSITORY\",\n        default_value = \"topos-protocol/polygon-edge\"\n    )]\n    pub repository: String,\n    /// List all available Polygon Edge release versions without installation\n    #[arg(long, action)]\n    pub list_releases: bool,\n}\n\nimpl Subnet {}\n"
  },
  {
    "path": "crates/topos/src/components/setup/commands.rs",
    "content": "use clap::{Args, Subcommand};\n\nmod subnet;\n\npub(crate) use subnet::Subnet;\n\n/// Topos CLI subcommand for the setup of various Topos related components (e.g., installation of Polygon Edge binary)\n#[derive(Args, Debug)]\npub(crate) struct SetupCommand {\n    #[clap(subcommand)]\n    pub(crate) subcommands: Option<SetupCommands>,\n}\n\n#[derive(Subcommand, Debug)]\npub(crate) enum SetupCommands {\n    Subnet(Box<Subnet>),\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn test_run() {\n        assert!(SetupCommands::has_subcommand(\"subnet\"));\n    }\n}\n"
  },
  {
    "path": "crates/topos/src/components/setup/mod.rs",
    "content": "use self::commands::{SetupCommand, SetupCommands};\nuse tokio::{signal, spawn};\nuse tracing::{error, info};\n\nuse topos::{install_polygon_edge, list_polygon_edge_releases};\n\npub(crate) mod commands;\n\npub(crate) async fn handle_command(\n    SetupCommand { subcommands }: SetupCommand,\n) -> Result<(), Box<dyn std::error::Error>> {\n    match subcommands {\n        Some(SetupCommands::Subnet(cmd)) => {\n            spawn(async move {\n                if cmd.list_releases {\n                    info!(\n                        \"Retrieving release version list from repository: {}\",\n                        &cmd.repository\n                    );\n                    if let Err(e) = list_polygon_edge_releases(cmd.repository).await {\n                        error!(\"Error listing Polygon Edge release versions: {e}\");\n                        std::process::exit(1);\n                    } else {\n                        std::process::exit(0);\n                    }\n                } else {\n                    info!(\n                        \"Starting installation of Polygon Edge binary to target path: {}\",\n                        &cmd.path.display()\n                    );\n                    println!(\n                        \"Starting installation of Polygon Edge binary to target path: {}\",\n                        &cmd.path.display()\n                    );\n                    if let Err(e) =\n                        install_polygon_edge(cmd.repository, cmd.release, cmd.path.as_path()).await\n                    {\n                        error!(\"Error installing Polygon Edge: {e}\");\n                        eprintln!(\"Error installing Polygon Edge: {e}\");\n                        std::process::exit(1);\n                    } else {\n                        info!(\"Polygon Edge installation successful\");\n                        println!(\"Polygon Edge installation successful\");\n                        std::process::exit(0);\n    
                }\n                }\n            });\n\n            signal::ctrl_c()\n                .await\n                .expect(\"failed to listen for signals\");\n\n            Ok(())\n        }\n        None => {\n            error!(\"No subcommand provided. You can use `--help` to see available subcommands.\");\n            eprintln!(\"No subcommand provided. You can use `--help` to see available subcommands.\");\n            std::process::exit(1);\n        }\n    }\n}\n"
  },
  {
    "path": "crates/topos/src/lib.rs",
    "content": "use flate2::read::GzDecoder;\nuse serde_json::Value;\nuse std::collections::HashSet;\nuse std::fs::File;\nuse std::io::Write;\nuse std::path::Path;\nuse tar::Archive;\nuse tracing::{error, info};\n\nconst GITHUB_REPO_API: &str = \"https://api.github.com/repos/\";\n\n#[derive(Debug, thiserror::Error)]\npub enum Error {\n    #[error(\"Http client error: {0}\")]\n    Http(reqwest::Error),\n    #[error(\"Json parsing error: {0}\")]\n    InvalidJson(serde_json::Error),\n    #[error(\"There is no valid Polygon Edge release available\")]\n    NoValidRelease,\n    #[error(\"Invalid release metadata\")]\n    InvalidReleaseMetadata,\n    #[error(\"File io error: {0}\")]\n    File(std::io::Error),\n    #[error(\"Invalid private key\")]\n    InvalidPrivateKey,\n    #[error(\"Invalid Validator address\")]\n    InvalidValidatorAddress,\n}\n\nfn map_arch(arch: &str) -> &str {\n    match arch {\n        \"x86\" => \"x86\",\n        \"x86_64\" => \"amd64\",\n        \"aarch64\" => \"arm64\",\n        _ => \"unknown\",\n    }\n}\n\nfn map_os(arch: &str) -> &str {\n    match arch {\n        \"linux\" => \"linux\",\n        \"macos\" => \"darwin\",\n        \"windows\" => \"windows\",\n        _ => \"unknown\",\n    }\n}\n\n/// Calculate expected polygon edge binary name for this platform\n/// By convention it is in the format `polygon-edge-<cpu architecture>-<operating system>`\nfn determine_binary_release_name(release: &PolygonEdgeRelease) -> String {\n    \"polygon-edge\".to_string()\n        + \"_\"\n        + &release.version[1..]\n        + \"_\"\n        + map_os(std::env::consts::OS)\n        + \"_\"\n        + map_arch(std::env::consts::ARCH)\n        + \".tar.gz\"\n}\n\n/// Download Polygon Edge binary from repository to requested target directory\nasync fn download_binary(file_name: &str, uri: &str, target_directory: &Path) -> Result<(), Error> {\n    info!(\n        \"Downloading binary `{}` to target directory: {}\",\n        file_name,\n        
target_directory.display()\n    );\n\n    let response = reqwest::get(uri).await.map_err(Error::Http)?;\n    let download_file_path = target_directory.join(Path::new(file_name));\n    {\n        //Download file\n        let mut target_archive_file = File::create(&download_file_path).map_err(|error| {\n            error!(\"Unable to create file: {error}\");\n            Error::File(error)\n        })?;\n\n        target_archive_file\n            .write_all(response.bytes().await.map_err(Error::Http)?.as_ref())\n            .map_err(Error::File)?;\n    }\n\n    {\n        // Decompress archive\n        let archive_file = File::open(&download_file_path).map_err(Error::File)?;\n        let mut archive = Archive::new(GzDecoder::new(archive_file));\n        archive.unpack(target_directory).map_err(Error::File)?;\n    }\n\n    // Remove downloaded archive\n    std::fs::remove_file(&download_file_path).map_err(Error::File)?;\n\n    Ok(())\n}\n\n#[derive(Debug)]\nstruct PolygonEdgeRelease {\n    version: String,\n    binary: String,\n    download_url: String,\n}\n\nasync fn get_available_releases(repository: &str) -> Result<Vec<PolygonEdgeRelease>, Error> {\n    // Retrieve list of releases\n    let uri = GITHUB_REPO_API.to_string() + repository + \"/releases\";\n\n    info!(\"Retrieving Polygon Edge release list {uri}\");\n    let client = reqwest::Client::new();\n    let body = client\n        .get(&uri)\n        .header(reqwest::header::USER_AGENT, \"Topos CLI\")\n        .send()\n        .await\n        .map_err(Error::Http)?\n        .text()\n        .await\n        .map_err(Error::Http)?;\n\n    let body: Vec<Value> = match serde_json::from_str(&body) {\n        Ok(v) => v,\n        Err(e) => {\n            error!(\"Error parsing release list response: {e}\");\n            return Err(Error::InvalidJson(e));\n        }\n    };\n\n    if body.is_empty() {\n        error!(\"There is no valid Polygon Edge release available\");\n        return Err(Error::NoValidRelease);\n 
   }\n\n    let mut releases: Vec<PolygonEdgeRelease> = Vec::new();\n    // Parse all releases\n    // List of retrieved releases is already sorted, latest release being\n    // the first one in the list\n    for release in &body {\n        let tag_name = release\n            .get(\"name\")\n            .ok_or(Error::InvalidReleaseMetadata)?\n            .to_string()\n            .replace('\\\"', \"\");\n\n        let assets = release\n            .get(\"assets\")\n            .ok_or(Error::InvalidReleaseMetadata)?\n            .as_array()\n            .ok_or(Error::InvalidReleaseMetadata)?;\n        for asset in assets {\n            if let Some(name) = asset.get(\"name\").map(|v| v.to_string().replace('\\\"', \"\")) {\n                if let Some(url) = asset\n                    .get(\"browser_download_url\")\n                    .map(|v| v.to_string().replace('\\\"', \"\"))\n                {\n                    releases.push(PolygonEdgeRelease {\n                        binary: name,\n                        download_url: url,\n                        version: tag_name.clone(),\n                    })\n                }\n            }\n        }\n    }\n\n    Ok(releases)\n}\n\n/// Get list of releases from github repository\n/// Download required release by version, or latest one if desired release was not provided\nasync fn get_release(\n    repository: &str,\n    version: &Option<String>,\n) -> Result<PolygonEdgeRelease, Error> {\n    let releases = get_available_releases(repository).await?;\n    for release in releases {\n        let expected_binary = determine_binary_release_name(&release);\n        if let Some(version) = version {\n            if &release.version == version && release.binary == expected_binary {\n                return Ok(release);\n            }\n        } else if release.binary == expected_binary {\n            return Ok(release);\n        }\n    }\n\n    Err(Error::NoValidRelease)\n}\n\npub async fn install_polygon_edge(\n    
repository: String,\n    release: Option<String>,\n    path: &Path,\n) -> Result<(), Error> {\n    // Select release for installation\n    let release = get_release(repository.as_str(), &release).await?;\n\n    info!(\n        \"Selected release: {} from {}\",\n        release.version, release.download_url\n    );\n\n    // Download and install Polygon Edge binary\n    if let Err(e) = download_binary(&release.binary, &release.download_url, path).await {\n        error!(\"Unable to install Polygon Edge binary {e}\");\n        return Err(e);\n    }\n\n    Ok(())\n}\n\npub async fn list_polygon_edge_releases(repository: String) -> Result<(), Error> {\n    // Retrieve list of available releases from the Github repository\n    let releases = get_available_releases(&repository).await?;\n    println!(\"Available Polygon Edge releases:\");\n    releases\n        .into_iter()\n        .map(|r| r.version)\n        .collect::<HashSet<String>>()\n        .iter()\n        .for_each(|r| {\n            println!(\"   {}\", r);\n        });\n\n    Ok(())\n}\n"
  },
  {
    "path": "crates/topos/src/main.rs",
    "content": "use clap::Parser;\n\npub(crate) mod components;\npub(crate) mod options;\n\nuse crate::options::ToposCommand;\nuse tracing_log::LogTracer;\n\n#[tokio::main]\nasync fn main() -> Result<(), Box<dyn std::error::Error>> {\n    LogTracer::init()?;\n\n    let args = options::Opt::parse();\n\n    match args.commands {\n        ToposCommand::Setup(cmd) => components::setup::handle_command(cmd).await,\n        ToposCommand::Node(cmd) => components::node::handle_command(cmd).await,\n        ToposCommand::Regtest(cmd) => components::regtest::handle_command(cmd).await,\n    }\n}\n"
  },
  {
    "path": "crates/topos/src/options/input_format.rs",
    "content": "use clap::ValueEnum;\nuse serde::Serialize;\n\n#[derive(ValueEnum, Copy, Clone, Debug, Serialize)]\npub(crate) enum InputFormat {\n    Json,\n    Plain,\n}\n"
  },
  {
    "path": "crates/topos/src/options.rs",
    "content": "use clap::{Parser, Subcommand};\nuse std::{ffi::OsString, path::PathBuf};\n\nuse crate::components::node::commands::NodeCommand;\nuse crate::components::regtest::commands::RegtestCommand;\nuse crate::components::setup::commands::SetupCommand;\n\npub(crate) mod input_format;\n\n#[derive(Parser, Debug)]\n#[clap(name = \"topos\", about = \"Topos CLI\")]\npub(crate) struct Opt {\n    /// Defines the verbosity level\n    #[arg(\n        long,\n        short = 'v',\n        action = clap::ArgAction::Count,\n        global = true\n    )]\n    pub(crate) verbose: u8,\n\n    /// Disable color in logs\n    #[arg(long, global = true, env = \"TOPOS_LOG_NOCOLOR\")]\n    no_color: bool,\n\n    /// Home directory for the configuration\n    #[arg(\n        long,\n        env = \"TOPOS_HOME\",\n        default_value = get_default_home(),\n        global = true\n    )]\n    pub(crate) home: PathBuf,\n\n    #[command(subcommand)]\n    pub(crate) commands: ToposCommand,\n}\n\n/// If no path is given for the --home argument, we use the default one\n/// ~/.config/topos for a UNIX subsystem\nfn get_default_home() -> OsString {\n    let mut home = dirs::home_dir().unwrap();\n    home.push(\".config\");\n    home.push(\"topos\");\n    home.into_os_string()\n}\n\n#[derive(Subcommand, Debug)]\npub(crate) enum ToposCommand {\n    Setup(SetupCommand),\n    Node(NodeCommand),\n    Regtest(RegtestCommand),\n}\n"
  },
  {
    "path": "crates/topos/tests/cert_delivery.rs",
    "content": "use futures::{future::join_all, StreamExt};\nuse libp2p::PeerId;\nuse rand::seq::{IteratorRandom, SliceRandom};\nuse rstest::*;\nuse serial_test::serial;\nuse std::collections::{HashMap, HashSet};\nuse std::time::Duration;\nuse test_log::test;\nuse tokio::spawn;\nuse tokio::sync::mpsc;\nuse tonic::transport::Uri;\nuse topos_core::{\n    api::grpc::{\n        shared::v1::checkpoints::TargetCheckpoint,\n        tce::v1::{\n            api_service_client::ApiServiceClient,\n            console_service_client::ConsoleServiceClient,\n            watch_certificates_request::OpenStream,\n            watch_certificates_response::{CertificatePushed, Event},\n            StatusRequest, SubmitCertificateRequest,\n        },\n    },\n    uci::{Certificate, SubnetId, CERTIFICATE_ID_LENGTH, SUBNET_ID_LENGTH},\n};\nuse topos_test_sdk::{certificates::create_certificate_chains, tce::create_network};\nuse tracing::{debug, info, warn};\n\nconst NUMBER_OF_SUBNETS_PER_CLIENT: usize = 1;\n\nfn get_subset_of_subnets(subnets: &[SubnetId], subset_size: usize) -> Vec<SubnetId> {\n    let mut rng = rand::thread_rng();\n    Vec::from_iter(\n        subnets\n            .iter()\n            .cloned()\n            .choose_multiple(&mut rng, subset_size),\n    )\n}\n\n#[rstest]\n#[test(tokio::test)]\n#[timeout(Duration::from_secs(10))]\n#[serial]\nasync fn start_a_cluster() {\n    let mut peers_context = create_network(5, &[]).await;\n\n    let mut status: Vec<bool> = Vec::new();\n\n    for (_peer_id, client) in peers_context.iter_mut() {\n        let response = client\n            .console_grpc_client\n            .status(StatusRequest {})\n            .await\n            .expect(\"Can't get status\");\n\n        status.push(response.into_inner().has_active_sample);\n    }\n\n    assert!(status.iter().all(|s| *s));\n}\n\n#[rstest]\n#[tokio::test]\n#[timeout(Duration::from_secs(30))]\n#[serial]\n// FIXME: This test is flaky, it fails sometimes because of gRPC connection error 
(StreamClosed)\nasync fn cert_delivery() {\n    let subscriber = tracing_subscriber::FmtSubscriber::builder()\n        .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())\n        .with_test_writer()\n        .finish();\n    let _ = tracing::subscriber::set_global_default(subscriber);\n\n    let peer_number = 5;\n    let number_of_certificates_per_subnet = 2;\n    let number_of_subnets = 3;\n\n    let all_subnets: Vec<SubnetId> = (1..=number_of_subnets)\n        .map(|v| [v as u8; SUBNET_ID_LENGTH].into())\n        .collect();\n\n    // Generate certificates with respect to parameters\n    let mut subnet_certificates =\n        create_certificate_chains(all_subnets.as_ref(), number_of_certificates_per_subnet)\n            .into_iter()\n            .map(|(s, v)| (s, v.into_iter().map(|v| v.certificate).collect::<Vec<_>>()))\n            .collect::<HashMap<_, _>>();\n\n    debug!(\n        \"Generated certificates for distribution per subnet: {:#?}\",\n        &subnet_certificates\n    );\n\n    // Calculate expected final set of delivered certificates (every subnet  should receive certificates that has cross\n    // chain transaction targeting it)\n    let mut expected_certificates: HashMap<SubnetId, HashSet<Certificate>> = HashMap::new();\n    for certificates in subnet_certificates.values() {\n        for cert in certificates {\n            for target_subnet in &cert.target_subnets {\n                expected_certificates\n                    .entry(*target_subnet)\n                    .or_default()\n                    .insert(cert.clone());\n            }\n        }\n    }\n\n    warn!(\"Starting the cluster...\");\n    // List of peers (tce nodes) with their context\n    let mut peers_context = create_network(peer_number, &[]).await;\n\n    warn!(\"Cluster started, starting clients...\");\n    // Connected tce clients are passing received certificates to this mpsc::Receiver, collect all of them\n    let mut clients_delivered_certificates: 
Vec<mpsc::Receiver<(PeerId, SubnetId, Certificate)>> =\n        Vec::new(); // (Peer Id, Subnet Id, Certificate)\n    let mut client_tasks: Vec<tokio::task::JoinHandle<()>> = Vec::new(); // Clients connected to TCE API Service run in async tasks\n\n    let mut assign_at_least_one_client_to_every_subnet = all_subnets.clone();\n    for (peer_id, ctx) in peers_context.iter_mut() {\n        let peer_id = *peer_id;\n        // Make sure that every subnet is represented (connected through client) to at least 1 peer\n        // After that assign subnets randomly to clients, 1 subnet per connection to TCE\n        // as it is assumed that NUMBER_OF_SUBNETS_PER_CLIENT is 1 - that is also realistic case, topos node representing one subnet\n        let client_subnet_id: SubnetId = if assign_at_least_one_client_to_every_subnet.is_empty() {\n            get_subset_of_subnets(&all_subnets, NUMBER_OF_SUBNETS_PER_CLIENT).remove(0)\n        } else {\n            assign_at_least_one_client_to_every_subnet.pop().unwrap()\n        };\n\n        // Number of subnets one client is representing, normally 1\n        ctx.connected_subnets = Some(vec![client_subnet_id]);\n        debug!(\n            \"Opening client for peer id: {} with subnet_ids: {:?}\",\n            &peer_id, &client_subnet_id,\n        );\n\n        // (Peer id, Subnet Id, Certificate)\n        let (tx, rx) = mpsc::channel::<(PeerId, SubnetId, Certificate)>(\n            number_of_certificates_per_subnet * number_of_subnets,\n        );\n        clients_delivered_certificates.push(rx);\n\n        let in_stream_subnet_id = client_subnet_id;\n        let in_stream = async_stream::stream! 
{\n            yield OpenStream {\n                target_checkpoint: Some(TargetCheckpoint{\n                    target_subnet_ids: vec![in_stream_subnet_id.into()],\n                    positions: Vec::new()\n                }),\n                source_checkpoint: None\n            }.into();\n        };\n\n        // Number of certificates expected to receive for every subnet,\n        // to know when to close the TCE clients (and finish test)\n        let mut incoming_certificates_number =\n            expected_certificates.get(&client_subnet_id).unwrap().len();\n        // Open client connection to TCE service in separate async tasks\n        let mut client = ctx.api_grpc_client.clone();\n        let expected_certificate_debug: Vec<_> = expected_certificates\n            .get(&client_subnet_id)\n            .unwrap()\n            .iter()\n            .map(|c| c.id)\n            .collect();\n\n        let response = client.watch_certificates(in_stream).await.unwrap();\n\n        let client_task = spawn(async move {\n            debug!(\n                \"Spawning client task for peer: {} waiting for {} certificates: {:?}\",\n                peer_id, incoming_certificates_number, expected_certificate_debug\n            );\n\n            let mut resp_stream = response.into_inner();\n            while let Some(received) = resp_stream.next().await {\n                let received = received.unwrap();\n\n                if let Some(Event::CertificatePushed(CertificatePushed {\n                    certificate: Some(certificate),\n                    ..\n                })) = received.event\n                {\n                    debug!(\n                        \"Client peer_id: {} certificate id: {} delivered to subnet id {}, \",\n                        &peer_id,\n                        certificate.id.clone().unwrap(),\n                        &client_subnet_id\n                    );\n                    tx.send((peer_id, client_subnet_id, 
certificate.try_into().unwrap()))\n                        .await\n                        .unwrap();\n                    incoming_certificates_number -= 1;\n                    if incoming_certificates_number == 0 {\n                        // We have received all expected certificates for this subnet, end client\n                        break;\n                    }\n                }\n            }\n            debug!(\"Finishing client for peer_id: {}\", &peer_id);\n        });\n        client_tasks.push(client_task);\n    }\n\n    info!(\n        \"Waiting for expected delivered certificates {:?}\",\n        expected_certificates\n    );\n    // Delivery tasks collect certificates that clients of every TCE node\n    // are receiving to reduce them to one channel (delivery_rx)\n    let mut delivery_tasks = Vec::new();\n    // delivery_tx/delivery_rx Pass certificates from delivery tasks of every client to final collection of delivered certificates\n    let (delivery_tx, mut delivery_rx) = mpsc::channel::<(PeerId, SubnetId, Certificate)>(\n        peer_number * number_of_certificates_per_subnet * number_of_subnets,\n    );\n    for (index, mut client_delivered_certificates) in\n        clients_delivered_certificates.into_iter().enumerate()\n    {\n        let delivery_tx = delivery_tx.clone();\n        let delivery_task = tokio::spawn(async move {\n            // Read certificates that every client has received\n            info!(\"Delivery task for receiver {}\", index);\n            loop {\n                let x = client_delivered_certificates.recv().await;\n\n                match x {\n                    Some((peer_id, target_subnet_id, cert)) => {\n                        info!(\n                            \"Delivered certificate on peer_Id: {} cert id: {} from source subnet \\\n                             id: {} to target subnet id {}\",\n                            &peer_id, cert.id, cert.source_subnet_id, target_subnet_id\n                        );\n 
                       // Send certificates from every peer to one delivery_rx receiver\n                        delivery_tx\n                            .send((peer_id, target_subnet_id, cert))\n                            .await\n                            .unwrap();\n                    }\n                    _ => break,\n                }\n            }\n            // We will end this loop when sending TCE client has dropped channel sender and there\n            // are not certificates in channel\n            info!(\"End delivery task for receiver {}\", index);\n        });\n        delivery_tasks.push(delivery_task);\n    }\n    drop(delivery_tx);\n\n    // Broadcast multiple certificates from all subnets\n    info!(\"Broadcasting certificates...\");\n    for (peer_id, client) in peers_context.iter_mut() {\n        // If there exist of connected subnets to particular TCE\n        if let Some(ref connected_subnets) = client.connected_subnets {\n            // Iterate all subnets connected to TCE (normally 1)\n            for subnet_id in connected_subnets {\n                if let Some(certificates) = subnet_certificates.get_mut(subnet_id) {\n                    // Iterate all certificates meant to be sent to the particular network\n                    for cert in certificates.iter() {\n                        info!(\n                            \"Sending certificate id={} from subnet id: {} to peer id: {}\",\n                            &cert.id, &subnet_id, &peer_id\n                        );\n                        let _ = client\n                            .api_grpc_client\n                            .submit_certificate(SubmitCertificateRequest {\n                                certificate: Some(cert.clone().into()),\n                            })\n                            .await\n                            .expect(\"Can't send certificate\");\n                    }\n                    // Remove sent certificate, every certificate is sent only 
once to TCE network\n                    certificates.clear();\n                }\n            }\n        }\n    }\n    let assertion = async move {\n        info!(\"Waiting for all delivery tasks\");\n        join_all(delivery_tasks).await;\n        info!(\"All expected clients delivered\");\n        let mut delivered_certificates: HashMap<PeerId, HashMap<SubnetId, HashSet<Certificate>>> =\n            HashMap::new();\n        // Collect all certificates per peer_id and subnet_id\n        while let Some((peer_id, receiving_subnet_id, cert)) = delivery_rx.recv().await {\n            debug!(\"Counting delivered certificate cert id: {:?}\", cert.id);\n            delivered_certificates\n                .entry(peer_id)\n                .or_default()\n                .entry(receiving_subnet_id)\n                .or_default()\n                .insert(cert);\n        }\n        info!(\"All incoming certificates received\");\n        // Check received certificates for every peer and every subnet\n        for delivered_certificates_per_peer in delivered_certificates.values() {\n            for (subnet_id, delivered_certificates_per_subnet) in delivered_certificates_per_peer {\n                assert_eq!(\n                    expected_certificates.get(subnet_id).unwrap().len(),\n                    delivered_certificates_per_subnet.len()\n                );\n                assert_eq!(\n                    expected_certificates.get(subnet_id).unwrap(),\n                    delivered_certificates_per_subnet\n                );\n            }\n        }\n    };\n\n    // Set big timeout to prevent flaky fails. Instead fail/panic early in the test to indicate actual error\n    if tokio::time::timeout(std::time::Duration::from_secs(30), assertion)\n        .await\n        .is_err()\n    {\n        panic!(\"Timeout waiting for command\");\n    }\n}\n\n// Picks a random peer and sends it a certificate. 
All other peers listen for broadcast certs.\n// Three possible outcomes:\n// 1. No errors, returns Ok\n// 2. There were errors, returns a list of all errors encountered\n// 3. timeout\nasync fn assert_certificate_full_delivery(\n    timeout_broadcast: Duration,\n    peers: Vec<Uri>,\n) -> Result<(), Box<dyn std::error::Error>> {\n    use std::io::{Error, ErrorKind};\n    let random_peer: Uri = peers\n        .choose(&mut rand::thread_rng())\n        .ok_or_else(|| {\n            Error::new(\n                ErrorKind::Other,\n                \"Unable to select a random peer from the list: {peers:?}\",\n            )\n        })?\n        .try_into()?;\n\n    let pushed_certificate = Certificate::new_with_default_fields(\n        [0u8; CERTIFICATE_ID_LENGTH],\n        [1u8; SUBNET_ID_LENGTH].into(),\n        &[[2u8; SUBNET_ID_LENGTH].into()],\n    )?;\n    let certificate_id = pushed_certificate.id;\n\n    let mut join_handlers = Vec::new();\n\n    // check that all nodes delivered the certificate\n    for peer in peers {\n        join_handlers.push(tokio::spawn(async move {\n            let peer_string = peer.clone();\n            let mut client = ConsoleServiceClient::connect(peer_string.clone())\n                .await\n                .map_err(|_| (peer_string.clone(), \"Unable to connect to the api console\"))?;\n\n            let result = client.status(StatusRequest {}).await.map_err(|_| {\n                (\n                    peer_string.clone(),\n                    \"Unable to get the status from the api console\",\n                )\n            })?;\n\n            let status = result.into_inner();\n            if !status.has_active_sample {\n                return Err((peer_string, \"failed to find active sample\"));\n            }\n\n            let mut client = ApiServiceClient::connect(peer_string.clone())\n                .await\n                .map_err(|_| (peer_string.clone(), \"Unable to connect to the TCE api\"))?;\n\n            let in_stream 
= async_stream::stream! {\n                yield OpenStream {\n                    target_checkpoint: Some(TargetCheckpoint {\n                        target_subnet_ids: vec![[2u8; SUBNET_ID_LENGTH].into()],\n                        positions: vec![]\n                    }),\n                    source_checkpoint: None\n                }.into()\n            };\n\n            let response = client.watch_certificates(in_stream).await.map_err(|_| {\n                (\n                    peer_string.clone(),\n                    \"Unable to execute the watch_certificates on TCE api\",\n                )\n            })?;\n            let mut resp_stream = response.into_inner();\n            async move {\n                while let Some(received) = resp_stream.next().await {\n                    let received = received.unwrap();\n                    if let Some(Event::CertificatePushed(CertificatePushed {\n                        certificate: Some(certificate),\n                        ..\n                    })) = received.event\n                    {\n                        // unwrap is safe because we are sure that the certificate is present\n                        if certificate_id == certificate.id.unwrap() {\n                            debug!(\"Received the certificate on {}\", peer_string);\n                            return Ok(());\n                        }\n                    }\n                }\n\n                Err((peer_string.clone(), \"didn't receive any certificate\"))\n            }\n            .await\n        }));\n    }\n\n    let mut client = ApiServiceClient::connect(random_peer.clone()).await?;\n\n    // submit a certificate to one node\n    _ = client\n        .submit_certificate(SubmitCertificateRequest {\n            certificate: Some(pushed_certificate.into()),\n        })\n        .await?;\n\n    tokio::time::sleep(timeout_broadcast).await;\n\n    join_all(join_handlers)\n        .await\n        .iter()\n        .for_each(|result| 
match result {\n            Err(e) => {\n                panic!(\"Join error: {e}\");\n            }\n            Ok(Err((peer, error))) => {\n                panic!(\"Peer {peer} error: {error}\");\n            }\n            _ => {}\n        });\n    Ok(())\n}\n\nasync fn run_assert_certificate_full_delivery(\n    number_of_nodes: usize,\n    timeout_broadcast: Duration,\n) -> Result<(), Box<dyn std::error::Error>> {\n    let mut peers_context = create_network(number_of_nodes, &[]).await;\n\n    for (_peer_id, client) in peers_context.iter_mut() {\n        let response = client\n            .console_grpc_client\n            .status(StatusRequest {})\n            .await\n            .expect(\"Can't get status\");\n\n        assert!(response.into_inner().has_active_sample);\n    }\n\n    let nodes = peers_context\n        .iter()\n        .map(|peer| peer.1.api_entrypoint.clone())\n        .collect::<Vec<_>>();\n\n    debug!(\"Nodes used in test: {:?}\", nodes);\n\n    let assertion = async move {\n        let peers: Vec<tonic::transport::Uri> = nodes\n            .into_iter()\n            .map(TryInto::try_into)\n            .collect::<Result<_, _>>()\n            .map_err(|e| format!(\"Unable to parse node list: {e}\"))\n            .expect(\"Valid node list\");\n\n        match assert_certificate_full_delivery(timeout_broadcast, peers).await {\n            Ok(()) => {\n                info!(\"Check certificate delivery passed for network of {number_of_nodes}!\");\n            }\n            Err(e) => {\n                panic!(\"Test error: {e}\");\n            }\n        }\n    };\n\n    assertion.await;\n    Ok(())\n}\n\nmod serial_integration {\n    use super::*;\n\n    #[rstest]\n    #[case(5usize)]\n    #[case(9usize)]\n    #[test_log::test(tokio::test)]\n    #[trace]\n    #[timeout(Duration::from_secs(30))]\n    async fn push_and_deliver_cert(\n        #[case] number_of_nodes: usize,\n    ) -> Result<(), Box<dyn std::error::Error>> {\n        
run_assert_certificate_full_delivery(number_of_nodes, Duration::from_secs(10)).await\n    }\n}\n"
  },
  {
    "path": "crates/topos/tests/config.rs",
    "content": "use assert_cmd::prelude::*;\nuse regex::Regex;\nuse std::path::PathBuf;\nuse std::process::{Command, Stdio};\nuse tempfile::tempdir;\nuse tokio::fs::OpenOptions;\nuse tokio::io::{AsyncReadExt, AsyncSeekExt, AsyncWriteExt};\nuse toml::map::Map;\nuse toml::Value;\nuse topos_test_sdk::create_folder;\n\nuse crate::utils::setup_polygon_edge;\n\nmod utils;\n\nmod serial_integration {\n    use rstest::rstest;\n    use sysinfo::{Pid, PidExt, ProcessExt, Signal, System, SystemExt};\n\n    use super::*;\n\n    #[rstest]\n    #[tokio::test]\n    async fn handle_command_init(\n        #[from(create_folder)] home: PathBuf,\n    ) -> Result<(), Box<dyn std::error::Error>> {\n        let path = setup_polygon_edge(home.to_str().unwrap()).await;\n\n        let mut cmd = Command::cargo_bin(\"topos\")?;\n        cmd.arg(\"node\")\n            .arg(\"--edge-path\")\n            .arg(path)\n            .arg(\"init\")\n            .arg(\"--home\")\n            .arg(home.to_str().unwrap());\n\n        let output = cmd.assert().success();\n        let result: &str = std::str::from_utf8(&output.get_output().stdout)?;\n\n        assert!(result.contains(\"Created node config file\"));\n\n        // Verification: check that the config file was created\n        let config_path = home.join(\"node\").join(\"default\").join(\"config.toml\");\n        assert!(config_path.exists());\n\n        // Further verification might include checking the contents of the config file\n        let config_contents = std::fs::read_to_string(&config_path).unwrap();\n\n        assert!(config_contents.contains(\"[base]\"));\n        assert!(config_contents.contains(\"name = \\\"default\\\"\"));\n        assert!(config_contents.contains(\"[edge]\"));\n        assert!(config_contents.contains(\"[tce]\"));\n\n        Ok(())\n    }\n\n    #[tokio::test]\n    async fn handle_command_init_without_polygon_edge() -> Result<(), Box<dyn std::error::Error>> {\n        let tmp_home_dir = tempdir()?;\n\n        
let mut cmd = Command::cargo_bin(\"topos\")?;\n        cmd.arg(\"node\")\n            .arg(\"init\")\n            .arg(\"--home\")\n            .arg(tmp_home_dir.path().to_str().unwrap())\n            .arg(\"--no-edge-process\");\n\n        let output = cmd.assert().success();\n        let result: &str = std::str::from_utf8(&output.get_output().stdout)?;\n\n        assert!(result.contains(\"Created node config file\"));\n\n        let home = PathBuf::from(tmp_home_dir.path());\n\n        // Verification: check that the config file was created\n        let config_path = home.join(\"node\").join(\"default\").join(\"config.toml\");\n        assert!(config_path.exists());\n\n        // Further verification might include checking the contents of the config file\n        let config_contents = std::fs::read_to_string(&config_path).unwrap();\n\n        assert!(config_contents.contains(\"[base]\"));\n        assert!(config_contents.contains(\"name = \\\"default\\\"\"));\n        assert!(config_contents.contains(\"[tce]\"));\n\n        Ok(())\n    }\n\n    #[test]\n    fn nothing_written_if_failure() -> Result<(), Box<dyn std::error::Error>> {\n        let tmp_home_dir = tempdir()?;\n\n        let mut cmd = Command::cargo_bin(\"topos\")?;\n        cmd.arg(\"node\")\n            .arg(\"--edge-path\")\n            .arg(\"./inexistent/folder/\") // Command will fail\n            .arg(\"init\")\n            .arg(\"--home\")\n            .arg(tmp_home_dir.path().to_str().unwrap());\n\n        // Should fail\n        cmd.assert().failure();\n\n        let home = PathBuf::from(tmp_home_dir.path().to_str().unwrap());\n\n        // Check that files were NOT created\n        let config_path = home.join(\"node\").join(\"default\");\n        assert!(!config_path.exists());\n\n        Ok(())\n    }\n\n    #[tokio::test]\n    async fn handle_command_init_with_custom_name() -> Result<(), Box<dyn std::error::Error>> {\n        let tmp_home_dir = tempdir()?;\n        let node_name = 
\"TEST_NODE\";\n        let path = setup_polygon_edge(tmp_home_dir.path().to_str().unwrap()).await;\n\n        let mut cmd = Command::cargo_bin(\"topos\")?;\n        cmd.arg(\"node\")\n            .arg(\"--edge-path\")\n            .arg(path.clone())\n            .arg(\"init\")\n            .arg(\"--home\")\n            .arg(tmp_home_dir.path().to_str().unwrap())\n            .arg(\"--name\")\n            .arg(node_name);\n\n        let output = cmd.assert().success();\n        let result: &str = std::str::from_utf8(&output.get_output().stdout)?;\n\n        assert!(result.contains(\"Created node config file\"));\n\n        let home = PathBuf::from(path);\n\n        // Verification: check that the config file was created\n        let config_path = home.join(\"node\").join(node_name).join(\"config.toml\");\n        assert!(config_path.exists());\n\n        // Further verification might include checking the contents of the config file\n        let config_contents = std::fs::read_to_string(&config_path).unwrap();\n        assert!(config_contents.contains(\"[base]\"));\n        assert!(config_contents.contains(node_name));\n        assert!(config_contents.contains(\"[tce]\"));\n\n        Ok(())\n    }\n\n    /// Test node init env arguments\n    #[rstest]\n    #[tokio::test]\n    async fn command_init_precedence_env(\n        create_folder: PathBuf,\n    ) -> Result<(), Box<dyn std::error::Error>> {\n        let tmp_home_directory = create_folder;\n        // Test node init with env variables\n        let node_init_home_env = tmp_home_directory\n            .to_str()\n            .expect(\"path names are valid utf-8\");\n        let node_edge_path_env = setup_polygon_edge(node_init_home_env).await;\n        let node_init_name_env = \"TEST_NODE_ENV\";\n        let node_init_role_env = \"full-node\";\n        let node_init_subnet_env = \"topos-env\";\n\n        let mut cmd = Command::cargo_bin(\"topos\")?;\n        cmd.arg(\"node\")\n            
.env(\"TOPOS_POLYGON_EDGE_BIN_PATH\", &node_edge_path_env)\n            .env(\"TOPOS_HOME\", node_init_home_env)\n            .env(\"TOPOS_NODE_NAME\", node_init_name_env)\n            .env(\"TOPOS_NODE_ROLE\", node_init_role_env)\n            .env(\"TOPOS_NODE_SUBNET\", node_init_subnet_env)\n            .arg(\"init\");\n\n        let output = cmd.assert().success();\n        let result: &str = std::str::from_utf8(&output.get_output().stdout)?;\n\n        // Test node init with cli flags\n        assert!(result.contains(\"Created node config file\"));\n\n        let home = PathBuf::from(node_init_home_env);\n        // Verification: check that the config file was created\n        let config_path = home\n            .join(\"node\")\n            .join(node_init_name_env)\n            .join(\"config.toml\");\n        assert!(config_path.exists());\n        // Check if config file params are according to env params\n        let config_contents = std::fs::read_to_string(&config_path).unwrap();\n        assert!(config_contents.contains(\"name = \\\"TEST_NODE_ENV\\\"\"));\n        assert!(config_contents.contains(\"role = \\\"fullnode\\\"\"));\n        assert!(config_contents.contains(\"subnet = \\\"topos-env\\\"\"));\n\n        Ok(())\n    }\n\n    /// Test node cli arguments precedence over env arguments\n    #[tokio::test]\n    async fn command_init_precedence_cli_env() -> Result<(), Box<dyn std::error::Error>> {\n        let tmp_home_dir_env = create_folder(\"command_init_precedence_cli_env\");\n        let tmp_home_dir_cli = create_folder(\"command_init_precedence_cli_env\");\n\n        // Test node init with both cli and env flags\n        // Cli arguments should take precedence over env variables\n        let node_init_home_env = tmp_home_dir_env.to_str().unwrap();\n        let node_edge_path_env = setup_polygon_edge(node_init_home_env).await;\n        let node_init_name_env = \"TEST_NODE_ENV\";\n        let node_init_role_env = \"full-node\";\n        let 
node_init_subnet_env = \"topos-env\";\n        let node_init_home_cli = tmp_home_dir_cli.to_str().unwrap();\n        let node_edge_path_cli = node_edge_path_env.clone();\n        let node_init_name_cli = \"TEST_NODE_CLI\";\n        let node_init_role_cli = \"sequencer\";\n        let node_init_subnet_cli = \"topos-cli\";\n\n        let mut cmd = Command::cargo_bin(\"topos\")?;\n        cmd.arg(\"node\")\n            .env(\"TOPOS_POLYGON_EDGE_BIN_PATH\", &node_edge_path_env)\n            .env(\"TOPOS_HOME\", node_init_home_env)\n            .env(\"TOPOS_NODE_NAME\", node_init_name_env)\n            .env(\"TOPOS_NODE_ROLE\", node_init_role_env)\n            .env(\"TOPOS_NODE_SUBNET\", node_init_subnet_env)\n            .arg(\"--edge-path\")\n            .arg(node_edge_path_cli)\n            .arg(\"init\")\n            .arg(\"--name\")\n            .arg(node_init_name_cli)\n            .arg(\"--home\")\n            .arg(node_init_home_cli)\n            .arg(\"--role\")\n            .arg(node_init_role_cli)\n            .arg(\"--subnet\")\n            .arg(node_init_subnet_cli);\n\n        let output = cmd.assert().success();\n        let result: &str = std::str::from_utf8(&output.get_output().stdout)?;\n\n        assert!(result.contains(\"Created node config file\"));\n        let home = PathBuf::from(node_init_home_cli);\n        // Verification: check that the config file was created\n        let config_path = home\n            .join(\"node\")\n            .join(node_init_name_cli)\n            .join(\"config.toml\");\n        assert!(config_path.exists());\n        // Check if config file params are according to cli params\n        let config_contents = std::fs::read_to_string(&config_path).unwrap();\n        assert!(config_contents.contains(\"name = \\\"TEST_NODE_CLI\\\"\"));\n        assert!(config_contents.contains(\"role = \\\"sequencer\\\"\"));\n        assert!(config_contents.contains(\"subnet = \\\"topos-cli\\\"\"));\n\n        Ok(())\n    }\n    /// Test 
node up running from config file\n    #[rstest]\n    #[test_log::test(tokio::test)]\n    async fn command_node_up(\n        #[from(create_folder)] tmp_home_dir: PathBuf,\n    ) -> Result<(), Box<dyn std::error::Error>> {\n        // Create config file\n        let node_up_home_env = tmp_home_dir.to_str().unwrap();\n        let node_edge_path_env = setup_polygon_edge(node_up_home_env).await;\n        let node_up_name_env = \"TEST_NODE_UP\";\n        let node_up_role_env = \"full-node\";\n        let node_up_subnet_env = \"topos\";\n\n        let mut cmd = Command::cargo_bin(\"topos\")?;\n        cmd.arg(\"node\")\n            .env(\"TOPOS_POLYGON_EDGE_BIN_PATH\", &node_edge_path_env)\n            .env(\"TOPOS_HOME\", node_up_home_env)\n            .env(\"TOPOS_NODE_NAME\", node_up_name_env)\n            .env(\"TOPOS_NODE_ROLE\", node_up_role_env)\n            .env(\"TOPOS_NODE_SUBNET\", node_up_subnet_env)\n            .arg(\"init\");\n\n        let output = cmd.assert().success();\n        let result: &str = std::str::from_utf8(&output.get_output().stdout)?;\n        assert!(result.contains(\"Created node config file\"));\n\n        // Run node init with cli flags\n        let home = PathBuf::from(node_up_home_env);\n        // Verification: check that the config file was created\n        let config_path = home.join(\"node\").join(node_up_name_env).join(\"config.toml\");\n        assert!(config_path.exists());\n\n        // Generate polygon edge genesis file\n        let polygon_edge_bin = format!(\"{}/polygon-edge\", node_edge_path_env);\n        utils::generate_polygon_edge_genesis_file(\n            &polygon_edge_bin,\n            node_up_home_env,\n            node_up_name_env,\n            node_up_subnet_env,\n        )\n        .await?;\n        let polygon_edge_genesis_path = home\n            .join(\"subnet\")\n            .join(node_up_subnet_env)\n            .join(\"genesis.json\");\n        assert!(polygon_edge_genesis_path.exists());\n\n        let mut 
cmd = Command::cargo_bin(\"topos\")?;\n        cmd.arg(\"node\")\n            .env(\"TOPOS_POLYGON_EDGE_BIN_PATH\", &node_edge_path_env)\n            .env(\"TOPOS_HOME\", node_up_home_env)\n            .env(\"TOPOS_NODE_NAME\", node_up_name_env)\n            .env(\"RUST_LOG\", \"topos=debug\")\n            .arg(\"up\")\n            .stdout(Stdio::piped());\n\n        let cmd = tokio::process::Command::from(cmd).spawn().unwrap();\n        let pid = cmd.id().unwrap();\n        let _ = tokio::time::sleep(std::time::Duration::from_secs(10)).await;\n\n        let s = System::new_all();\n        if let Some(process) = s.process(Pid::from_u32(pid)) {\n            if process.kill_with(Signal::Term).is_none() {\n                eprintln!(\"This signal isn't supported on this platform\");\n            }\n        }\n\n        if let Ok(output) = cmd.wait_with_output().await {\n            assert!(output.status.success());\n            let stdout = output.stdout;\n            let stdout = String::from_utf8_lossy(&stdout);\n\n            let reg =\n                Regex::new(r#\"Local node is listening on \"\\/ip4\\/.*\\/tcp\\/9090\\/p2p\\/\"#).unwrap();\n            assert!(reg.is_match(&stdout));\n        } else {\n            panic!(\"Failed to shutdown gracefully\");\n        }\n        // Cleanup\n        std::fs::remove_dir_all(node_up_home_env)?;\n\n        Ok(())\n    }\n\n    /// Test node up running from config file\n    #[rstest::rstest]\n    #[test_log::test(tokio::test)]\n    async fn command_node_up_with_old_config(\n        #[from(create_folder)] tmp_home_dir: PathBuf,\n    ) -> Result<(), Box<dyn std::error::Error>> {\n        // Create config file\n        let node_up_home_env = tmp_home_dir.to_str().unwrap();\n        let node_edge_path_env = setup_polygon_edge(node_up_home_env).await;\n        let node_up_name_env = \"test_node_up_old_config\";\n        let node_up_subnet_env = \"topos\";\n\n        let mut cmd = Command::cargo_bin(\"topos\")?;\n        
cmd.arg(\"node\")\n            .env(\"TOPOS_POLYGON_EDGE_BIN_PATH\", &node_edge_path_env)\n            .env(\"TOPOS_HOME\", node_up_home_env)\n            .env(\"TOPOS_NODE_NAME\", node_up_name_env)\n            .env(\"TOPOS_NODE_SUBNET\", node_up_subnet_env)\n            .arg(\"init\");\n\n        let output = cmd.assert().success();\n        let result: &str = std::str::from_utf8(&output.get_output().stdout)?;\n        assert!(result.contains(\"Created node config file\"));\n\n        // Run node init with cli flags\n        let home = PathBuf::from(node_up_home_env);\n        // Verification: check that the config file was created\n        let config_path = home.join(\"node\").join(node_up_name_env).join(\"config.toml\");\n        assert!(config_path.exists());\n\n        let mut file = OpenOptions::new()\n            .read(true)\n            .write(true)\n            .open(config_path.clone())\n            .await?;\n\n        let mut buf = String::new();\n        let _ = file.read_to_string(&mut buf).await?;\n\n        let mut current: Map<String, Value> = toml::from_str(&buf)?;\n        let tce = current.get_mut(\"tce\").unwrap();\n\n        if let Value::Table(tce_table) = tce {\n            tce_table.insert(\n                \"libp2p-api-addr\".to_string(),\n                Value::String(\"0.0.0.0:9091\".to_string()),\n            );\n            tce_table.insert(\"minimum-tce-cluster-size\".to_string(), Value::Integer(0));\n            tce_table.insert(\"network-bootstrap-timeout\".to_string(), Value::Integer(5));\n            tce_table.remove(\"p2p\");\n        } else {\n            panic!(\"TCE configuration table malformed\");\n        }\n\n        let _ = file.set_len(0).await;\n        let _ = file.seek(std::io::SeekFrom::Start(0)).await;\n        let _ = file.write_all(toml::to_string(&current)?.as_bytes()).await;\n\n        drop(file);\n\n        // Generate polygon edge genesis file\n        let polygon_edge_bin = format!(\"{}/polygon-edge\", 
node_edge_path_env);\n        utils::generate_polygon_edge_genesis_file(\n            &polygon_edge_bin,\n            node_up_home_env,\n            node_up_name_env,\n            node_up_subnet_env,\n        )\n        .await?;\n        let polygon_edge_genesis_path = home\n            .join(\"subnet\")\n            .join(node_up_subnet_env)\n            .join(\"genesis.json\");\n        assert!(polygon_edge_genesis_path.exists());\n\n        let mut cmd = Command::cargo_bin(\"topos\")?;\n        cmd.arg(\"node\")\n            .env(\"TOPOS_POLYGON_EDGE_BIN_PATH\", &node_edge_path_env)\n            .env(\"TOPOS_NODE_NAME\", node_up_name_env)\n            .env(\"TOPOS_HOME\", node_up_home_env)\n            .env(\"RUST_LOG\", \"topos=info\")\n            .arg(\"up\")\n            .stdout(Stdio::piped());\n\n        let cmd = tokio::process::Command::from(cmd).spawn().unwrap();\n        let pid = cmd.id().unwrap();\n        let _ = tokio::time::sleep(std::time::Duration::from_secs(10)).await;\n\n        let s = System::new_all();\n        if let Some(process) = s.process(Pid::from_u32(pid)) {\n            if process.kill_with(Signal::Term).is_none() {\n                eprintln!(\"This signal isn't supported on this platform\");\n            }\n        }\n\n        if let Ok(output) = cmd.wait_with_output().await {\n            assert!(output.status.success());\n            let stdout = output.stdout;\n            let stdout = String::from_utf8_lossy(&stdout);\n\n            let reg =\n                Regex::new(r#\"Local node is listening on \"\\/ip4\\/.*\\/tcp\\/9091\\/p2p\\/\"#).unwrap();\n            assert!(reg.is_match(&stdout));\n        } else {\n            panic!(\"Failed to shutdown gracefully\");\n        }\n        // Cleanup\n        std::fs::remove_dir_all(node_up_home_env)?;\n\n        Ok(())\n    }\n}\n"
  },
  {
    "path": "crates/topos/tests/node.rs",
    "content": "mod utils;\n\nuse std::{path::PathBuf, process::Command};\n\nuse assert_cmd::prelude::*;\nuse sysinfo::{Pid, PidExt, ProcessExt, Signal, System, SystemExt};\nuse tempfile::tempdir;\n\nuse crate::utils::generate_polygon_edge_genesis_file;\n\n#[test]\nfn help_display() -> Result<(), Box<dyn std::error::Error>> {\n    let mut cmd = Command::cargo_bin(\"topos\")?;\n    cmd.arg(\"node\").arg(\"-h\");\n\n    let output = cmd.assert().success();\n\n    let result: &str = std::str::from_utf8(&output.get_output().stdout)?;\n\n    insta::assert_snapshot!(utils::sanitize_config_folder_path(result));\n\n    Ok(())\n}\n\nmod serial_integration {\n    use super::*;\n    /// Test node up running from config file\n    #[test_log::test(tokio::test)]\n    async fn command_node_up_sigterm() -> Result<(), Box<dyn std::error::Error>> {\n        let tmp_home_dir = tempdir()?;\n\n        // Create config file\n        let node_up_home_env = tmp_home_dir.path().to_str().unwrap();\n        let node_edge_path_env = utils::setup_polygon_edge(node_up_home_env).await;\n        let node_up_name_env = \"TEST_NODE_UP\";\n        let node_up_role_env = \"full-node\";\n        let node_up_subnet_env = \"topos-up-env-subnet\";\n\n        let mut cmd = Command::cargo_bin(\"topos\")?;\n        cmd.arg(\"node\")\n            .env(\"TOPOS_POLYGON_EDGE_BIN_PATH\", &node_edge_path_env)\n            .env(\"TOPOS_HOME\", node_up_home_env)\n            .env(\"TOPOS_NODE_NAME\", node_up_name_env)\n            .env(\"TOPOS_NODE_ROLE\", node_up_role_env)\n            .env(\"TOPOS_NODE_SUBNET\", node_up_subnet_env)\n            .arg(\"init\");\n\n        let output = cmd.assert().success();\n        let result: &str = std::str::from_utf8(&output.get_output().stdout)?;\n        assert!(result.contains(\"Created node config file\"));\n\n        // Run node init with cli flags\n        let home = PathBuf::from(node_up_home_env);\n        // Verification: check that the config file was created\n      
  let config_path = home.join(\"node\").join(node_up_name_env).join(\"config.toml\");\n        assert!(config_path.exists());\n\n        // Generate polygon edge genesis file\n        let polygon_edge_bin = format!(\"{}/polygon-edge\", node_edge_path_env);\n        generate_polygon_edge_genesis_file(\n            &polygon_edge_bin,\n            node_up_home_env,\n            node_up_name_env,\n            node_up_subnet_env,\n        )\n        .await?;\n        let polygon_edge_genesis_path = home\n            .join(\"subnet\")\n            .join(node_up_subnet_env)\n            .join(\"genesis.json\");\n        assert!(polygon_edge_genesis_path.exists());\n\n        let mut cmd = Command::cargo_bin(\"topos\")?;\n        cmd.arg(\"node\")\n            .env(\"TOPOS_POLYGON_EDGE_BIN_PATH\", &node_edge_path_env)\n            .env(\"TOPOS_HOME\", node_up_home_env)\n            .env(\"TOPOS_NODE_NAME\", node_up_name_env)\n            .arg(\"up\");\n\n        let mut cmd = tokio::process::Command::from(cmd).spawn().unwrap();\n        let pid = cmd.id().unwrap();\n        let _ = tokio::time::sleep(std::time::Duration::from_secs(10)).await;\n\n        let s = System::new_all();\n        if let Some(process) = s.process(Pid::from_u32(pid)) {\n            if process.kill_with(Signal::Term).is_none() {\n                eprintln!(\"This signal isn't supported on this platform\");\n            }\n        }\n\n        if let Ok(code) = cmd.wait().await {\n            assert!(code.success());\n        } else {\n            panic!(\"Failed to shutdown gracefully\");\n        }\n\n        // Cleanup\n        std::fs::remove_dir_all(node_up_home_env)?;\n\n        Ok(())\n    }\n\n    #[test_log::test(tokio::test)]\n    async fn command_node_up_custom_polygon() -> Result<(), Box<dyn std::error::Error>> {\n        let tmp_home_dir = tempdir()?;\n\n        // Create config file\n        let node_up_home_env = tmp_home_dir.path().to_str().unwrap();\n        let custom_path = 
tmp_home_dir.path().join(\"custom_path\");\n        let node_edge_path_env = utils::setup_polygon_edge(custom_path.to_str().unwrap()).await;\n        let node_up_name_env = \"TEST_NODE_UP\";\n        let node_up_role_env = \"full-node\";\n        let node_up_subnet_env = \"topos-up-env-subnet\";\n\n        let mut cmd = Command::cargo_bin(\"topos\")?;\n        cmd.arg(\"node\")\n            .env(\"TOPOS_POLYGON_EDGE_BIN_PATH\", &node_edge_path_env)\n            .env(\"TOPOS_HOME\", node_up_home_env)\n            .env(\"TOPOS_NODE_NAME\", node_up_name_env)\n            .env(\"TOPOS_NODE_ROLE\", node_up_role_env)\n            .env(\"TOPOS_NODE_SUBNET\", node_up_subnet_env)\n            .arg(\"init\");\n\n        let output = cmd.assert().success();\n        let result: &str = std::str::from_utf8(&output.get_output().stdout)?;\n        assert!(result.contains(\"Created node config file\"));\n\n        // Run node init with cli flags\n        let home = PathBuf::from(node_up_home_env);\n        // Verification: check that the config file was created\n        let config_path = home.join(\"node\").join(node_up_name_env).join(\"config.toml\");\n        assert!(config_path.exists());\n\n        // Generate polygon edge genesis file\n        let polygon_edge_bin = format!(\"{}/polygon-edge\", node_edge_path_env);\n        generate_polygon_edge_genesis_file(\n            &polygon_edge_bin,\n            node_up_home_env,\n            node_up_name_env,\n            node_up_subnet_env,\n        )\n        .await?;\n        let polygon_edge_genesis_path = home\n            .join(\"subnet\")\n            .join(node_up_subnet_env)\n            .join(\"genesis.json\");\n        assert!(polygon_edge_genesis_path.exists());\n\n        let mut cmd = Command::cargo_bin(\"topos\")?;\n        cmd.arg(\"node\")\n            .env(\"TOPOS_POLYGON_EDGE_BIN_PATH\", &node_edge_path_env)\n            .env(\"TOPOS_HOME\", node_up_home_env)\n            .env(\"TOPOS_NODE_NAME\", 
node_up_name_env)\n            .arg(\"up\");\n\n        let mut cmd = tokio::process::Command::from(cmd).spawn().unwrap();\n        let pid = cmd.id().unwrap();\n        let _ = tokio::time::sleep(std::time::Duration::from_secs(10)).await;\n\n        let s = System::new_all();\n        if let Some(process) = s.process(Pid::from_u32(pid)) {\n            if process.kill_with(Signal::Term).is_none() {\n                eprintln!(\"This signal isn't supported on this platform\");\n            }\n        }\n\n        if let Ok(code) = cmd.wait().await {\n            assert!(code.success());\n        } else {\n            panic!(\"Failed to shutdown gracefully\");\n        }\n\n        // Cleanup\n        std::fs::remove_dir_all(node_up_home_env)?;\n\n        Ok(())\n    }\n}\n"
  },
  {
    "path": "crates/topos/tests/regtest.rs",
    "content": "mod utils;\n\nuse std::process::Command;\n\nuse assert_cmd::prelude::*;\n\n#[test]\nfn regtest_spam_help_display() -> Result<(), Box<dyn std::error::Error>> {\n    let mut cmd = Command::cargo_bin(\"topos\")?;\n    cmd.arg(\"regtest\").arg(\"spam\").arg(\"-h\");\n\n    let output = cmd.assert().success();\n\n    let result: &str = std::str::from_utf8(&output.get_output().stdout)?;\n\n    insta::assert_snapshot!(utils::sanitize_config_folder_path(result));\n\n    Ok(())\n}\n\n#[test]\nfn regtest_spam_invalid_hosts() -> Result<(), Box<dyn std::error::Error>> {\n    let mut cmd = Command::cargo_bin(\"topos\")?;\n    cmd.arg(\"regtest\")\n        .arg(\"spam\")\n        .arg(\"--benchmark\")\n        .arg(\"--target-hosts\")\n        .arg(\"asd\")\n        .arg(\"--number\")\n        .arg(\"1\");\n\n    let output = cmd.assert().failure();\n\n    let result: &str = std::str::from_utf8(&output.get_output().stdout)?;\n\n    assert!(result.contains(\n        \"Invalid target-hosts pattern. Has to be in the format of http://validator-1:9090\"\n    ));\n\n    Ok(())\n}\n\n#[test]\nfn regtest_spam_invalid_number() -> Result<(), Box<dyn std::error::Error>> {\n    let mut cmd = Command::cargo_bin(\"topos\")?;\n    cmd.arg(\"regtest\")\n        .arg(\"spam\")\n        .arg(\"--benchmark\")\n        .arg(\"--target-hosts\")\n        .arg(\" http://validator-{N}:9090\")\n        .arg(\"--number\")\n        .arg(\"dasd\");\n\n    cmd.assert().failure();\n\n    Ok(())\n}\n"
  },
  {
    "path": "crates/topos/tests/setup.rs",
    "content": "mod utils;\n\nuse std::{fs, process::Command};\n\nuse assert_cmd::prelude::*;\nuse tempfile::tempdir;\n\n#[test]\nfn setup_subnet_install_edge() -> Result<(), Box<dyn std::error::Error>> {\n    let tmp_home_dir = tempdir()?;\n\n    let mut cmd = Command::cargo_bin(\"topos\")?;\n    cmd.arg(\"setup\")\n        .arg(\"subnet\")\n        .arg(\"--path\")\n        .arg(tmp_home_dir.path());\n\n    let output = cmd.assert().success();\n\n    let result: &str = std::str::from_utf8(&output.get_output().stdout)?;\n\n    assert!(result.contains(\"Polygon Edge installation successful\"));\n\n    Ok(())\n}\n\n#[test]\nfn setup_with_no_arguments() -> Result<(), Box<dyn std::error::Error>> {\n    let mut cmd = Command::cargo_bin(\"topos\")?;\n    cmd.arg(\"setup\");\n\n    let output = cmd.assert().failure();\n\n    let result: &str = std::str::from_utf8(&output.get_output().stderr)?;\n\n    assert!(result\n        .contains(\"No subcommand provided. You can use `--help` to see available subcommands.\"));\n\n    Ok(())\n}\n\n#[test]\nfn setup_subnet_fail_to_install_release() -> Result<(), Box<dyn std::error::Error>> {\n    let tmp_home_dir = tempdir()?;\n\n    let mut cmd = Command::cargo_bin(\"topos\")?;\n    cmd.arg(\"setup\")\n        .arg(\"subnet\")\n        .arg(\"--path\")\n        .arg(tmp_home_dir.path())\n        .arg(\"--release\")\n        .arg(\"invalid\");\n\n    let output = cmd.assert().failure();\n\n    let result: &str = std::str::from_utf8(&output.get_output().stderr)?;\n\n    assert!(result.contains(\n        \"Error installing Polygon Edge: There is no valid Polygon Edge release available\"\n    ));\n\n    Ok(())\n}\n\n#[test]\nfn setup_subnet_install_edge_custom_path() -> Result<(), Box<dyn std::error::Error>> {\n    let tmp_home_dir = tempdir()?;\n    let custom_path = tmp_home_dir.path().join(\"custom_path\");\n\n    fs::create_dir(&custom_path).unwrap();\n\n    let mut cmd = Command::cargo_bin(\"topos\")?;\n    cmd.arg(\"setup\")\n       
 .arg(\"subnet\")\n        .arg(\"--path\")\n        .arg(&custom_path);\n\n    let output = cmd.assert().success();\n\n    let result: &str = std::str::from_utf8(&output.get_output().stdout)?;\n\n    assert!(result.contains(\"Polygon Edge installation successful\"));\n\n    let file = fs::read_dir(&custom_path)\n        .unwrap()\n        .filter_map(|x| match x.ok() {\n            Some(f) if f.path().ends_with(\"polygon-edge\") => Some(f),\n            _ => None,\n        })\n        .last()\n        .unwrap();\n\n    assert!(file.path().starts_with(&custom_path));\n\n    Ok(())\n}\n\n#[test]\nfn setup_subnet_install_edge_custom_path_env() -> Result<(), Box<dyn std::error::Error>> {\n    let tmp_home_dir = tempdir()?;\n    let custom_path = tmp_home_dir.path().join(\"custom_path\");\n\n    fs::create_dir(&custom_path).unwrap();\n\n    let mut cmd = Command::cargo_bin(\"topos\")?;\n\n    cmd.env(\"TOPOS_SETUP_POLYGON_EDGE_DIR\", &custom_path)\n        .arg(\"setup\")\n        .arg(\"subnet\");\n\n    let output = cmd.assert().success();\n\n    let result: &str = std::str::from_utf8(&output.get_output().stdout)?;\n\n    assert!(result.contains(\"Polygon Edge installation successful\"));\n\n    let file = fs::read_dir(&custom_path)\n        .unwrap()\n        .filter_map(|x| match x.ok() {\n            Some(f) if f.path().ends_with(\"polygon-edge\") => Some(f),\n            _ => None,\n        })\n        .last()\n        .unwrap();\n\n    assert!(file.path().starts_with(&custom_path));\n\n    Ok(())\n}\n"
  },
  {
    "path": "crates/topos/tests/snapshots/node__help_display.snap",
    "content": "---\nsource: crates/topos/tests/node.rs\nexpression: \"utils::sanitize_config_folder_path(result)\"\n---\nUtility to manage your nodes in the Topos network\n\nUsage: topos node [OPTIONS] [COMMAND]\n\nCommands:\n  up      Spawn your node\n  init    Setup your node\n  status  Get node status\n  help    Print this message or the help of the given subcommand(s)\n\nOptions:\n      --edge-path <EDGE_PATH>  Installation directory path for Polygon Edge binary [env: TOPOS_POLYGON_EDGE_BIN_PATH=] [default: .]\n  -v, --verbose...             Defines the verbosity level\n      --no-color               Disable color in logs [env: TOPOS_LOG_NOCOLOR=]\n      --home <HOME>            Home directory for the configuration [env: TOPOS_HOME=] [default: /home/runner/.config/topos]\n  -h, --help                   Print help\n\n"
  },
  {
    "path": "crates/topos/tests/snapshots/push_certificate__help_display.snap",
    "content": "---\nsource: crates/topos/tests/push-certificate.rs\nexpression: \"utils::sanitize_config_folder_path(result)\"\n---\nPush a certificate to a TCE process\n\nUsage: topos regtest push-certificate [OPTIONS]\n\nOptions:\n  -f, --format <FORMAT>\n          [default: plain] [possible values: json, plain]\n  -v, --verbose...\n          Defines the verbosity level\n      --no-color\n          Disable color in logs [env: TOPOS_LOG_NOCOLOR=]\n  -t, --timeout <TIMEOUT>\n          Global timeout for the command [default: 60]\n      --home <HOME>\n          Home directory for the configuration [env: TOPOS_HOME=] [default: /home/runner/.config/topos]\n      --timeout-broadcast <TIMEOUT_BROADCAST>\n          Seconds to wait before asserting the broadcast [default: 30]\n  -n, --nodes <NODES>\n          The node list to be used, can be a file path or a comma separated list of Uri. If not provided, stdin is listened [env: TARGET_NODES_PATH=]\n  -h, --help\n          Print help\n\n"
  },
  {
    "path": "crates/topos/tests/snapshots/regtest__regtest_spam_help_display.snap",
    "content": "---\nsource: crates/topos/tests/regtest.rs\nexpression: \"utils::sanitize_config_folder_path(result)\"\n---\nRun a test topos certificate spammer to send test certificates to the network, generating randomly among the `nb_subnets` subnets the batch of `cert_per_batch` certificates at every `batch-interval`\n\nUsage: topos regtest spam [OPTIONS]\n\nOptions:\n      --target-nodes <TARGET_NODES>\n          The target node api endpoint. Multiple nodes could be specified as comma separated list e.g. `--target-nodes=http://[::1]:1340,http://[::1]:1341` [env: TOPOS_NETWORK_SPAMMER_TARGET_NODES=]\n  -v, --verbose...\n          Defines the verbosity level\n      --no-color\n          Disable color in logs [env: TOPOS_LOG_NOCOLOR=]\n      --target-nodes-path <TARGET_NODES_PATH>\n          Path to json file with list of target nodes as alternative to `--target-nodes` [env: TOPOS_NETWORK_SPAMMER_TARGET_NODES_PATH=]\n      --home <HOME>\n          Home directory for the configuration [env: TOPOS_HOME=] [default: /home/runner/.config/topos]\n      --local-key-seed <LOCAL_KEY_SEED>\n          Seed for generation of local private signing keys and corresponding subnet ids [env: TOPOS_NETWORK_SPAMMER_LOCAL_KEY_SEED=] [default: 1]\n      --cert-per-batch <CERT_PER_BATCH>\n          Certificates generated in one batch. Batch is generated every `batch-interval` milliseconds [env: TOPOS_NETWORK_SPAMMER_CERT_PER_BATCH=] [default: 1]\n      --nb-subnets <NB_SUBNETS>\n          Number of subnets to use for certificate generation. For every certificate subnet id will be picked randomly [env: TOPOS_NETWORK_SPAMMER_NUMBER_OF_SUBNETS=] [default: 1]\n      --nb-batches <NB_BATCHES>\n          Number of batches to generate before finishing execution. 
If not specified, batches will be generated indefinitely [env: TOPOS_NETWORK_SPAMMER_NUMBER_OF_BATCHES=]\n      --batch-interval <BATCH_INTERVAL>\n          Time interval in milliseconds between generated batches of certificates [env: TOPOS_NETWORK_SPAMMER_BATCH_INTERVAL=] [default: 2000]\n      --target-subnets <TARGET_SUBNETS>\n          List of generated certificate target subnets. No target subnets by default. For example `--target-subnets=0x3bc19e36ff1673910575b6727a974a9abd80c9a875d41ab3e2648dbfb9e4b518,0xa00d60b2b408c2a14c5d70cdd2c205db8985ef737a7e55ad20ea32cc9e7c417c` [env: TOPOS_NETWORK_SPAMMER_TARGET_SUBNETS=]\n      --otlp-agent <OTLP_AGENT>\n          Socket of the opentelemetry agent endpoint. If not provided open telemetry will not be used [env: TOPOS_OTLP_AGENT=]\n      --otlp-service-name <OTLP_SERVICE_NAME>\n          Otlp service name. If not provided open telemetry will not be used [env: TOPOS_OTLP_SERVICE_NAME=]\n      --benchmark\n          Flag to indicate usage of Kubernetes [env: TOPOS_NETWORK_SPAMMER_BENCHMARK=]\n      --target-hosts <TARGET_HOSTS>\n          Template for generating target node entrypoints. e.g. `--hosts=\"http://validator-{N}:1340\"` [env: TOPOS_NETWORK_SPAMMER_TARGET_HOSTS=]\n      --number <NUMBER>\n          Number of nodes to generate based on the DNS template [env: TOPOS_NETWORK_SPAMMER_NUMBER=]\n  -h, --help\n          Print help\n\n"
  },
  {
    "path": "crates/topos/tests/utils.rs",
    "content": "use assert_cmd::prelude::*;\nuse predicates::prelude::*;\nuse regex::Regex;\nuse std::path::PathBuf;\nuse std::process::Command;\nuse topos::install_polygon_edge;\n\n// Have to allow dead_code because clippy doesn't recognize it is being used in the tests\n#[cfg(test)]\n#[allow(dead_code)]\npub fn sanitize_config_folder_path(cmd_out: &str) -> String {\n    // Sanitize the result here:\n    // When run locally, we get /Users/<username>/.config/topos\n    // When testing on the CI, we get /home/runner/.config/topos\n    let pattern = Regex::new(r\"\\[default: .+?/.config/topos\\]\").unwrap();\n    pattern\n        .replace(cmd_out, \"[default: /home/runner/.config/topos]\")\n        .to_string()\n}\n\n// Have to allow dead_code because clippy doesn't recognize it is being used in the tests\n#[allow(dead_code)]\npub async fn setup_polygon_edge(path: &str) -> String {\n    let installation_path = std::env::current_dir().unwrap().join(path);\n    let binary_path = installation_path.join(\"polygon-edge\");\n\n    if !binary_path.exists() {\n        std::fs::create_dir_all(installation_path.clone())\n            .expect(\"Cannot create test binary folder\");\n\n        install_polygon_edge(\n            \"topos-protocol/polygon-edge\".to_string(),\n            None,\n            installation_path.clone().as_path(),\n        )\n        .await\n        .expect(\"Cannot install Polygon Edge binary\");\n    }\n\n    installation_path.to_str().unwrap().to_string()\n}\n\n// Have to allow dead_code because clippy doesn't recognize it is being used in the tests\n#[allow(dead_code)]\npub async fn generate_polygon_edge_genesis_file(\n    polygon_edge_bin: &str,\n    home_path: &str,\n    node_name: &str,\n    subnet: &str,\n) -> Result<(), Box<dyn std::error::Error>> {\n    let genesis_folder_path: PathBuf = PathBuf::from(format!(\"{}/subnet/{}\", home_path, subnet));\n    if !genesis_folder_path.exists() {\n        
std::fs::create_dir_all(genesis_folder_path.clone())\n            .expect(\"Cannot create subnet genesis file folder\");\n    }\n    let genesis_file_path = format!(\"{}/genesis.json\", genesis_folder_path.display());\n    println!(\"Polygon edge path: {}\", polygon_edge_bin);\n    let mut cmd = Command::new(polygon_edge_bin);\n    let val_prefix_path = format!(\"{}/node/{}/\", home_path, node_name);\n    cmd.arg(\"genesis\")\n        .arg(\"--dir\")\n        .arg(&genesis_file_path)\n        .arg(\"--consensus\")\n        .arg(\"ibft\")\n        .arg(\"--ibft-validators-prefix-path\")\n        .arg(val_prefix_path)\n        .arg(\"--bootnode\") /* set dummy bootnode, we will not run edge to produce blocks */\n        .arg(\"/ip4/127.0.0.1/tcp/8545/p2p/16Uiu2HAmNYneHCbJ1Ntz1ojvTdiNGCMGWNT5MGMH28AzKNV66Paa\");\n    cmd.assert()\n        .success()\n        .stdout(predicate::str::contains(format!(\n            \"Genesis written to {}\",\n            genesis_folder_path.display()\n        )));\n    Ok(())\n}\n"
  },
  {
    "path": "crates/topos-certificate-spammer/Cargo.toml",
    "content": "[package]\nname = \"topos-certificate-spammer\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n[lints]\nworkspace = true\n\n[dependencies]\nclap.workspace = true\ntokio-stream.workspace = true\nrand_distr.workspace = true\nrand_core.workspace = true\nrand.workspace = true\nfutures.workspace = true\ntokio.workspace = true\nserde = { workspace = true, features = [\"derive\"] }\nserde_json.workspace = true\nhex.workspace = true\nthiserror.workspace = true\ntracing.workspace = true\ntracing-opentelemetry.workspace = true\nopentelemetry.workspace = true\ntiny-keccak.workspace = true\nuuid.workspace = true\nlazy_static.workspace = true\nhttp.workspace = true\n\ntoml = \"0.5.9\"\n\ntopos-core.workspace = true\ntopos-tce-proxy = { path = \"../topos-tce-proxy\"}\ntopos-crypto = {path = \"../topos-crypto\"}\n"
  },
  {
    "path": "crates/topos-certificate-spammer/README.md",
    "content": "# Topos Certificate Spammer\n\n## How does it work?\n\nThe Topos Certificate Spammer generates test certificate chain and sends them to one or more target nodes, specified with parameter `--target-nodes`, e.g. `--target-nodes=http://[::1]:1340,http://[::1]:1341`.\nEvery `--batch-interval`, a batch of certificates is generated and sent following the `--certs-per-batch` argument.\n\nSource subnet id and list of target subnets are randomly assigned to every certificate.\n\nWhen argument `--nb-batches` is specified, program will send specified number of batches and the command will gracefully shut down connections and exit. When unspecified, it will continuously generate and send batches of certificates.\n\nThe time delay in milliseconds between two batches of two certificates is set with `--batch-interval`.\n\nCertificates are signed with secp256k1 private key, and seed for generation of `nb-subnets` private keys is infuenced by `--local-key-seed`.\n\nThe dispatching of Certificate is done through the TCE service gRPC API.\n\n## Commands\n\nTo compile from the root `topos` workspace directory:\n```\ncargo build --release\n```\n\nThe extended list of commands:\n```\ntopos network spam -h\n```\n\n## Example\n\nContinuously spam local tce node `http://[::1]:1340` with batch of 1 certificate every 2 seconds. 
Certificate target subnet list is empty:\n```\ntopos network spam \n```\n\nSpam two tce target nodes with 3 batches (every batch containing 5 certificates with 2 possible source subnet ids), \n also specifying two possible target subnets for every generated certificate:\n``` \ntopos network spam --nb-subnets=2  --cert-per-batch=5 --nb-batches=3 --target-nodes=http://[::1]:1340,http://[::1]:1341 --target-subnets=0x3bc19e36ff1673910575b6727a974a9abd80c9a875d41ab3e2648dbfb9e4b518,0xa00d60b2b408c2a14c5d70cdd2c205db8985ef737a7e55ad20ea32cc9e7c417c \n```\n\n\nAlternatively environment variables could be used instead of command line arguments to configure Topos Certificate Spammer:\n```\nTOPOS_NETWORK_SPAMMER_TARGET_NODES\nTOPOS_NETWORK_SPAMMER_TARGET_NODES_PATH\nTOPOS_NETWORK_SPAMMER_LOCAL_KEY_SEED\nTOPOS_NETWORK_SPAMMER_CERT_PER_BATCH\nTOPOS_NETWORK_SPAMMER_NUMBER_OF_SUBNETS\nTOPOS_NETWORK_SPAMMER_NUMBER_OF_BATCHES\nTOPOS_NETWORK_SPAMMER_BATCH_INTERVAL\nTOPOS_NETWORK_SPAMMER_TARGET_SUBNETS\n```\n\n## Instrumentation\n\nBy specifying `--otlp-agent` and `--otlp-service-name` cli options, instrumentation event `NewTestCertificate` will be observable from Otlp/Jaeger.\n"
  },
  {
    "path": "crates/topos-certificate-spammer/config/target_nodes_example.json",
    "content": "{\n  \"nodes\": [\n    \"http://[::1]:1340\",\n    \"http://[::1]:1341\"\n  ]\n}\n"
  },
  {
    "path": "crates/topos-certificate-spammer/src/config.rs",
    "content": "#[derive(Clone, Debug)]\npub struct CertificateSpammerConfig {\n    pub target_nodes: Option<Vec<String>>,\n    pub target_nodes_path: Option<String>,\n    pub local_key_seed: u64,\n    pub cert_per_batch: u64,\n    pub nb_subnets: u8,\n    pub nb_batches: Option<u64>,\n    pub batch_interval: u64,\n    pub target_subnets: Option<Vec<String>>,\n    pub benchmark: bool,\n    pub target_hosts: Option<String>,\n    pub number: Option<u32>,\n}\n"
  },
  {
    "path": "crates/topos-certificate-spammer/src/error.rs",
    "content": "#[derive(Debug, thiserror::Error)]\npub enum Error {\n    #[error(\"target nodes are not specified\")]\n    TargetNodesNotSpecified,\n    #[error(\"error reading target nodes json file:{0}\")]\n    ReadingTargetNodesJsonFile(String),\n    #[error(\"error parsing target nodes json file:{0}\")]\n    InvalidTargetNodesJsonFile(String),\n    #[error(\"invalid subnet id error: {0}\")]\n    InvalidSubnetId(String),\n    #[error(\"hex conversion error {0}\")]\n    HexConversion(hex::FromHexError),\n    #[error(\"invalid signing key: {0}\")]\n    InvalidSigningKey(String),\n    #[error(\"Tce node connection error {0}\")]\n    TCENodeConnection(topos_tce_proxy::Error),\n    #[error(\"Certificate signing error: {0}\")]\n    CertificateSigning(topos_core::uci::Error),\n    #[error(\"BenchmkarkConfigError config error: {0}\")]\n    BenchmarkConfig(String),\n}\n"
  },
  {
    "path": "crates/topos-certificate-spammer/src/lib.rs",
    "content": "//! Utility to spam dummy certificates\n\nuse http::Uri;\nuse opentelemetry::trace::FutureExt;\nuse serde::Deserialize;\nuse std::collections::HashMap;\nuse std::fmt::Debug;\nuse std::sync::Arc;\nuse tokio::sync::{mpsc, oneshot, Mutex};\nuse tokio::time::{self, Duration};\nuse tokio_stream::StreamExt;\nuse topos_core::uci::*;\nuse topos_tce_proxy::client::{TceClient, TceClientBuilder};\nuse tracing::{debug, error, info, info_span, Instrument, Span};\nuse tracing_opentelemetry::OpenTelemetrySpanExt;\n\nmod config;\npub mod error;\nmod utils;\n\nuse error::Error;\n\nuse crate::utils::{generate_source_subnets, generate_test_certificate};\npub use config::CertificateSpammerConfig;\n\ntype NodeApiAddress = String;\n\n#[derive(Deserialize)]\nstruct FileNodes {\n    nodes: Vec<String>,\n}\n\n/// Represents connection from one sequencer to a TCE node\n/// Multiple different subnets could be connected to the same TCE node address (represented with TargetNodeConnection with different SubnetId and created client)\n/// Multiple topos-sequencers from the same subnet could be connected to the same TCE node address (so they would have same SubnetID, but different client instances)\nstruct TargetNodeConnection {\n    address: NodeApiAddress,\n    client: Arc<Mutex<TceClient>>,\n    shutdown: mpsc::Sender<oneshot::Sender<()>>,\n    source_subnet: SourceSubnet,\n}\n\n#[derive(Debug, Clone)]\npub struct SourceSubnet {\n    signing_key: [u8; 32],\n    source_subnet_id: SubnetId,\n    last_certificate_id: CertificateId,\n}\n\nimpl TargetNodeConnection {\n    pub async fn shutdown(&mut self) -> Result<(), Box<dyn std::error::Error>> {\n        let (sender, receiver) = oneshot::channel();\n        self.shutdown.send(sender).await?;\n        receiver.await?;\n\n        Ok(())\n    }\n}\n\nasync fn open_target_node_connection(\n    nodes: &[String],\n    source_subnet: &SourceSubnet,\n) -> Result<Vec<TargetNodeConnection>, Error> {\n    let mut target_node_connections: 
Vec<TargetNodeConnection> = Vec::new();\n    for tce_address in nodes {\n        info!(\n            \"Opening client for tce service {}, source subnet id: {}\",\n            &tce_address, &source_subnet.source_subnet_id\n        );\n\n        let (tce_client_shutdown_channel, shutdown_receiver) =\n            mpsc::channel::<oneshot::Sender<()>>(1);\n\n        let (tce_client, mut receiving_certificate_stream) = match TceClientBuilder::default()\n            .set_subnet_id(source_subnet.source_subnet_id)\n            .set_tce_endpoint(tce_address)\n            .build_and_launch(shutdown_receiver)\n            .await\n        {\n            Ok(value) => value,\n            Err(e) => {\n                error!(\n                    \"Unable to create TCE client for node {}: {}\",\n                    &tce_address, e\n                );\n                return Err(Error::TCENodeConnection(e));\n            }\n        };\n\n        match tce_client.open_stream(Vec::new()).await {\n            Ok(_) => {}\n            Err(e) => {\n                error!(\"Unable to connect to node {}: {}\", &tce_address, e);\n                return Err(Error::TCENodeConnection(e));\n            }\n        }\n\n        let (shutdown_channel, mut shutdown_receiver) = mpsc::channel::<oneshot::Sender<()>>(1);\n\n        let client = Arc::new(Mutex::new(tce_client));\n        {\n            let source_subnet_id = source_subnet.source_subnet_id;\n            let tce_address = tce_address.clone();\n            tokio::spawn(async move {\n                loop {\n                    // process certificates received from the TCE node\n                    tokio::select! 
{\n                         Some((cert, position)) = receiving_certificate_stream.next() => {\n                            info!(\"Delivered certificate from tce address: {} for subnet id: {} cert id {}, position {:?}\",\n                                &tce_address, &source_subnet_id, &cert.id, position);\n                         },\n                         Some(sender) = shutdown_receiver.recv() => {\n                            info!(\"Shutting down client for tce address: {} for subnet id: {}\",\n                                &tce_address, &source_subnet_id);\n\n                            let (killer, waiter) = oneshot::channel::<()>();\n                            tce_client_shutdown_channel.send(killer).await.unwrap();\n                            waiter.await.unwrap();\n\n                            info!(\"Finishing watch certificates task...\");\n                            _ = sender.send(());\n                            // Finish this task listener\n                            break;\n                         }\n                    }\n                }\n            });\n        }\n\n        target_node_connections.push(TargetNodeConnection {\n            address: tce_address.clone(),\n            client,\n            shutdown: shutdown_channel,\n            source_subnet: source_subnet.clone(),\n        });\n    }\n    Ok(target_node_connections)\n}\n\nasync fn close_target_node_connections(\n    target_node_connections: HashMap<SubnetId, Vec<TargetNodeConnection>>,\n) {\n    for mut target_node in target_node_connections\n        .into_iter()\n        .flat_map(|(_, connections)| connections)\n        .collect::<Vec<TargetNodeConnection>>()\n    {\n        info!(\"Closing connection to target node {}\", target_node.address);\n        if let Err(e) = target_node.shutdown().await {\n            error!(\"Failed to close stream with {}: {e}\", target_node.address);\n        }\n    }\n}\n\n/// Submit the certificate to the TCE node\nasync fn 
submit_cert_to_tce(node: &TargetNodeConnection, cert: Certificate) {\n    let client = node.client.clone();\n    let span = Span::current();\n    span.record(\"certificate_id\", cert.id.to_string());\n    span.record(\"source_subnet_id\", cert.source_subnet_id.to_string());\n\n    let mut tce_client = client.lock().await;\n    send_new_certificate(&mut tce_client, cert)\n        .with_context(span.context())\n        .instrument(span)\n        .await\n}\n\nasync fn send_new_certificate(tce_client: &mut TceClient, cert: Certificate) {\n    if let Err(e) = tce_client\n        .send_certificate(cert)\n        .with_current_context()\n        .instrument(Span::current())\n        .await\n    {\n        error!(\"Failed to send the Certificate to the TCE client: {}\", e);\n    }\n}\n\nasync fn dispatch(cert: Certificate, target_node: &TargetNodeConnection) {\n    info!(\n        \"Sending cert id={:?} prev_cert_id= {:?} subnet_id={:?} to tce node {}\",\n        &cert.id, &cert.prev_id, &cert.source_subnet_id, target_node.address\n    );\n    submit_cert_to_tce(target_node, cert).await\n}\n\npub async fn run(\n    args: CertificateSpammerConfig,\n    mut shutdown: mpsc::Receiver<oneshot::Sender<()>>,\n) -> Result<(), Error> {\n    // Is list of nodes is specified in the command line use them otherwise use\n    // config file provided nodes\n    let target_nodes = if args.benchmark {\n        if let (Some(target_hosts), Some(number)) = (args.target_hosts, args.number) {\n            let uri = target_hosts\n                .replace(\"{N}\", &0.to_string())\n                .parse::<Uri>()\n                .map_err(|e| Error::BenchmarkConfig(e.to_string()))?;\n\n            if uri.host().is_none() || uri.path().is_empty() || uri.port_u16().is_none() {\n                return Err(Error::BenchmarkConfig(\n                    \"Invalid target-hosts pattern. 
Has to be in the format of http://validator-1:9090\"\n                        .into(),\n                ));\n            }\n\n            (0..number)\n                .map(|n| target_hosts.replace(\"{N}\", &n.to_string()))\n                .collect::<Vec<String>>()\n        } else {\n            return Err(Error::BenchmarkConfig(\n                \"The --benchmark flag needs the following two additional flags being passed to it:\\n--target-hosts http://validator-{N}\\n--number 10\".into(),\n            ));\n        }\n    } else if let Some(nodes) = args.target_nodes {\n        nodes\n    } else if let Some(target_nodes_path) = args.target_nodes_path {\n        let json_str = std::fs::read_to_string(target_nodes_path)\n            .map_err(|e| Error::ReadingTargetNodesJsonFile(e.to_string()))?;\n\n        let json: FileNodes = serde_json::from_str(&json_str)\n            .map_err(|e| Error::InvalidTargetNodesJsonFile(e.to_string()))?;\n        json.nodes\n    } else {\n        return Err(Error::TargetNodesNotSpecified);\n    };\n\n    // Generate keys for all required subnets (`nb_subnets`)\n    let mut source_subnets = generate_source_subnets(args.local_key_seed, args.nb_subnets)?;\n    info!(\"Generated source subnets: {source_subnets:#?}\");\n\n    // Target subnets (randomly assigned to every generated certificate)\n    let target_subnet_ids: Vec<SubnetId> = args\n        .target_subnets\n        .iter()\n        .flat_map(|id| {\n            id.iter().map(|id| {\n                let id =\n                    hex::decode(&id[2..]).map_err(|e| Error::InvalidSubnetId(e.to_string()))?;\n                TryInto::<[u8; 32]>::try_into(id.as_slice())\n                    .map_err(|e| Error::InvalidSubnetId(e.to_string()))\n            })\n        })\n        .map(|id| id.map(SubnetId::from_array))\n        .collect::<Result<_, _>>()?;\n\n    let mut target_node_connections: HashMap<SubnetId, Vec<TargetNodeConnection>> = HashMap::new();\n\n    // For every source 
subnet, open connection to every target node, so we will have\n    // nb_subnets * len(target_nodes) connections\n    for source_subnet in &source_subnets {\n        let connections_for_source_subnet =\n            open_target_node_connection(target_nodes.as_slice(), source_subnet).await?;\n        target_node_connections.insert(\n            source_subnet.source_subnet_id,\n            connections_for_source_subnet,\n        );\n    }\n\n    target_node_connections\n        .iter()\n        .flat_map(|(_, connections)| connections)\n        .for_each(|connection| {\n            info!(\n                \"Certificate spammer target nodes address: {}, source_subnet_id: {}, target \\\n                 subnet ids {:?}\",\n                connection.address, connection.source_subnet.source_subnet_id, target_subnet_ids\n            );\n        });\n\n    let number_of_peer_nodes = target_nodes.len();\n    let mut batch_interval = time::interval(Duration::from_millis(args.batch_interval));\n    let mut batch_number: u64 = 0;\n\n    let shutdown_sender = loop {\n        let should_send_batch = tokio::select! 
{\n            _ = batch_interval.tick() => true,\n            Some(sender) = shutdown.recv() => {\n                info!(\"Received shutdown signal, stopping certificate spammer\");\n\n                for (_, connections) in target_node_connections {\n                    for mut connection in connections {\n                        info!(\"Closing connection to target node {}\", connection.address);\n                        _ = connection.shutdown().await;\n                    }\n                }\n\n\n                break Some(sender);\n            }\n        };\n\n        if should_send_batch {\n            // Starting batch, generate cert_per_batch certificates\n            batch_number += 1;\n            let batch_id = uuid::Uuid::new_v4().to_string();\n            // TODO: Need a better name for this span\n            let span = info_span!(\n                \"Batch\",\n                batch_id,\n                batch_number,\n                cert_per_batch = args.cert_per_batch,\n                number_of_peer_nodes\n            );\n            async {\n                info!(\"Starting batch {batch_number}\");\n\n                let mut batch: Vec<Certificate> = Vec::new(); // Certificates for this batch\n                for b in 0..args.cert_per_batch {\n                    // Randomize source subnet id\n                    let source_subnet =\n                        &mut source_subnets[rand::random::<usize>() % args.nb_subnets as usize];\n                    // Randomize number of target subnets if target subnet list cli argument is provided\n                    let target_subnets: Vec<SubnetId> = if target_subnet_ids.is_empty() {\n                        // Empty list of target subnets in certificate\n                        Vec::new()\n                    } else {\n                        // Generate random list in size of 0..len(target_subnet_ids) as target subnets\n                        let number_of_target_subnets =\n                            
rand::random::<usize>() % (target_subnet_ids.len() + 1);\n                        let mut target_subnets = Vec::new();\n                        for _ in 0..number_of_target_subnets {\n                            target_subnets.push(\n                                target_subnet_ids\n                                    [rand::random::<usize>() % target_subnet_ids.len()],\n                            );\n                        }\n                        target_subnets\n                    };\n\n                    let new_cert =\n                        match generate_test_certificate(source_subnet, target_subnets.as_slice()) {\n                            Ok(cert) => cert,\n                            Err(e) => {\n                                error!(\"Unable to generate certificate: {e}\");\n                                continue;\n                            }\n                        };\n                    debug!(\"New cert number {b} in batch {batch_number} generated\");\n                    batch.push(new_cert);\n                }\n\n                // Dispatch certs in this batch\n                for cert in batch {\n                    // Randomly choose target tce node for every certificate from related source_subnet_id connection list\n                    let target_node_connection = &target_node_connections[&cert.source_subnet_id]\n                        [rand::random::<usize>() % target_nodes.len()];\n                    dispatch(cert, target_node_connection)\n                        .instrument(Span::current())\n                        .with_current_context()\n                        .await;\n                }\n            }\n            .instrument(span)\n            .await;\n\n            if let Some(nb_batches) = args.nb_batches {\n                if batch_number >= nb_batches {\n                    info!(\"Generated {nb_batches}, finishing certificate spammer...\");\n\n                    tokio::time::sleep(Duration::from_secs(5)).await;\n    
                close_target_node_connections(target_node_connections).await;\n                    info!(\"Cert spammer finished\");\n                    break None;\n                }\n            }\n        }\n    };\n\n    info!(\"Certificate spammer finished\");\n    if let Some(sender) = shutdown_sender {\n        sender\n            .send(())\n            .expect(\"Failed to send shutdown signal from certificate spammer\");\n    }\n\n    Ok(())\n}\n"
  },
  {
    "path": "crates/topos-certificate-spammer/src/utils.rs",
    "content": "use topos_core::uci::{Certificate, SubnetId};\n\nuse crate::{error::Error, SourceSubnet};\n\nlazy_static::lazy_static! {\n    /// Size of the proof\n    static ref PROOF_SIZE_BYTES: usize =\n        std::env::var(\"TOPOS_PROOF_SIZE_BYTES\")\n            .ok()\n            .and_then(|s| s.parse().ok())\n            .unwrap_or(1000);\n\n    /// Dummy proof with specified size\n    static ref STARK_BLOB: Vec<u8> =\n        (0..*PROOF_SIZE_BYTES)\n           .map(|_| rand::random::<u8>())\n           .collect::<Vec<u8>>();\n}\n\npub fn generate_random_32b_array() -> [u8; 32] {\n    (0..32)\n        .map(|_| rand::random::<u8>())\n        .collect::<Vec<u8>>()\n        .try_into()\n        .expect(\"Valid 32 byte array\")\n}\n\n/// Generate test certificate\npub fn generate_test_certificate(\n    source_subnet: &mut SourceSubnet,\n    target_subnet_ids: &[SubnetId],\n) -> Result<Certificate, Box<dyn std::error::Error>> {\n    let mut new_cert = Certificate::new(\n        source_subnet.last_certificate_id,\n        source_subnet.source_subnet_id,\n        generate_random_32b_array(),\n        generate_random_32b_array(),\n        generate_random_32b_array(),\n        target_subnet_ids,\n        0,\n        STARK_BLOB.clone(),\n    )?;\n    new_cert\n        .update_signature(&source_subnet.signing_key)\n        .map_err(Error::CertificateSigning)?;\n\n    source_subnet.last_certificate_id = new_cert.id;\n    Ok(new_cert)\n}\n\npub fn generate_source_subnets(\n    local_key_seed: u64,\n    number_of_subnets: u8,\n) -> Result<Vec<SourceSubnet>, Error> {\n    let mut subnets = Vec::new();\n\n    let mut signing_key = [0u8; 32];\n    let (_, right) = signing_key.split_at_mut(24);\n    right.copy_from_slice(local_key_seed.to_be_bytes().as_slice());\n    for _ in 0..number_of_subnets {\n        signing_key = tiny_keccak::keccak256(&signing_key);\n\n        // Subnet id of the source subnet which will be used for every generated certificate\n        let 
source_subnet_id: SubnetId = topos_crypto::keys::derive_public_key(&signing_key)\n            .map_err(|e| Error::InvalidSigningKey(e.to_string()))?\n            .as_slice()[1..33]\n            .try_into()\n            .map_err(|_| Error::InvalidSubnetId(\"Unable to parse subnet id\".to_string()))?;\n\n        subnets.push(SourceSubnet {\n            signing_key,\n            source_subnet_id,\n            last_certificate_id: Default::default(),\n        });\n    }\n\n    Ok(subnets)\n}\n"
  },
  {
    "path": "crates/topos-clock/Cargo.toml",
    "content": "[package]\nname = \"topos-clock\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n[lints]\nworkspace = true\n\n[dependencies]\ntokio.workspace = true\nfutures.workspace = true\nthiserror.workspace = true\nchrono = {version = \"0.4\", default-features = false, features = [\"clock\"]}\ntracing.workspace = true\n"
  },
  {
    "path": "crates/topos-clock/src/lib.rs",
    "content": "//! This crate is responsible for managing the clock pace.\n//!\n//! The Clock is responsible of giving informations about Epoch and Delta timing by exposing\n//! reference to the data but also by broadcasting `EpochChange` events.\n\nuse std::sync::{atomic::AtomicU64, Arc};\n\nuse tokio::sync::broadcast;\n\nmod time;\n\npub use time::TimeClock;\n\nconst BROADCAST_CHANNEL_SIZE: usize = 100;\n\npub trait Clock {\n    /// Compute Epoch/Block numbers and spawn the clock task.\n    fn spawn(self) -> Result<broadcast::Receiver<Event>, Error>;\n    /// Return a reference to the current block number\n    fn block_ref(&self) -> Arc<AtomicU64>;\n    /// Return a reference to the current epoch number\n    fn epoch_ref(&self) -> Arc<AtomicU64>;\n}\n\n#[derive(Clone, Debug, PartialEq, Eq)]\npub enum Event {\n    /// Notify an Epoch change with the associated epoch_number\n    EpochChange(u64),\n}\n\n#[derive(Debug, thiserror::Error)]\npub enum Error {\n    #[error(\"Unable to generate spawn date\")]\n    SpawnDateFailure,\n}\n"
  },
  {
    "path": "crates/topos-clock/src/time.rs",
    "content": "use std::{\n    sync::{\n        atomic::{AtomicU64, Ordering},\n        Arc,\n    },\n    time::Duration,\n};\n\nuse chrono::{DateTime, Utc};\nuse tokio::{\n    spawn,\n    sync::broadcast,\n    time::{interval_at, Instant},\n};\n\nuse crate::{Clock, Error, Event, BROADCAST_CHANNEL_SIZE};\n\n/// Time based clock implementation.\n///\n/// Simulate blockchain block production by increasing block number by 1 every second.\n/// Epoch duration can be configured when creating the clock.\npub struct TimeClock {\n    genesis: DateTime<Utc>,\n    current_block: Arc<AtomicU64>,\n    epoch_duration: u64,\n    current_epoch: Arc<AtomicU64>,\n}\n\nimpl Clock for TimeClock {\n    fn spawn(mut self) -> Result<broadcast::Receiver<Event>, Error> {\n        let (sender, receiver) = broadcast::channel(BROADCAST_CHANNEL_SIZE);\n\n        self.compute_block();\n        self.compute_epoch();\n\n        spawn(async move {\n            self.run(sender).await;\n        });\n\n        Ok(receiver)\n    }\n\n    fn block_ref(&self) -> Arc<AtomicU64> {\n        self.current_block.clone()\n    }\n    fn epoch_ref(&self) -> Arc<AtomicU64> {\n        self.current_epoch.clone()\n    }\n}\n\nimpl TimeClock {\n    /// Create a new TimeClock instance based on a genesis datatime and an epoch duration.\n    pub fn new(genesis: DateTime<Utc>, epoch_duration: u64) -> Result<Self, Error> {\n        let mut clock = Self {\n            genesis,\n            current_block: Arc::new(AtomicU64::new(0)),\n            epoch_duration,\n            current_epoch: Arc::new(AtomicU64::new(0)),\n        };\n\n        clock.compute_block();\n        clock.compute_epoch();\n\n        Ok(clock)\n    }\n\n    async fn run(&mut self, sender: broadcast::Sender<Event>) {\n        let mut interval = interval_at(Instant::now(), Duration::from_secs(1));\n        loop {\n            interval.tick().await;\n\n            let _previous_block = self.current_block.fetch_add(1, Ordering::Relaxed);\n\n            if 
self.current_block.load(Ordering::Relaxed) % self.epoch_duration == 0 {\n                self.compute_epoch();\n                _ = sender.send(Event::EpochChange(\n                    self.current_epoch.load(Ordering::Relaxed),\n                ));\n            }\n        }\n    }\n\n    fn compute_block(&mut self) {\n        let blocks = std::cmp::max(\n            Utc::now()\n                .naive_utc()\n                .signed_duration_since(self.genesis.naive_utc())\n                .num_seconds(),\n            0,\n        ) as u64;\n\n        self.current_block.store(blocks, Ordering::Relaxed);\n    }\n\n    fn compute_epoch(&mut self) {\n        self.current_epoch.store(\n            self.current_block.load(Ordering::Relaxed) / self.epoch_duration,\n            Ordering::Relaxed,\n        );\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use chrono::{Duration, Utc};\n\n    use crate::{Clock, Event, TimeClock};\n\n    #[tokio::test]\n    async fn test_time_clock() {\n        let genesis = Utc::now()\n            .checked_sub_signed(Duration::seconds(30))\n            .unwrap();\n\n        let clock = TimeClock::new(genesis, 5).unwrap();\n        let current_block = clock.block_ref();\n        let current_epoch = clock.epoch_ref();\n\n        let mut recv = clock.spawn().unwrap();\n\n        assert_eq!(recv.recv().await, Ok(Event::EpochChange(7)));\n        assert_eq!(current_epoch.load(std::sync::atomic::Ordering::Relaxed), 7);\n        assert!(current_block.load(std::sync::atomic::Ordering::Relaxed) >= 30);\n    }\n\n    #[tokio::test]\n    async fn test_time_clock_catchup() {\n        let genesis = Utc::now()\n            .checked_sub_signed(Duration::seconds(30))\n            .unwrap();\n\n        let clock = TimeClock::new(genesis, 2).unwrap();\n        let current_block = clock.block_ref();\n        let current_epoch = clock.epoch_ref();\n\n        let mut recv = clock.spawn().unwrap();\n\n        assert_eq!(recv.recv().await, 
Ok(Event::EpochChange(16)));\n        assert!(recv.try_recv().is_err());\n        assert_eq!(current_epoch.load(std::sync::atomic::Ordering::Relaxed), 16);\n        assert!(current_block.load(std::sync::atomic::Ordering::Relaxed) >= 30);\n        tokio::time::sleep(std::time::Duration::from_secs(5)).await;\n\n        assert_eq!(recv.recv().await, Ok(Event::EpochChange(17)));\n        assert_eq!(recv.recv().await, Ok(Event::EpochChange(18)));\n\n        assert_eq!(current_epoch.load(std::sync::atomic::Ordering::Relaxed), 18);\n        assert!(current_block.load(std::sync::atomic::Ordering::Relaxed) >= 35);\n    }\n}\n"
  },
  {
    "path": "crates/topos-config/Cargo.toml",
    "content": "[package]\nname = \"topos-config\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html\n\n[dependencies]\ntopos-p2p = { path = \"../topos-p2p\" }\ntopos-core = { path = \"../topos-core\" }\ntopos-wallet = { path = \"../topos-wallet\" }\n\nasync-stream.workspace = true\nasync-trait.workspace = true\nclap.workspace = true\nhex.workspace = true\nfutures.workspace = true\nopentelemetry.workspace = true\nserde.workspace = true\nserde_json.workspace = true\ntokio = { workspace = true, features = [\"full\"] }\ntokio-util.workspace = true\ntonic.workspace = true\ntower.workspace = true\ntracing = { workspace = true, features = [\"log\"] }\ntracing-opentelemetry.workspace = true\ntracing-subscriber = { workspace = true, features = [\"env-filter\", \"json\", \"ansi\", \"fmt\"] }\nuuid.workspace = true\nrand.workspace = true\nreqwest.workspace = true\nthiserror.workspace = true\nopentelemetry-otlp = { workspace = true, features = [\"grpc-tonic\", \"metrics\", \"tls-roots\"] }\nfigment = { version = \"0.10\", features = [\"yaml\", \"toml\", \"env\"] }\ndirs = \"5.0\"\ntracing-log = { version = \"0.1.3\", features = [\"env_logger\"] }\ntar = \"0.4.38\"\nflate2 =\"1.0.26\"\nurl = \"2.3.1\"\nonce_cell = \"1.17.1\"\ntoml = \"0.7.4\"\nregex = \"1\"\nrlp = \"0.5.1\"\nopenssl = { version = \"0.10.61\", features = [\"vendored\"] }\n\n[dev-dependencies]\ntopos-tce-broadcast = { path = \"../topos-tce-broadcast\" }\ntopos-tce-synchronizer = { path = \"../topos-tce-synchronizer\" }\ntopos-tce-gatekeeper = { path = \"../topos-tce-gatekeeper\" }\ntopos-tce-api = { path = \"../topos-tce-api\" }\ntopos-tce-storage = { path = \"../topos-tce-storage\" }\ntopos-test-sdk = { path = \"../topos-test-sdk\" }\nserde.workspace = true\nserde_json.workspace = true\ntest-log.workspace = true\nenv_logger.workspace = true\nrand.workspace = true\nfutures.workspace = true\nlibp2p = { workspace = true, 
features = [\"identify\"] }\nassert_cmd = \"2.0.6\"\ninsta = { version = \"1.21\", features = [\"json\", \"redactions\"] }\nrstest = { workspace = true, features = [\"async-timeout\"] }\ntempfile = \"3.8.0\"\npredicates = \"3.0.3\"\nsysinfo = \"0.29.11\"\nserial_test = {version = \"0.9.0\"}\n\n\n\n[lints]\nworkspace = true\n"
  },
  {
    "path": "crates/topos-config/assets/genesis-example.json",
    "content": "{\n    \"name\": \"polygon-edge\",\n    \"genesis\": {\n        \"nonce\": \"0x0000000000000000\",\n        \"timestamp\": \"0x0\",\n        \"extraData\": \"0x0000000000000000000000000000000000000000000000000000000000000000f90129f90120f84694100d617e4392c02b31bdce650b26b6c0c3e04f95b0ae7711044926a23c1462754cbb0d1b43fb91fc8fd18bf5c81edb4de15124203157657bf7a8e86d9a3be5f32de725f3c4f8469492183cff18a1328e7d791d607589a15d9eee4bc4b0b45118f9e430d94f424019bb8702e004db5dad5725ab1a5346b0aaad556935189c47df5e401988527ce880bb1e2492cef84694b4973cdb10894d1d1547673bd758589034c2bba5b0b9833912ee2eab270a1204f3f9e58c5f2be603cc2ce32f5467e2a8246bb6b25a7908b39a8a0ed629a689da376b5cdd2df84694c16d83893cb61872206d4e271b813015d3242d94b0a468068169523df684362de6a5b729c8db400958bfd4f6d4e3646cc640f3d241253a21b00f05ff97545f535b36b31c7b80c28080c080\",\n        \"gasLimit\": \"0x500000\",\n        \"difficulty\": \"0x1\",\n        \"mixHash\": \"0x0000000000000000000000000000000000000000000000000000000000000000\",\n        \"coinbase\": \"0x0000000000000000000000000000000000000000\",\n        \"alloc\": {\n            \"0x100D617E4392C02B31bdCe650b26b6c0c3E04F95\": {\n                \"balance\": \"0x3b9aca00\"\n            },\n            \"0x92183Cff18A1328E7d791D607589A15d9EeE4bC4\": {\n                \"balance\": \"0x3b9aca00\"\n            },\n            \"0xB4973Cdb10894D1D1547673bD758589034C2BBa5\": {\n                \"balance\": \"0x3b9aca00\"\n            },\n            \"0xC16d83893cB61872206D4e271B813015D3242d94\": {\n                \"balance\": \"0x3b9aca00\"\n            }\n        },\n        \"number\": \"0x0\",\n        \"gasUsed\": \"0x70000\",\n        \"parentHash\": \"0x0000000000000000000000000000000000000000000000000000000000000000\",\n        \"baseFee\": \"0x0\",\n        \"baseFeeEM\": \"0x0\"\n    },\n    \"params\": {\n        \"chainID\": 100,\n        \"engine\": {\n            \"ibft\": {\n                \"blockTime\": 6000000000,\n                
\"epochSize\": 30000,\n                \"type\": \"PoA\",\n                \"validator_type\": \"bls\"\n            }\n        },\n        \"blockGasTarget\": 0,\n        \"burnContract\": null,\n        \"burnContractDestinationAddress\": \"0x0000000000000000000000000000000000000000\"\n    },\n    \"bootnodes\": [\n        \"/ip4/10.101.192.110/tcp/10001/p2p/16Uiu2HAkxKTnwPL3eZmFkiKYJiG3um9uraPh21XhsJEPNm8juhy3\",\n        \"/ip4/10.101.232.59/tcp/10001/p2p/16Uiu2HAmQfaE4bjJMVCwzigAUgp9eLcGQz8HZpURqEPNjtfDwge8\",\n        \"/ip4/10.101.208.7/tcp/10001/p2p/16Uiu2HAm5tS7AdBhhtQ2JuTtyy2U4uFqsiW57vof5fiyHPRpwboD\",\n        \"/ip4/10.101.210.76/tcp/10001/p2p/16Uiu2HAmFjGqEUYSnKkoqURRu8bZaWpuGdxBBqW15KFs4BsxFxdp\"\n    ]\n}\n\n"
  },
  {
    "path": "crates/topos-config/src/base.rs",
    "content": "use std::path::Path;\n\nuse figment::{\n    providers::{Format, Toml},\n    Figment,\n};\nuse serde::{Deserialize, Serialize};\n\nuse crate::node::NodeRole;\nuse crate::Config;\n\n#[derive(Serialize, Deserialize, Debug, Clone)]\n#[serde(rename_all = \"kebab-case\")]\npub struct BaseConfig {\n    #[serde(default = \"default_name\")]\n    pub name: String,\n\n    #[serde(default = \"default_role\")]\n    pub role: NodeRole,\n\n    #[serde(default = \"default_subnet\")]\n    pub subnet: String,\n\n    #[serde(default = \"default_secrets_config\")]\n    pub secrets_config: Option<String>,\n}\n\nfn default_name() -> String {\n    \"default\".to_string()\n}\n\nfn default_role() -> NodeRole {\n    NodeRole::Validator\n}\n\nfn default_subnet() -> String {\n    \"topos\".to_string()\n}\n\nfn default_secrets_config() -> Option<String> {\n    None\n}\n\nimpl BaseConfig {\n    pub fn need_tce(&self) -> bool {\n        self.subnet == \"topos\"\n    }\n\n    pub fn need_sequencer(&self) -> bool {\n        matches!(self.role, NodeRole::Sequencer)\n    }\n\n    pub fn need_edge(&self) -> bool {\n        true\n    }\n}\n\nimpl Config for BaseConfig {\n    type Output = Self;\n\n    fn load_from_file(figment: Figment, home: &Path) -> Figment {\n        let home = home.join(\"config.toml\");\n\n        let base = Figment::new()\n            .merge(Toml::file(home).nested())\n            .select(\"base\");\n\n        figment.merge(base)\n    }\n\n    fn load_context(figment: Figment) -> Result<Self::Output, figment::Error> {\n        figment.extract()\n    }\n\n    fn profile() -> String {\n        \"base\".to_string()\n    }\n}\n"
  },
  {
    "path": "crates/topos-config/src/edge/command.rs",
    "content": "use serde_json::Value;\nuse std::collections::HashMap;\nuse std::os::unix::prelude::ExitStatusExt;\nuse std::path::{Path, PathBuf};\nuse std::process::{ExitStatus, Stdio};\nuse tokio::{\n    io::{AsyncBufReadExt, BufReader},\n    process::Command,\n};\nuse tracing::debug;\nuse tracing::{error, info, warn};\n\npub const BINARY_NAME: &str = \"polygon-edge\";\n\npub struct CommandConfig {\n    binary_path: PathBuf,\n    args: Vec<String>,\n}\n\nimpl CommandConfig {\n    pub fn new(binary_path: PathBuf) -> Self {\n        let binary_path = if binary_path == PathBuf::from(\".\") {\n            std::env::current_dir()\n                .expect(\"Cannot get the current directory\")\n                .join(BINARY_NAME)\n        } else {\n            binary_path\n        };\n\n        CommandConfig {\n            binary_path,\n            args: Vec::new(),\n        }\n    }\n\n    pub fn init(mut self, path: &Path) -> Self {\n        self.args.push(\"secrets\".into());\n        self.args.push(\"init\".into());\n        self.args.push(\"--insecure\".into());\n        self.args.push(\"--data-dir\".into());\n        self.args.push(format!(\"{}\", path.display()));\n        self\n    }\n\n    pub fn server(\n        mut self,\n        data_dir: &Path,\n        genesis_path: &Path,\n        edge_args: HashMap<String, String>,\n    ) -> Self {\n        self.args.push(\"server\".into());\n        self.args.push(\"--data-dir\".into());\n        self.args.push(format!(\"{}\", data_dir.display()));\n        self.args.push(\"--chain\".into());\n        self.args.push(format!(\"{}\", genesis_path.display()));\n        self.args.push(\"--json\".into());\n\n        for (k, v) in &edge_args {\n            self.args.push(format!(\"--{k}\"));\n            self.args.push(v.to_string());\n        }\n\n        self\n    }\n\n    pub async fn spawn(self) -> Result<ExitStatus, std::io::Error> {\n        info!(\n            \"Spawning Polygon Edge binary located at: {:?}, args: 
{:?}\",\n            self.binary_path, self.args\n        );\n        let mut command = Command::new(self.binary_path);\n        command.kill_on_drop(true);\n        command.args(self.args);\n\n        let mut child = command\n            .stderr(Stdio::piped())\n            .stdout(Stdio::piped())\n            .stdin(Stdio::piped())\n            .spawn()?;\n\n        if let Some(pid) = child.id() {\n            info!(\"Polygon Edge child process with pid {pid} successfully started\");\n        }\n\n        let stdout = child\n            .stderr\n            .take()\n            .expect(\"child did not have a handle to stdout\");\n        let mut reader = BufReader::new(stdout).lines();\n\n        let running = async { child.wait().await };\n\n        let logging = async {\n            while let Ok(line) = reader.next_line().await {\n                match line {\n                    Some(l) => match serde_json::from_str(&l) {\n                        Ok(v) => EdgeLog::new(v).log(),\n                        Err(_) => println!(\"{l}\"),\n                    },\n                    None => break,\n                }\n            }\n        };\n\n        let (running_out, _) = tokio::join!(running, logging);\n\n        let exit_status = running_out?;\n\n        info!(\n            \"The Edge process is terminated with exit status {:?}; exit code: {:?}, exit signal \\\n             {:?}, success: {:?}, raw code: {}\",\n            exit_status,\n            exit_status.code(),\n            exit_status.signal(),\n            exit_status.success(),\n            exit_status.into_raw(),\n        );\n        Ok(exit_status)\n    }\n}\n\npub struct EdgeLog {\n    v: HashMap<String, Value>,\n}\n\nimpl EdgeLog {\n    pub fn new(v: HashMap<String, Value>) -> Self {\n        Self { v }\n    }\n\n    pub fn log(&mut self) {\n        match self.v.get(\"@level\") {\n            Some(level) => match level.as_str() {\n                Some(r#\"info\"#) => info!(\"{}\", 
self.internal()),\n                Some(r#\"warn\"#) => warn!(\"{}\", self.internal()),\n                Some(r#\"debug\"#) => debug!(\"{}\", self.internal()),\n                Some(r#\"error\"#) => error!(\"{}\", self.internal()),\n                _ => error!(\"log parse failure: {:?}\", self.v),\n            },\n            None => error!(\"{:?}\", self.v.get(\"error\")),\n        }\n    }\n\n    fn internal(&mut self) -> String {\n        let module = self.v.remove(\"@module\").unwrap();\n        let message = self.v.remove(\"@message\").unwrap();\n\n        // FIXME: Figure out tracing features to make this nicer\n        self.v.remove(\"@timestamp\");\n        self.v.remove(\"@level\");\n\n        let mut message = format!(\"{module}: {message}\");\n\n        for (k, s) in &self.v {\n            message = format!(\"{} {}:{}\", message, k, s);\n        }\n\n        message\n    }\n}\n"
  },
  {
    "path": "crates/topos-config/src/edge.rs",
    "content": "use crate::{edge::command::CommandConfig, Config};\nuse figment::{\n    providers::{Format, Toml},\n    Figment,\n};\nuse serde::{Deserialize, Serialize};\nuse std::{\n    collections::HashMap,\n    path::{Path, PathBuf},\n    process::ExitStatus,\n};\nuse tokio::{spawn, task::JoinHandle};\nuse tracing::{error, info};\n\nuse self::command::BINARY_NAME;\n\n// TODO: Provides the default arguments here\n// Serde `flatten` and `default` doesn't work together yet\n// https://github.com/serde-rs/serde/issues/1626\n#[derive(Serialize, Deserialize, Debug, Clone)]\n#[serde(rename_all = \"kebab-case\")]\npub struct EdgeConfig {\n    #[serde(flatten)]\n    pub args: HashMap<String, String>,\n}\n\nimpl Config for EdgeConfig {\n    type Output = EdgeConfig;\n\n    fn load_from_file(figment: Figment, home: &Path) -> Figment {\n        let home = home.join(\"config.toml\");\n\n        let edge = Figment::new()\n            .merge(Toml::file(home).nested())\n            .select(\"edge\");\n\n        figment.merge(edge)\n    }\n\n    fn load_context(figment: Figment) -> Result<Self::Output, figment::Error> {\n        figment.extract()\n    }\n\n    fn profile() -> String {\n        \"edge\".to_string()\n    }\n}\n\n#[derive(Serialize, Deserialize, Debug, Clone)]\n#[serde(rename_all = \"kebab-case\")]\npub struct EdgeBinConfig {\n    pub edge_path: PathBuf,\n}\n\nimpl EdgeBinConfig {\n    pub fn binary_path(&self) -> PathBuf {\n        self.edge_path.join(BINARY_NAME)\n    }\n}\n\nimpl Config for EdgeBinConfig {\n    type Output = EdgeBinConfig;\n\n    fn load_from_file(figment: Figment, home: &Path) -> Figment {\n        let home = home.join(\"config.toml\");\n\n        let edge = Figment::new()\n            .merge(Toml::file(home).nested())\n            .select(\"edge\");\n\n        figment.merge(edge)\n    }\n\n    fn load_context(figment: Figment) -> Result<Self::Output, figment::Error> {\n        figment.extract()\n    }\n\n    fn profile() -> String {\n        
\"edge\".to_string()\n    }\n}\npub mod command;\n\npub fn generate_edge_config(\n    edge_path: PathBuf,\n    config_path: PathBuf,\n) -> JoinHandle<Result<ExitStatus, std::io::Error>> {\n    // Create the Polygon Edge config\n    info!(\"Generating the configuration at {config_path:?}\");\n    info!(\"Polygon-edge binary located at: {edge_path:?}\");\n    spawn(async move {\n        CommandConfig::new(edge_path)\n            .init(&config_path)\n            .spawn()\n            .await\n            .map_err(|e| {\n                error!(\"Failed to generate the edge configuration: {e:?}\");\n                e\n            })\n    })\n}\n"
  },
  {
    "path": "crates/topos-config/src/genesis/mod.rs",
    "content": "use rlp::Rlp;\nuse std::collections::HashSet;\nuse std::str::FromStr;\nuse std::{fs, path::PathBuf};\n\nuse serde_json::Value;\nuse topos_core::types::ValidatorId;\nuse topos_p2p::{Multiaddr, PeerId};\nuse tracing::info;\n\nuse crate::node::NodeConfig;\n\n#[cfg(test)]\npub(crate) mod tests;\n\n/// From the Edge format\npub struct Genesis {\n    pub json: Value,\n}\n\n#[derive(Debug, thiserror::Error)]\npub enum Error {\n    #[error(\"Failed to parse validators\")]\n    ParseValidators,\n\n    #[error(\"Invalid genesis file on path {0}: {1}\")]\n    InvalidGenesisFile(String, String),\n}\n\nimpl Genesis {\n    pub fn new(path: &PathBuf) -> Result<Self, Error> {\n        info!(\"Reading subnet genesis file {}\", path.display());\n        let genesis_file = fs::File::open(path)\n            .map_err(|e| Error::InvalidGenesisFile(path.display().to_string(), e.to_string()))?;\n\n        let json: Value = serde_json::from_reader(genesis_file).expect(\"genesis json parsed\");\n\n        Ok(Self { json })\n    }\n\n    // TODO: parse directly with serde\n    pub fn boot_peers(&self, port: Option<u16>) -> Vec<(PeerId, Multiaddr)> {\n        match self.json[\"bootnodes\"].as_array() {\n            Some(v) => v\n                .iter()\n                .map(|bootnode| {\n                    let (multiaddr, peerid) =\n                        bootnode.as_str().unwrap().rsplit_once(\"/p2p/\").unwrap();\n\n                    // Extract the Edge port from the genesis file\n                    let (multiaddr, edge_port) = multiaddr.rsplit_once('/').unwrap();\n\n                    // Use the given port instead if any\n                    let port = port.map_or(edge_port.to_string(), |p| p.to_string());\n\n                    let multiaddr = format!(\"{multiaddr}/{port}\");\n                    (peerid.parse().unwrap(), multiaddr.parse().unwrap())\n                })\n                .collect::<Vec<_>>(),\n            None => Vec::default(),\n        }\n    }\n\n    
/// Parse the validators from the `extraData` field of the genesis file.\n    /// The `extraData` is padded with 32 bytes, and the validators are RLP encoded.\n    /// Each validator is 20 bytes, with a SEAL at the end of the whole list (8 bytes)\n    pub fn validators(&self) -> Result<HashSet<ValidatorId>, Error> {\n        let extra_data = self.json[\"genesis\"][\"extraData\"]\n            .as_str()\n            .expect(\"The extraData field must be present. Bad genesis file?\")\n            .to_string();\n\n        // Define constants for the prefix size and validator size\n        const VANITY_SIZE: usize = 32;\n\n        // Remove the \"0x\" prefix from the hex string\n        let hex_string = &extra_data[2..];\n\n        // Convert the hex string to bytes\n        let bytes = hex::decode(hex_string).expect(\"Failed to decode hex string\");\n\n        // Slice the bytes to get the validators data\n        let validators_data = &bytes[VANITY_SIZE..];\n\n        // Create an Rlp object from the validators data\n        let rlp = Rlp::new(validators_data);\n\n        // Get the first Rlp item (index 0) and iterate over its items\n        let first_item = rlp.at(0).expect(\"Failed to get first RLP item\");\n        let item_count = first_item\n            .item_count()\n            .expect(\"Validators must be an RLP list. 
Bad genesis file?\");\n        first_item.into_iter().try_fold(\n            HashSet::with_capacity(item_count),\n            |mut validator_public_keys, validator_rlp| {\n                if let Ok(public_key) = validator_rlp.data() {\n                    let address = format!(\"0x{}\", hex::encode(&public_key[1..=20]));\n                    validator_public_keys.insert(\n                        ValidatorId::from_str(address.as_str())\n                            .map_err(|_| Error::ParseValidators)?,\n                    );\n                }\n                Ok(validator_public_keys)\n            },\n        )\n    }\n}\n\nimpl TryFrom<&NodeConfig> for Genesis {\n    type Error = Error;\n\n    fn try_from(config: &NodeConfig) -> Result<Self, Self::Error> {\n        Genesis::new(&config.genesis_path)\n    }\n}\n"
  },
  {
    "path": "crates/topos-config/src/genesis/tests.rs",
    "content": "use rstest::fixture;\nuse rstest::rstest;\nuse std::str::FromStr;\nuse topos_core::types::ValidatorId;\n\nuse super::Genesis;\n\nmacro_rules! test_case {\n    ($fname:expr) => {\n        concat!(env!(\"CARGO_MANIFEST_DIR\"), \"/assets/\", $fname)\n    };\n}\n\n#[fixture]\n#[once]\npub fn genesis() -> Genesis {\n    Genesis::new(&test_case!(\"genesis-example.json\").into())\n        .expect(\"Expected valid test genesis file\")\n}\n\n#[rstest]\npub fn test_correct_validator_count(genesis: &Genesis) {\n    let validators = genesis.validators().unwrap();\n    assert_eq!(validators.len(), 4);\n}\n\n#[rstest]\npub fn test_parse_bootnodes(genesis: &Genesis) {\n    let bootnodes = genesis.boot_peers(None);\n\n    assert_eq!(4, bootnodes.len());\n}\n\n#[rstest]\npub fn test_extract_validators(genesis: &Genesis) {\n    let validators = genesis.validators().unwrap();\n\n    let first = ValidatorId::from_str(\"0x100d617e4392c02b31bdce650b26b6c0c3e04f95\").unwrap();\n    let second = ValidatorId::from_str(\"0x92183cff18a1328e7d791d607589a15d9eee4bc4\").unwrap();\n    let third = ValidatorId::from_str(\"0xb4973cdb10894d1d1547673bd758589034c2bba5\").unwrap();\n    let fourth = ValidatorId::from_str(\"0xc16d83893cb61872206d4e271b813015d3242d94\").unwrap();\n\n    assert_eq!(validators.get(&first), Some(&first));\n    assert_eq!(validators.get(&second), Some(&second));\n    assert_eq!(validators.get(&third), Some(&third));\n    assert_eq!(validators.get(&fourth), Some(&fourth));\n}\n"
  },
  {
    "path": "crates/topos-config/src/lib.rs",
    "content": "pub(crate) mod base;\npub mod edge;\npub mod genesis;\npub mod node;\npub mod sequencer;\npub mod tce;\n\nuse std::path::Path;\n\nuse figment::providers::Serialized;\nuse figment::{error::Kind, Figment};\nuse serde::Serialize;\n\npub trait Config: Serialize {\n    /// The configuration type returned (should be Self).\n    type Output;\n\n    /// Load the configuration from a file or multiple files.\n    /// The home is the directory where the configuration files are located.\n    /// For node, it is the `node` directory in the $TOPOS_HOME directory.\n    fn load_from_file(figment: Figment, home: &Path) -> Figment;\n\n    /// Load the configuration from the context.\n    /// Trying to extract the configuration from the figment context.\n    fn load_context(figment: Figment) -> Result<Self::Output, figment::Error>;\n\n    /// Return the profile name of the configuration to be used\n    /// when generating the file.\n    fn profile() -> String;\n\n    /// Convert the configuration to a TOML table.\n    fn to_toml(&self) -> Result<toml::Table, toml::ser::Error> {\n        let mut config_toml = toml::Table::new();\n\n        let config = toml::Table::try_from(self)?;\n\n        // Flatten the top level\n        for (profile, content) in config {\n            config_toml.insert(profile, content);\n        }\n\n        Ok(config_toml)\n    }\n\n    /// Main function to load the configuration.\n    /// It will load the configuration from the file and an optional existing struct (if any)\n    /// and then extract the configuration from the context in order to build the Config.\n    /// The Config is then returned or an error if the configuration is not valid.\n    fn load<S: Serialize>(home: &Path, config: Option<&S>) -> Result<Self::Output, figment::Error> {\n        let mut figment = Figment::new();\n\n        figment = Self::load_from_file(figment, home);\n\n        if let Some(config) = config {\n            figment = 
figment.merge(Serialized::from(config, Self::profile()))\n        }\n\n        Self::load_context(figment)\n    }\n}\n\npub(crate) fn load_config<T: Config, S: Serialize>(\n    node_path: &Path,\n    config: Option<&S>,\n) -> T::Output {\n    match T::load(node_path, config) {\n        Ok(config) => config,\n        Err(figment::Error {\n            kind: Kind::MissingField(name),\n            ..\n        }) => {\n            println!(\"Missing field: {}\", name);\n            std::process::exit(1);\n        }\n        Err(e) => {\n            println!(\"Failed to load config: {e}\");\n            std::process::exit(1);\n        }\n    }\n}\n"
  },
  {
    "path": "crates/topos-config/src/node.rs",
    "content": "use std::path::{Path, PathBuf};\n\nuse figment::{\n    providers::{Format, Toml},\n    Figment,\n};\n\nuse serde::{Deserialize, Serialize};\nuse topos_wallet::SecretManager;\nuse tracing::{debug, error};\n\nuse crate::{\n    base::BaseConfig,\n    edge::{EdgeBinConfig, EdgeConfig},\n    load_config,\n    sequencer::SequencerConfig,\n    tce::TceConfig,\n    Config,\n};\n\n#[derive(clap::ValueEnum, Clone, Debug, Deserialize, Serialize)]\n#[serde(rename_all = \"lowercase\")]\npub enum NodeRole {\n    Validator,\n    Sequencer,\n    FullNode,\n}\n\n#[derive(Serialize, Deserialize, Debug)]\npub struct NodeConfig {\n    pub base: BaseConfig,\n    pub tce: Option<TceConfig>,\n    pub sequencer: Option<SequencerConfig>,\n    pub edge: Option<EdgeConfig>,\n\n    #[serde(skip)]\n    pub home_path: PathBuf,\n\n    #[serde(skip)]\n    pub node_path: PathBuf,\n\n    #[serde(skip)]\n    pub genesis_path: PathBuf,\n\n    #[serde(skip)]\n    pub edge_bin: Option<EdgeBinConfig>,\n}\n\nimpl NodeConfig {\n    /// Try to create a new node config struct from the given home path and node name.\n    /// It expects a config file to be present in the node's folder.\n    ///\n    /// This `config.toml` can be generated using: `topos node init` command\n    pub fn try_from<S: Serialize>(\n        home_path: &Path,\n        node_name: &str,\n        config: Option<&S>,\n    ) -> Result<Self, std::io::Error> {\n        let node_path = home_path.join(\"node\").join(node_name);\n        let config_path = node_path.join(\"config.toml\");\n\n        // TODO: Move this to `topos-node` when migrated\n        if !Path::new(&config_path).exists() {\n            error!(\n                \"Please run 'topos node init --name {node_name}' to create a config file first \\\n                 for {node_name}.\"\n            );\n            std::process::exit(1);\n        }\n\n        Ok(Self::build_config(node_path, home_path, config))\n    }\n\n    /// Create a new node config struct from 
the given home path and node name.\n    ///\n    /// It doesn't check the existence of the config file.\n    /// It's useful for creating a config file for a new node, relying on the default values.\n    pub fn create<S: Serialize>(home_path: &Path, node_name: &str, config: Option<&S>) -> Self {\n        let node_path = home_path.join(\"node\").join(node_name);\n\n        Self::build_config(node_path, home_path, config)\n    }\n\n    /// Common function to build a node config struct from the given home path and node name.\n    fn build_config<S: Serialize>(\n        node_path: PathBuf,\n        home_path: &Path,\n        config: Option<&S>,\n    ) -> Self {\n        let node_folder = node_path.as_path();\n        let base = load_config::<BaseConfig, _>(node_folder, config);\n\n        // Load genesis pointed by the local config\n        let genesis_path = home_path\n            .join(\"subnet\")\n            .join(base.subnet.clone())\n            .join(\"genesis.json\");\n\n        let mut config = NodeConfig {\n            node_path: node_path.to_path_buf(),\n            genesis_path,\n            home_path: home_path.to_path_buf(),\n            base: base.clone(),\n            sequencer: base\n                .need_sequencer()\n                .then(|| load_config::<SequencerConfig, ()>(node_folder, None)),\n            tce: base\n                .need_tce()\n                .then(|| load_config::<TceConfig, ()>(node_folder, None)),\n            edge_bin: base\n                .need_edge()\n                .then(|| load_config::<EdgeBinConfig, _>(node_folder, config)),\n            edge: base\n                .need_edge()\n                .then(|| load_config::<EdgeConfig, ()>(node_folder, None)),\n        };\n\n        // Make the TCE DB path relative to the folder\n        if let Some(config) = config.tce.as_mut() {\n            config.db_path = node_folder.join(&config.db_path);\n            debug!(\n                \"Maked TCE DB path relative to the node 
folder -> {:?}\",\n                config.db_path\n            );\n        }\n\n        config\n    }\n}\n\nimpl Config for NodeConfig {\n    type Output = NodeConfig;\n\n    fn load_from_file(figment: Figment, home: &Path) -> Figment {\n        let home = home.join(\"config.toml\");\n\n        figment.merge(Toml::file(home))\n    }\n\n    fn load_context(figment: Figment) -> Result<Self::Output, figment::Error> {\n        figment.extract()\n    }\n\n    fn profile() -> String {\n        \"default\".to_string()\n    }\n}\n\nimpl From<&NodeConfig> for SecretManager {\n    fn from(val: &NodeConfig) -> Self {\n        match val.base.secrets_config.as_ref() {\n            Some(secrets_config) => SecretManager::from_aws(secrets_config),\n            None => SecretManager::from_fs(val.node_path.clone()),\n        }\n    }\n}\n"
  },
  {
    "path": "crates/topos-config/src/sequencer.rs",
    "content": "use std::path::Path;\n\nuse crate::Config;\nuse figment::{\n    providers::{Format, Toml},\n    Figment,\n};\nuse serde::{Deserialize, Serialize};\n\n#[derive(Serialize, Deserialize, Debug, Clone)]\n#[serde(rename_all = \"kebab-case\")]\npub struct SequencerConfig {\n    /// SubnetId of your Sequencer, hex encoded 32 bytes prefixed with 0x\n    pub subnet_id: Option<String>,\n\n    /// JSON-RPC endpoint of the Edge node, websocket and http support expected\n    /// If the endpoint address starts with `https`, ssl will be used with http/websocket\n    #[serde(default = \"default_subnet_jsonrpc_endpoint\")]\n    pub subnet_jsonrpc_http: String,\n\n    // Optional explicit websocket endpoint for the subnet jsonrpc api. If this parameter is not provided,\n    // it will be derived from the `subnet_jsonrpc_http`.\n    // Full uri value is expected, e.g. `wss://arbitrum.infura.com/v3/ws/mykey` or `ws://127.0.0.1/ws`\n    pub subnet_jsonrpc_ws: Option<String>,\n\n    /// Address where the Topos Core contract is deployed\n    #[serde(default = \"default_subnet_contract_address\")]\n    pub subnet_contract_address: String,\n\n    /// gRPC API endpoint of one TCE process\n    #[serde(default = \"default_tce_grpc_endpoint\")]\n    pub tce_grpc_endpoint: String,\n\n    /// OTLP agent endpoint, not used if not provided\n    pub otlp_agent: Option<String>,\n\n    /// OTLP service name, not used if not provided\n    pub otlp_service_name: Option<String>,\n\n    /// Start synchronizing from particular block number\n    /// Default is to sync from genesis block (0)\n    pub start_block: Option<u64>,\n}\n\nfn default_subnet_jsonrpc_endpoint() -> String {\n    \"127.0.0.1:8545\".to_string()\n}\n\nfn default_subnet_contract_address() -> String {\n    \"0x0000000000000000000000000000000000000000\".to_string()\n}\n\nfn default_tce_grpc_endpoint() -> String {\n    \"http://[::1]:1340\".to_string()\n}\n\nimpl Config for SequencerConfig {\n    type Output = Self;\n\n    fn 
load_from_file(figment: Figment, home: &Path) -> Figment {\n        let home = home.join(\"config.toml\");\n\n        let sequencer = Figment::new()\n            .merge(Toml::file(home).nested())\n            .select(\"sequencer\");\n\n        figment.merge(sequencer)\n    }\n\n    fn load_context(figment: Figment) -> Result<Self::Output, figment::Error> {\n        figment.extract()\n    }\n\n    fn profile() -> String {\n        \"sequencer\".to_string()\n    }\n}\n"
  },
  {
    "path": "crates/topos-config/src/tce/broadcast.rs",
    "content": "use serde::{Deserialize, Serialize};\n\n/// Broadcast threshold configurations\n#[derive(Clone, Debug, Default, Deserialize, Serialize)]\npub struct ReliableBroadcastParams {\n    /// Echo threshold\n    pub echo_threshold: usize,\n    /// Ready threshold\n    pub ready_threshold: usize,\n    /// Delivery threshold\n    pub delivery_threshold: usize,\n}\n\nimpl ReliableBroadcastParams {\n    pub const fn new(n: usize) -> Self {\n        let f: usize = n / 3;\n\n        Self {\n            echo_threshold: 1 + (n + f) / 2,\n            ready_threshold: 1 + f,\n            delivery_threshold: 2 * f + 1,\n        }\n    }\n}\n"
  },
  {
    "path": "crates/topos-config/src/tce/p2p.rs",
    "content": "use std::net::SocketAddr;\n\nuse serde::{Deserialize, Serialize};\nuse topos_p2p::Multiaddr;\n\nuse super::DEFAULT_IP;\n\n#[derive(Serialize, Deserialize, Debug, Clone)]\n#[serde(rename_all = \"kebab-case\")]\npub struct P2PConfig {\n    /// List of multiaddresses to listen for incoming connections\n    #[serde(default = \"default_listen_addresses\")]\n    pub listen_addresses: Vec<Multiaddr>,\n    /// List of multiaddresses to advertise to the network\n    #[serde(default = \"default_public_addresses\")]\n    pub public_addresses: Vec<Multiaddr>,\n\n    #[serde(skip)]\n    pub is_bootnode: bool,\n}\n\nimpl Default for P2PConfig {\n    fn default() -> Self {\n        Self {\n            listen_addresses: default_listen_addresses(),\n            public_addresses: default_public_addresses(),\n            is_bootnode: false,\n        }\n    }\n}\n\nconst fn default_libp2p_api_addr() -> SocketAddr {\n    SocketAddr::V4(std::net::SocketAddrV4::new(DEFAULT_IP, 9090))\n}\n\nfn default_listen_addresses() -> Vec<Multiaddr> {\n    vec![format!(\n        \"/ip4/{}/tcp/{}\",\n        default_libp2p_api_addr().ip(),\n        default_libp2p_api_addr().port()\n    )\n    .parse()\n    .expect(\n        r#\"\n        Listen multiaddresses generation failure.\n        This is a critical bug that need to be report on `https://github.com/topos-protocol/topos/issues`\n    \"#,\n    )]\n}\n\nfn default_public_addresses() -> Vec<Multiaddr> {\n    vec![format!(\n        \"/ip4/{}/tcp/{}\",\n        default_libp2p_api_addr().ip(),\n        default_libp2p_api_addr().port()\n    )\n    .parse()\n    .expect(\n        r#\"\n        Public multiaddresses generation failure.\n        This is a critical bug that need to be report on `https://github.com/topos-protocol/topos/issues`\n    \"#,\n    )]\n}\n"
  },
  {
    "path": "crates/topos-config/src/tce/synchronization.rs",
    "content": "use serde::{Deserialize, Serialize};\n\n/// Configuration for the TCE synchronization\n#[derive(Serialize, Deserialize, Debug, Clone)]\n#[serde(rename_all = \"kebab-case\")]\npub struct SynchronizationConfig {\n    /// Interval in seconds to synchronize the TCE\n    #[serde(default = \"SynchronizationConfig::default_interval_seconds\")]\n    pub interval_seconds: u64,\n\n    /// Maximum number of Proof of delivery per query per subnet\n    #[serde(default = \"SynchronizationConfig::default_limit_per_subnet\")]\n    pub limit_per_subnet: usize,\n}\n\nimpl Default for SynchronizationConfig {\n    fn default() -> Self {\n        Self {\n            interval_seconds: SynchronizationConfig::INTERVAL_SECONDS,\n            limit_per_subnet: SynchronizationConfig::LIMIT_PER_SUBNET,\n        }\n    }\n}\n\nimpl SynchronizationConfig {\n    pub const INTERVAL_SECONDS: u64 = 10;\n    pub const LIMIT_PER_SUBNET: usize = 100;\n\n    const fn default_interval_seconds() -> u64 {\n        Self::INTERVAL_SECONDS\n    }\n\n    const fn default_limit_per_subnet() -> usize {\n        Self::LIMIT_PER_SUBNET\n    }\n}\n"
  },
  {
    "path": "crates/topos-config/src/tce.rs",
    "content": "use std::collections::HashSet;\nuse std::path::Path;\nuse std::{net::SocketAddr, path::PathBuf};\n\nuse figment::{\n    providers::{Format, Toml},\n    Figment,\n};\nuse serde::{Deserialize, Serialize};\nuse topos_core::types::ValidatorId;\nuse topos_p2p::config::NetworkConfig;\n\nuse crate::Config;\nuse topos_p2p::{Multiaddr, PeerId};\n\nuse self::broadcast::ReliableBroadcastParams;\nuse self::p2p::P2PConfig;\nuse self::synchronization::SynchronizationConfig;\n\npub mod broadcast;\npub mod p2p;\npub mod synchronization;\n\nconst DEFAULT_IP: std::net::Ipv4Addr = std::net::Ipv4Addr::new(0, 0, 0, 0);\n\n#[derive(Debug)]\npub enum AuthKey {\n    Seed(Vec<u8>),\n    PrivateKey(Vec<u8>),\n}\n#[derive(Default, Debug)]\npub enum StorageConfiguration {\n    #[default]\n    RAM,\n    RocksDB(Option<PathBuf>),\n}\n\n#[derive(Serialize, Deserialize, Debug)]\n#[serde(rename_all = \"kebab-case\")]\npub struct TceConfig {\n    #[serde(skip)]\n    pub auth_key: Option<AuthKey>,\n    #[serde(skip)]\n    pub signing_key: Option<AuthKey>,\n    #[serde(skip)]\n    pub tce_params: ReliableBroadcastParams,\n    #[serde(skip)]\n    pub boot_peers: Vec<(PeerId, Multiaddr)>,\n    #[serde(skip)]\n    pub validators: HashSet<ValidatorId>,\n    #[serde(skip)]\n    pub storage: StorageConfiguration,\n\n    #[serde(skip)]\n    pub version: &'static str,\n\n    /// Storage database path, if not set RAM storage is used\n    #[serde(default = \"default_db_path\")]\n    pub db_path: PathBuf,\n    /// Array of extra boot nodes to connect to\n    pub extra_boot_peers: Option<String>,\n    /// Connection degree for the GossipSub overlay\n    #[serde(default = \"default_minimum_tce_cluster_size\")]\n    pub minimum_tce_cluster_size: usize,\n\n    /// libp2p addresses\n    pub libp2p_api_addr: Option<SocketAddr>,\n\n    /// P2P configuration\n    #[serde(default)]\n    pub p2p: P2PConfig,\n\n    /// Synchronization configuration\n    #[serde(default)]\n    pub synchronization: 
SynchronizationConfig,\n\n    /// gRPC API Addr\n    #[serde(default = \"default_grpc_api_addr\")]\n    pub grpc_api_addr: SocketAddr,\n    /// GraphQL API Addr\n    #[serde(default = \"default_graphql_api_addr\")]\n    pub graphql_api_addr: SocketAddr,\n    /// Metrics server API Addr\n    #[serde(default = \"default_metrics_api_addr\")]\n    pub metrics_api_addr: SocketAddr,\n    /// Socket of the opentelemetry agent endpoint\n    /// If not provided open telemetry will not be used\n    pub otlp_agent: Option<String>,\n    /// Otlp service name\n    /// If not provided open telemetry will not be used\n    pub otlp_service_name: Option<String>,\n\n    #[serde(default = \"default_network_bootstrap_timeout\")]\n    pub network_bootstrap_timeout: u64,\n}\n\nconst fn default_network_bootstrap_timeout() -> u64 {\n    90\n}\n\nfn default_db_path() -> PathBuf {\n    PathBuf::from(\"./tce_rocksdb\")\n}\nconst fn default_minimum_tce_cluster_size() -> usize {\n    NetworkConfig::MINIMUM_CLUSTER_SIZE\n}\n\nconst fn default_grpc_api_addr() -> SocketAddr {\n    SocketAddr::V4(std::net::SocketAddrV4::new(DEFAULT_IP, 1340))\n}\n\nconst fn default_graphql_api_addr() -> SocketAddr {\n    SocketAddr::V4(std::net::SocketAddrV4::new(DEFAULT_IP, 4030))\n}\n\nconst fn default_metrics_api_addr() -> SocketAddr {\n    SocketAddr::V4(std::net::SocketAddrV4::new(DEFAULT_IP, 3000))\n}\n\nimpl TceConfig {\n    pub fn parse_boot_peers(&self) -> Vec<(PeerId, Multiaddr)> {\n        self.extra_boot_peers\n            .clone()\n            .unwrap_or_default()\n            .split(&[',', ' '])\n            .map(|s| s.to_string())\n            .collect::<Vec<String>>()\n            .chunks(2)\n            .filter_map(|pair| {\n                if pair.len() > 1 {\n                    Some((\n                        pair[0].as_str().parse().unwrap(),\n                        pair[1].as_str().parse().unwrap(),\n                    ))\n                } else {\n                    None\n                
}\n            })\n            .collect()\n    }\n}\n\nimpl Config for TceConfig {\n    type Output = TceConfig;\n\n    fn load_from_file(figment: Figment, home: &Path) -> Figment {\n        let home = home.join(\"config.toml\");\n\n        let tce = Figment::new()\n            .merge(Toml::file(home).nested())\n            .select(\"tce\");\n\n        figment.merge(tce)\n    }\n\n    fn load_context(figment: Figment) -> Result<Self::Output, figment::Error> {\n        figment.extract()\n    }\n\n    fn profile() -> String {\n        \"tce\".to_string()\n    }\n}\n"
  },
  {
    "path": "crates/topos-core/.rustfmt.toml",
    "content": "unstable_features = true\nignore = [\n    \"src/generated\",\n]\n"
  },
  {
    "path": "crates/topos-core/Cargo.toml",
    "content": "[package]\nname = \"topos-core\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n[lints]\nworkspace = true\n\n[dependencies]\ntopos-crypto = { path = \"../topos-crypto\" }\nbincode.workspace = true\nthiserror.workspace = true\nhex.workspace = true\nethereum-types.workspace = true\n\ntonic = { workspace = true, default-features = false, features = [\n    \"prost\",\n    \"codegen\",\n    \"transport\",\n] }\n\nasync-graphql.workspace = true\nasync-trait.workspace = true\nbase64ct.workspace = true\nprost.workspace = true\nserde = { workspace = true, features = [\"derive\"] }\ntracing.workspace = true\nuuid.workspace = true\n\n[build-dependencies]\ntonic-build = { version = \"0.11\", default-features = false, features = [\n    \"prost\", \"transport\"\n] }\n\n[dev-dependencies]\nasync-stream.workspace = true\nenv_logger.workspace = true\nfutures.workspace = true\nrstest.workspace = true\ntest-log.workspace = true\ntokio-stream.workspace = true\ntokio.workspace = true\ntracing-subscriber = { workspace = true, features = [\"env-filter\", \"fmt\"] }\ntracing.workspace = true\n\ntopos-test-sdk = { path = \"../topos-test-sdk/\" }\n[features]\ndefault = []\n\nuci = []\napi = []\n\n[package.metadata.docs.rs]\nall-features = true\n# enable unstable features in the documentation\nrustc-args = [\"--cfg\", \"docsrs\"]\n"
  },
  {
    "path": "crates/topos-core/build.rs",
    "content": "use std::path::PathBuf;\n\nfn main() -> Result<(), Box<dyn std::error::Error>> {\n    let descriptor_path = PathBuf::from(\"src/api/grpc/generated\").join(\"topos.bin\");\n\n    tonic_build::configure()\n        .file_descriptor_set_path(descriptor_path)\n        .type_attribute(\n            \".topos.shared.v1.UUID\",\n            \"#[derive(Copy, serde::Deserialize, serde::Serialize)]\",\n        )\n        .type_attribute(\n            \".topos.shared.v1.SubnetId\",\n            \"#[derive(Eq, Hash, serde::Deserialize, serde::Serialize)]\",\n        )\n        .type_attribute(\n            \".topos.shared.v1.CertificateId\",\n            \"#[derive(Eq, Hash, serde::Deserialize, serde::Serialize)]\",\n        )\n        .type_attribute(\n            \".topos.shared.v1.Frost\",\n            \"#[derive(Eq, Hash, serde::Deserialize, serde::Serialize)]\",\n        )\n        .type_attribute(\n            \".topos.shared.v1.StarkProof\",\n            \"#[derive(Eq, Hash, serde::Deserialize, serde::Serialize)]\",\n        )\n        .type_attribute(\n            \".topos.tce.v1.SignedReady\",\n            \"#[derive(serde::Deserialize, serde::Serialize)]\",\n        )\n        .type_attribute(\n            \".topos.shared.v1.Positions.SourceStreamPosition\",\n            \"#[derive(serde::Deserialize, serde::Serialize)]\",\n        )\n        .type_attribute(\n            \".topos.tce.v1.ProofOfDelivery\",\n            \"#[derive(serde::Deserialize, serde::Serialize)]\",\n        )\n        .type_attribute(\n            \".topos.tce.v1.CheckpointResponse\",\n            \"#[derive(serde::Deserialize, serde::Serialize)]\",\n        )\n        .type_attribute(\n            \".topos.tce.v1.CheckpointRequest\",\n            \"#[derive(serde::Deserialize, serde::Serialize)]\",\n        )\n        .type_attribute(\n            \".topos.tce.v1.CheckpointMapFieldEntry\",\n            \"#[derive(serde::Deserialize, serde::Serialize)]\",\n        )\n        
.type_attribute(\n            \".topos.shared.v1.EcdsaSignature\",\n            \"#[derive(Eq, Hash, serde::Deserialize, serde::Serialize)]\",\n        )\n        .type_attribute(\n            \".topos.shared.v1.ValidatorId\",\n            \"#[derive(Eq, Hash, serde::Deserialize, serde::Serialize)]\",\n        )\n        .type_attribute(\n            \".topos.tce.v1.Gossip\",\n            \"#[derive(serde::Deserialize, serde::Serialize)]\",\n        )\n        .type_attribute(\n            \".topos.tce.v1.Echo\",\n            \"#[derive(serde::Deserialize, serde::Serialize)]\",\n        )\n        .type_attribute(\n            \".topos.tce.v1.Ready\",\n            \"#[derive(serde::Deserialize, serde::Serialize)]\",\n        )\n        .type_attribute(\n            \".topos.tce.v1.DoubleEchoRequest\",\n            \"#[derive(serde::Deserialize, serde::Serialize)]\",\n        )\n        .type_attribute(\n            \".topos.tce.v1.Batch\",\n            \"#[derive(serde::Deserialize, serde::Serialize)]\",\n        )\n        .type_attribute(\n            \".topos.uci.v1.Certificate\",\n            \"#[derive(Eq, Hash, serde::Deserialize, serde::Serialize)]\",\n        )\n        .out_dir(\"src/api/grpc/generated\")\n        .compile(\n            &[\n                \"proto/topos/shared/v1/uuid.proto\",\n                \"proto/topos/shared/v1/subnet.proto\",\n                \"proto/topos/shared/v1/validator_id.proto\",\n                \"proto/topos/tce/v1/api.proto\",\n                \"proto/topos/tce/v1/console.proto\",\n                \"proto/topos/tce/v1/synchronization.proto\",\n                \"proto/topos/tce/v1/double_echo.proto\",\n                \"proto/topos/tce/v1/gossipsub.proto\",\n                \"proto/topos/uci/v1/certification.proto\",\n                \"proto/topos/p2p/info.proto\",\n            ],\n            &[\"proto\"],\n        )?;\n    Ok(())\n}\n"
  },
  {
    "path": "crates/topos-core/proto/buf.yaml",
    "content": "version: v1\nbreaking:\n  use:\n    - FILE\nlint:\n  use:\n    - DEFAULT\n"
  },
  {
    "path": "crates/topos-core/proto/topos/p2p/info.proto",
    "content": "syntax = \"proto3\";\n\npackage topos.p2p;\n\nservice InfoService {\n}\n"
  },
  {
    "path": "crates/topos-core/proto/topos/shared/v1/certificate.proto",
    "content": "syntax = \"proto3\";\n\npackage topos.shared.v1;\n\nmessage CertificateId {\n  bytes value = 1;\n}\n"
  },
  {
    "path": "crates/topos-core/proto/topos/shared/v1/checkpoints.proto",
    "content": "syntax = \"proto3\";\n\npackage topos.shared.v1;\n\nimport \"topos/shared/v1/certificate.proto\";\nimport \"topos/shared/v1/subnet.proto\";\n\n// Checkpoints are used to walk through streams\nmessage Checkpoints {\n  // SourceCheckpoint represents a snapshot of multiple stream's positions regarding\n  // one or multiple source subnets.\n  message SourceCheckpoint {\n    repeated SubnetId source_subnet_ids = 1;\n    repeated Positions.SourceStreamPosition positions = 2;\n  }\n\n  // TargetCheckpoint represents a snapshot of multiple stream's positions regarding\n  // one or multiple target subnets.\n  message TargetCheckpoint {\n    repeated SubnetId target_subnet_ids = 1;\n    repeated Positions.TargetStreamPosition positions = 2;\n  }\n}\n\nmessage Positions {\n  // SourceStreamPosition represents a single point in a source stream.\n  // It is defined by a source_subnet_id and a position, resolving to a certificate_id\n  message SourceStreamPosition {\n    // The source_subnet_id is a mandatory field for the SourceStreamPosition\n    SubnetId source_subnet_id = 1;\n    uint64 position = 2;\n    CertificateId certificate_id = 3;\n  }\n\n  // TargetStreamPosition represents a single point in a target stream regarding a source subnet.\n  // It is defined by a target_subnet_id, source_subnet_id and a position, resolving to a certificate_id\n  message TargetStreamPosition {\n    // The source_subnet_id is a mandatory field for the TargetStreamPosition\n    SubnetId source_subnet_id = 1;\n    // The target_subnet_id is a mandatory field for the TargetStreamPosition\n    SubnetId target_subnet_id = 2;\n    uint64 position = 3;\n    CertificateId certificate_id = 4;\n  }\n}\n"
  },
  {
    "path": "crates/topos-core/proto/topos/shared/v1/frost.proto",
    "content": "syntax = \"proto3\";\n\npackage topos.shared.v1;\n\nmessage Frost {\n  bytes value = 1;\n}\n"
  },
  {
    "path": "crates/topos-core/proto/topos/shared/v1/signature.proto",
    "content": "syntax = \"proto3\";\n\npackage topos.shared.v1;\n\n// A signature using the ECDSA algorithm.\n// Used to sign double echo protocol messages.\nmessage EcdsaSignature {\n  bytes r = 1;\n  bytes s = 2;\n  uint64 v = 3;\n}\n"
  },
  {
    "path": "crates/topos-core/proto/topos/shared/v1/stark_proof.proto",
    "content": "syntax = \"proto3\";\n\npackage topos.shared.v1;\n\nmessage StarkProof {\n  bytes value = 1;\n}\n"
  },
  {
    "path": "crates/topos-core/proto/topos/shared/v1/subnet.proto",
    "content": "syntax = \"proto3\";\n\npackage topos.shared.v1;\n\nmessage SubnetId {\n  bytes value = 1;\n}\n"
  },
  {
    "path": "crates/topos-core/proto/topos/shared/v1/uuid.proto",
    "content": "syntax = \"proto3\";\n\npackage topos.shared.v1;\n\nmessage UUID {\n  uint64 most_significant_bits = 1;\n  uint64 least_significant_bits = 2;\n}\n"
  },
  {
    "path": "crates/topos-core/proto/topos/shared/v1/validator_id.proto",
    "content": "syntax = \"proto3\";\n\npackage topos.shared.v1;\n\n// Id of the validator in the Topos protocol network\n// This is the same as the validator's H160 address in the Ethereum compatible network\nmessage ValidatorId {\n  // The validator's H160 address\n  bytes value = 1;\n}\n"
  },
  {
    "path": "crates/topos-core/proto/topos/tce/v1/api.proto",
    "content": "syntax = \"proto3\";\n\npackage topos.tce.v1;\n\nimport \"topos/shared/v1/checkpoints.proto\";\nimport \"topos/shared/v1/subnet.proto\";\nimport \"topos/shared/v1/uuid.proto\";\nimport \"topos/tce/v1/synchronization.proto\";\nimport \"topos/uci/v1/certification.proto\";\nimport \"topos/shared/v1/certificate.proto\";\n\n\nservice APIService {\n  rpc SubmitCertificate(SubmitCertificateRequest) returns (SubmitCertificateResponse);\n\n  rpc GetSourceHead(GetSourceHeadRequest) returns (GetSourceHeadResponse);\n\n  /// This RPC allows a client to get latest pending certificates for\n  /// requested subnets (by their subnet id)\n  ///\n  /// Returns a map of subnet_id -> last pending certificate\n  /// If there are no pending certificate for a subnet, returns None for that subnet id\n  rpc GetLastPendingCertificates(GetLastPendingCertificatesRequest) returns (GetLastPendingCertificatesResponse);\n\n  // This RPC allows a client to open a bidirectional stream with a TCE\n  rpc WatchCertificates(stream WatchCertificatesRequest) returns (stream WatchCertificatesResponse);\n}\n\nmessage SubmitCertificateRequest {\n  topos.uci.v1.Certificate certificate = 1;\n}\n\nmessage SubmitCertificateResponse {}\n\nmessage GetSourceHeadRequest {\n  topos.shared.v1.SubnetId subnet_id = 1;\n}\n\nmessage GetSourceHeadResponse {\n  topos.shared.v1.Positions.SourceStreamPosition position = 1;\n  topos.uci.v1.Certificate certificate = 2;\n}\n\nmessage GetLastPendingCertificatesRequest {\n  repeated topos.shared.v1.SubnetId subnet_ids = 1;\n}\n\nmessage LastPendingCertificate {\n  topos.uci.v1.Certificate value = 1;\n  // Pending certificate index (effectively total number of pending certificates)\n  uint64 index = 2;\n}\n\nmessage GetLastPendingCertificatesResponse {\n  // Bytes and array types (SubnetId) could not be key in the map type according to specifications,\n  // so we use SubnetId hex encoded string with 0x prefix as key\n  map<string, LastPendingCertificate> 
last_pending_certificate = 1;\n}\n\nmessage WatchCertificatesRequest {\n  // Provide a request_id to track response\n  topos.shared.v1.UUID request_id = 1;\n\n  // Define which command needs to be performed\n  oneof command {\n    OpenStream open_stream = 2;\n  }\n\n  // Sent to start receiving events and being able to send further command\n  message OpenStream {\n    topos.shared.v1.Checkpoints.TargetCheckpoint target_checkpoint = 1;\n    topos.shared.v1.Checkpoints.SourceCheckpoint source_checkpoint = 2;\n  }\n}\n\nmessage WatchCertificatesResponse {\n  // If the response is directly linked to a request this ID allow one to track it\n  topos.shared.v1.UUID request_id = 1;\n\n  oneof event {\n    StreamOpened stream_opened = 2;\n    CertificatePushed certificate_pushed = 3;\n  }\n\n  // Sent by the TCE when the stream is ready to be used and\n  // that certificates will start being pushed\n  message StreamOpened {\n    repeated topos.shared.v1.SubnetId subnet_ids = 1;\n  }\n\n  // Target Certificate pushed from the TCE to the sequencer\n  message CertificatePushed {\n    topos.uci.v1.Certificate certificate = 1;\n    repeated topos.shared.v1.Positions.TargetStreamPosition positions = 2;\n  }\n}\n"
  },
  {
    "path": "crates/topos-core/proto/topos/tce/v1/console.proto",
    "content": "syntax = \"proto3\";\n\npackage topos.tce.v1;\n\nimport \"topos/shared/v1/uuid.proto\";\n\nservice ConsoleService {\n  rpc Status(StatusRequest) returns (StatusResponse);\n}\n\nmessage StatusRequest {}\nmessage StatusResponse {\n  bool has_active_sample = 1;\n}\n"
  },
  {
    "path": "crates/topos-core/proto/topos/tce/v1/double_echo.proto",
    "content": "syntax = \"proto3\";\n\npackage topos.tce.v1;\n\nimport \"topos/shared/v1/certificate.proto\";\nimport \"topos/shared/v1/signature.proto\";\nimport \"topos/shared/v1/validator_id.proto\";\nimport \"topos/uci/v1/certification.proto\";\n\nmessage Gossip {\n  topos.uci.v1.Certificate certificate = 1;\n}\n\nmessage Echo {\n  topos.shared.v1.CertificateId certificate_id = 1;\n  topos.shared.v1.EcdsaSignature signature = 2;\n  topos.shared.v1.ValidatorId validator_id = 3;\n}\n\nmessage Ready {\n  topos.shared.v1.CertificateId certificate_id = 1;\n  topos.shared.v1.EcdsaSignature signature = 2;\n  topos.shared.v1.ValidatorId validator_id = 3;\n}\n\nmessage DoubleEchoRequest {\n  oneof request {\n    Gossip gossip = 1;\n    Echo echo = 2;\n    Ready ready = 3;\n  }\n}\n"
  },
  {
    "path": "crates/topos-core/proto/topos/tce/v1/gossipsub.proto",
    "content": "syntax = \"proto3\";\n\npackage topos.tce.v1;\n\nimport \"topos/tce/v1/double_echo.proto\";\n\nmessage Batch {\n  repeated bytes messages = 1;\n}\n"
  },
  {
    "path": "crates/topos-core/proto/topos/tce/v1/synchronization.proto",
    "content": "syntax = \"proto3\";\n\npackage topos.tce.v1;\n\nimport \"topos/shared/v1/checkpoints.proto\";\nimport \"topos/shared/v1/certificate.proto\";\nimport \"topos/shared/v1/subnet.proto\";\nimport \"topos/shared/v1/uuid.proto\";\nimport \"topos/uci/v1/certification.proto\";\n\nservice SynchronizerService {\n  rpc fetch_checkpoint(CheckpointRequest) returns (CheckpointResponse);\n  rpc fetch_certificates(FetchCertificatesRequest) returns (FetchCertificatesResponse);\n}\n\nmessage CheckpointRequest {\n  // Provide a request_id to track response\n  topos.shared.v1.UUID request_id = 1;\n\n  repeated ProofOfDelivery checkpoint = 2;\n\n  uint64 limit_per_subnet = 3;\n}\n\nmessage CheckpointResponse {\n  // If the response is directly linked to a request this ID allow one to track it\n  topos.shared.v1.UUID request_id = 1;\n\n  repeated CheckpointMapFieldEntry checkpoint_diff = 2;\n}\n\nmessage CheckpointMapFieldEntry {\n    string key = 1;\n    repeated ProofOfDelivery value = 2;\n}\n\nmessage FetchCertificatesRequest {\n  // Provide a request_id to track response\n  topos.shared.v1.UUID request_id = 1;\n\n  repeated topos.shared.v1.CertificateId certificates = 2;\n}\n\nmessage FetchCertificatesResponse {\n  // Provide a request_id to track response\n  topos.shared.v1.UUID request_id = 1;\n  repeated topos.uci.v1.Certificate certificates =2;\n}\n\nmessage ProofOfDelivery {\n  topos.shared.v1.Positions.SourceStreamPosition delivery_position = 1;\n  repeated SignedReady readies = 2;\n  uint64 threshold = 3;\n}\n\nmessage SignedReady {\n    string ready = 1;\n    string signature = 2;\n}\n\n"
  },
  {
    "path": "crates/topos-core/proto/topos/uci/v1/certification.proto",
    "content": "syntax = \"proto3\";\n\npackage topos.uci.v1;\n\nimport \"topos/shared/v1/certificate.proto\";\nimport \"topos/shared/v1/frost.proto\";\nimport \"topos/shared/v1/stark_proof.proto\";\nimport \"topos/shared/v1/subnet.proto\";\n\n// Certificate - main exchange item\nmessage Certificate {\n  topos.shared.v1.CertificateId prev_id = 1;\n  topos.shared.v1.SubnetId source_subnet_id = 2;\n  bytes state_root = 3;\n  bytes tx_root_hash = 4;\n  bytes receipts_root_hash = 5;\n  repeated topos.shared.v1.SubnetId target_subnets = 6;\n  uint32 verifier = 7;\n  topos.shared.v1.CertificateId id = 8;\n  topos.shared.v1.StarkProof proof = 9;\n  topos.shared.v1.Frost signature = 10;\n}\n\n\nmessage OptionalCertificate {\n  Certificate value = 1;\n}\n"
  },
  {
    "path": "crates/topos-core/src/api/graphql/certificate.rs",
    "content": "use async_graphql::{NewType, SimpleObject};\nuse serde::{Deserialize, Serialize};\n\nuse crate::{types::CertificateDelivered, uci};\n\nuse super::{checkpoint::SourceStreamPosition, subnet::SubnetId};\n\n#[derive(Serialize, Deserialize, Debug, NewType)]\npub struct CertificateId(String);\n\nimpl From<uci::CertificateId> for CertificateId {\n    fn from(value: uci::CertificateId) -> Self {\n        Self(value.to_string())\n    }\n}\n\n#[derive(Serialize, Deserialize, Debug, SimpleObject)]\n#[serde(rename_all = \"camelCase\")]\npub struct CertificatePositions {\n    source: SourceStreamPosition,\n}\n\n/// A certificate that has been delivered\n#[derive(Debug, Serialize, Deserialize, SimpleObject)]\n#[serde(rename_all = \"camelCase\")]\npub struct Certificate {\n    pub id: CertificateId,\n    pub prev_id: CertificateId,\n    pub proof: String,\n    pub signature: String,\n    pub source_subnet_id: SubnetId,\n    pub state_root: String,\n    pub target_subnets: Vec<SubnetId>,\n    pub tx_root_hash: String,\n    pub receipts_root_hash: String,\n    pub verifier: u32,\n    pub positions: CertificatePositions,\n}\n\n/// A certificate that has not been delivered yet\n#[derive(Debug, Serialize, Deserialize, SimpleObject)]\n#[serde(rename_all = \"camelCase\")]\npub struct UndeliveredCertificate {\n    pub id: CertificateId,\n    pub prev_id: CertificateId,\n    pub proof: String,\n    pub signature: String,\n    pub source_subnet_id: SubnetId,\n    pub state_root: String,\n    pub target_subnets: Vec<SubnetId>,\n    pub tx_root_hash: String,\n    pub receipts_root_hash: String,\n    pub verifier: u32,\n}\n\nimpl From<&uci::Certificate> for UndeliveredCertificate {\n    fn from(value: &crate::uci::Certificate) -> Self {\n        Self {\n            id: CertificateId(value.id.to_string()),\n            prev_id: CertificateId(value.prev_id.to_string()),\n            proof: hex::encode(&value.proof),\n            signature: hex::encode(&value.signature),\n        
    source_subnet_id: (&value.source_subnet_id).into(),\n            state_root: hex::encode(value.state_root),\n            target_subnets: value.target_subnets.iter().map(Into::into).collect(),\n            tx_root_hash: hex::encode(value.tx_root_hash),\n            receipts_root_hash: format!(\"0x{}\", hex::encode(value.receipts_root_hash)),\n            verifier: value.verifier,\n        }\n    }\n}\n\n#[derive(Debug, Serialize, Deserialize, SimpleObject)]\npub struct Ready {\n    message: String,\n    signature: String,\n}\n\nimpl From<&CertificateDelivered> for Certificate {\n    fn from(value: &CertificateDelivered) -> Self {\n        let uci_cert = &value.certificate;\n\n        Self {\n            id: CertificateId(uci_cert.id.to_string()),\n            prev_id: CertificateId(uci_cert.prev_id.to_string()),\n            proof: hex::encode(&uci_cert.proof),\n            signature: hex::encode(&uci_cert.signature),\n            source_subnet_id: (&uci_cert.source_subnet_id).into(),\n            state_root: hex::encode(uci_cert.state_root),\n            target_subnets: uci_cert.target_subnets.iter().map(Into::into).collect(),\n            tx_root_hash: hex::encode(uci_cert.tx_root_hash),\n            receipts_root_hash: format!(\"0x{}\", hex::encode(uci_cert.receipts_root_hash)),\n            verifier: uci_cert.verifier,\n            positions: CertificatePositions {\n                source: (&value.proof_of_delivery).into(),\n            },\n        }\n    }\n}\n\nimpl TryFrom<CertificateId> for crate::uci::CertificateId {\n    type Error = uci::Error;\n\n    fn try_from(value: CertificateId) -> Result<Self, Self::Error> {\n        crate::uci::CertificateId::try_from(value.0.as_bytes())\n    }\n}\n"
  },
  {
    "path": "crates/topos-core/src/api/graphql/checkpoint.rs",
    "content": "use async_graphql::{InputObject, SimpleObject};\nuse serde::{Deserialize, Serialize};\n\nuse crate::types::ProofOfDelivery;\n\nuse super::{certificate::CertificateId, subnet::SubnetId};\n\n#[derive(InputObject)]\npub struct SourceStreamPositionInput {\n    pub source_subnet_id: SubnetId,\n    pub position: u64,\n    pub certificate_id: Option<CertificateId>,\n}\n\n#[derive(Debug, Deserialize, Serialize, SimpleObject)]\n#[serde(rename_all = \"camelCase\")]\npub struct SourceStreamPosition {\n    pub source_subnet_id: SubnetId,\n    pub position: u64,\n    pub certificate_id: CertificateId,\n}\n\nimpl From<&ProofOfDelivery> for SourceStreamPosition {\n    fn from(value: &ProofOfDelivery) -> Self {\n        Self {\n            certificate_id: value.certificate_id.into(),\n            source_subnet_id: (&value.delivery_position.subnet_id).into(),\n            position: *value.delivery_position.position,\n        }\n    }\n}\n\n#[derive(InputObject)]\npub struct SourceCheckpointInput {\n    pub source_subnet_ids: Vec<SubnetId>,\n    pub positions: Vec<SourceStreamPositionInput>,\n}\n"
  },
  {
    "path": "crates/topos-core/src/api/graphql/errors.rs",
    "content": "#[derive(Debug, thiserror::Error)]\npub enum GraphQLServerError {\n    #[error(\"The provided data layer is invalid\")]\n    ParseDataConnector,\n\n    #[error(\"The provided subnet_id is not a proper HEX value\")]\n    ParseSubnetId,\n\n    #[error(\"The provided certificate_id is not a proper HEX value\")]\n    ParseCertificateId,\n\n    #[error(\"Internal Server Error\")]\n    StorageError,\n\n    #[error(\"Certificate not found\")]\n    CertificateNotFound,\n\n    #[error(\"Unable to create transient stream: {0}\")]\n    TransientStream(String),\n\n    #[error(\"Internal API error: {0}\")]\n    InternalError(&'static str),\n}\n"
  },
  {
    "path": "crates/topos-core/src/api/graphql/filter.rs",
    "content": "use crate::api::graphql::subnet::SubnetId;\n\n#[derive(Debug, serde::Serialize, serde::Deserialize, async_graphql::OneofObject)]\npub enum SubnetFilter {\n    Source(SubnetId),\n    Target(SubnetId),\n}\n"
  },
  {
    "path": "crates/topos-core/src/api/graphql/mod.rs",
    "content": "pub mod certificate;\npub mod checkpoint;\npub mod errors;\npub mod filter;\npub mod query;\npub mod subnet;\n"
  },
  {
    "path": "crates/topos-core/src/api/graphql/query.rs",
    "content": "use crate::api::graphql::certificate::{Certificate, CertificateId};\nuse crate::api::graphql::checkpoint::SourceCheckpointInput;\nuse crate::api::graphql::errors::GraphQLServerError;\n\nuse async_graphql::Context;\nuse async_trait::async_trait;\n\n#[async_trait]\npub trait CertificateQuery {\n    async fn certificates_per_subnet(\n        ctx: &Context<'_>,\n        from_source_checkpoint: SourceCheckpointInput,\n        first: usize,\n    ) -> Result<Vec<Certificate>, GraphQLServerError>;\n\n    async fn certificate_by_id(\n        ctx: &Context<'_>,\n        certificate_id: CertificateId,\n    ) -> Result<Certificate, GraphQLServerError>;\n}\n"
  },
  {
    "path": "crates/topos-core/src/api/graphql/subnet.rs",
    "content": "use async_graphql::NewType;\nuse serde::{Deserialize, Serialize};\nuse std::str::FromStr;\nuse tracing::error;\n\nuse super::errors::GraphQLServerError;\n\n#[derive(Clone, Debug, Serialize, Deserialize, NewType, PartialEq, Eq)]\npub struct SubnetId(pub(crate) String);\n\nimpl TryFrom<&SubnetId> for crate::uci::SubnetId {\n    type Error = GraphQLServerError;\n\n    fn try_from(value: &SubnetId) -> Result<Self, Self::Error> {\n        Self::from_str(value.0.as_str()).map_err(|e| {\n            error!(\"Failed to convert SubnetId from GraphQL input {e:?}\");\n            GraphQLServerError::ParseDataConnector\n        })\n    }\n}\n\nimpl From<&crate::uci::SubnetId> for SubnetId {\n    fn from(uci_id: &crate::uci::SubnetId) -> Self {\n        Self(uci_id.to_string())\n    }\n}\n\nimpl PartialEq<crate::uci::SubnetId> for SubnetId {\n    fn eq(&self, other: &crate::uci::SubnetId) -> bool {\n        if let Ok(current) = crate::uci::SubnetId::from_str(&self.0) {\n            other.as_array().eq(current.as_array())\n        } else {\n            error!(\"Failed to parse the subnet id {} during comparison\", self.0);\n            false\n        }\n    }\n}\n"
  },
  {
    "path": "crates/topos-core/src/api/grpc/checkpoints/errors.rs",
    "content": "use crate::api::grpc::shared::v1_conversions_subnet::Error;\n\n#[derive(Debug, thiserror::Error)]\npub enum TargetCheckpointError {\n    #[error(\"Subnet format is invalid\")]\n    InvalidSubnetFormat,\n    #[error(\"Invalid target stream position\")]\n    InvalidTargetStreamPosition,\n    #[error(\"Checkpoint parse error\")]\n    ParseError,\n}\n\n#[derive(Debug, thiserror::Error)]\npub enum StreamPositionError {\n    #[error(\"The target_subnet_id field is missing\")]\n    MissingTargetSubnetId,\n    #[error(\"The source_subnet_id field is missing\")]\n    MissingSourceSubnetId,\n    #[error(\"Unable to parse SubnetId: {0}\")]\n    InvalidSubnetFormat(#[from] Error),\n    #[error(\"Unable to parse CertificateId\")]\n    InvalidCertificateIdFormat,\n}\n"
  },
  {
    "path": "crates/topos-core/src/api/grpc/checkpoints/mod.rs",
    "content": "use crate::api::grpc::shared::v1 as shared_v1;\nuse crate::uci::SubnetId;\n\nmod errors;\nmod positions;\n\npub use errors::*;\npub use positions::*;\n\n#[derive(Debug, Default, PartialEq, Eq)]\npub struct TargetCheckpoint {\n    pub target_subnet_ids: Vec<SubnetId>,\n    pub positions: Vec<TargetStreamPosition>,\n}\n\nimpl TryFrom<shared_v1::checkpoints::TargetCheckpoint> for TargetCheckpoint {\n    type Error = TargetCheckpointError;\n\n    fn try_from(value: shared_v1::checkpoints::TargetCheckpoint) -> Result<Self, Self::Error> {\n        Ok(TargetCheckpoint {\n            target_subnet_ids: value\n                .target_subnet_ids\n                .into_iter()\n                .map(TryInto::try_into)\n                .collect::<Result<Vec<SubnetId>, _>>()\n                .map_err(|_| TargetCheckpointError::InvalidSubnetFormat)?,\n            positions: value\n                .positions\n                .into_iter()\n                .map(TryInto::try_into)\n                .collect::<Result<Vec<TargetStreamPosition>, _>>()\n                .map_err(|_| TargetCheckpointError::InvalidTargetStreamPosition)?,\n        })\n    }\n}\n\nimpl From<TargetCheckpoint> for shared_v1::checkpoints::TargetCheckpoint {\n    fn from(value: TargetCheckpoint) -> Self {\n        Self {\n            target_subnet_ids: value\n                .target_subnet_ids\n                .into_iter()\n                .map(Into::into)\n                .collect(),\n            positions: value.positions.into_iter().map(Into::into).collect(),\n        }\n    }\n}\n"
  },
  {
    "path": "crates/topos-core/src/api/grpc/checkpoints/positions.rs",
    "content": "use crate::api::grpc::checkpoints::StreamPositionError;\nuse crate::api::grpc::shared::v1 as shared_v1;\nuse crate::uci::{CertificateId, SubnetId};\n\n#[derive(Debug, Clone, PartialEq, Eq, Hash)]\npub struct TargetStreamPosition {\n    pub target_subnet_id: SubnetId,\n    pub source_subnet_id: SubnetId,\n    pub position: u64,\n    pub certificate_id: Option<CertificateId>,\n}\n\nimpl TryFrom<shared_v1::positions::TargetStreamPosition> for TargetStreamPosition {\n    type Error = StreamPositionError;\n\n    fn try_from(value: shared_v1::positions::TargetStreamPosition) -> Result<Self, Self::Error> {\n        Ok(Self {\n            target_subnet_id: value\n                .target_subnet_id\n                .map(TryInto::try_into)\n                .ok_or(StreamPositionError::MissingTargetSubnetId)??,\n            source_subnet_id: value\n                .source_subnet_id\n                .map(TryInto::try_into)\n                .ok_or(StreamPositionError::MissingSourceSubnetId)??,\n            position: value.position,\n            certificate_id: value\n                .certificate_id\n                .map(TryInto::try_into)\n                .map_or(Ok(None), |v| {\n                    v.map(Some)\n                        .map_err(|_| StreamPositionError::InvalidCertificateIdFormat)\n                })?,\n        })\n    }\n}\n\nimpl From<TargetStreamPosition> for shared_v1::positions::TargetStreamPosition {\n    fn from(value: TargetStreamPosition) -> Self {\n        Self {\n            source_subnet_id: Some(value.source_subnet_id.into()),\n            target_subnet_id: Some(value.target_subnet_id.into()),\n            position: value.position,\n            certificate_id: value.certificate_id.map(Into::into),\n        }\n    }\n}\n\n#[derive(Debug, Clone, PartialEq, Eq, Hash)]\npub struct SourceStreamPosition {\n    pub source_subnet_id: SubnetId,\n    pub position: u64,\n    pub certificate_id: Option<CertificateId>,\n}\n\nimpl 
TryFrom<shared_v1::positions::SourceStreamPosition> for SourceStreamPosition {\n    type Error = StreamPositionError;\n\n    fn try_from(value: shared_v1::positions::SourceStreamPosition) -> Result<Self, Self::Error> {\n        Ok(Self {\n            source_subnet_id: value\n                .source_subnet_id\n                .map(TryInto::try_into)\n                .ok_or(StreamPositionError::MissingSourceSubnetId)??,\n            position: value.position,\n            certificate_id: value\n                .certificate_id\n                .map(TryInto::try_into)\n                .map_or(Ok(None), |v| {\n                    v.map(Some)\n                        .map_err(|_| StreamPositionError::InvalidCertificateIdFormat)\n                })?,\n        })\n    }\n}\n\nimpl From<SourceStreamPosition> for shared_v1::positions::SourceStreamPosition {\n    fn from(value: SourceStreamPosition) -> Self {\n        Self {\n            source_subnet_id: Some(value.source_subnet_id.into()),\n            position: value.position,\n            certificate_id: value.certificate_id.map(Into::into),\n        }\n    }\n}\n"
  },
  {
    "path": "crates/topos-core/src/api/grpc/conversions/shared/v1/certificate.rs",
    "content": "use crate::uci::CERTIFICATE_ID_LENGTH;\n\nuse super::v1::CertificateId;\n\nimpl std::fmt::Display for CertificateId {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        write!(f, \"0x{}\", hex::encode(&self.value))\n    }\n}\n\n#[derive(Debug, thiserror::Error)]\npub enum Error {\n    #[error(\"Unable to parse certificateId ({0})\")]\n    ValidationError(CertificateId),\n}\n\nimpl From<[u8; CERTIFICATE_ID_LENGTH]> for CertificateId {\n    fn from(value: [u8; CERTIFICATE_ID_LENGTH]) -> Self {\n        CertificateId {\n            value: value.to_vec(),\n        }\n    }\n}\n\nimpl From<crate::uci::CertificateId> for CertificateId {\n    fn from(value: crate::uci::CertificateId) -> Self {\n        CertificateId {\n            value: value.as_array().to_vec(),\n        }\n    }\n}\n\nimpl TryFrom<CertificateId> for crate::uci::CertificateId {\n    type Error = Error;\n\n    fn try_from(value: CertificateId) -> Result<Self, Self::Error> {\n        if value.value.len() != CERTIFICATE_ID_LENGTH {\n            return Err(Error::ValidationError(value));\n        }\n        let mut id = [0; CERTIFICATE_ID_LENGTH];\n\n        id.copy_from_slice(value.value.as_slice());\n\n        Ok(id.into())\n    }\n}\n\nimpl PartialEq<CertificateId> for crate::uci::CertificateId {\n    fn eq(&self, other: &CertificateId) -> bool {\n        if other.value.len() != CERTIFICATE_ID_LENGTH {\n            return false;\n        }\n        self.as_array() == &other.value[..CERTIFICATE_ID_LENGTH]\n    }\n}\n"
  },
  {
    "path": "crates/topos-core/src/api/grpc/conversions/shared/v1/signature.rs",
    "content": "use super::v1::EcdsaSignature;\nuse topos_crypto::messages::U256;\n\nimpl From<EcdsaSignature> for topos_crypto::messages::Signature {\n    fn from(proto: EcdsaSignature) -> Self {\n        topos_crypto::messages::Signature {\n            r: U256::from_big_endian(&proto.r),\n            s: U256::from_big_endian(&proto.s),\n            v: proto.v,\n        }\n    }\n}\n\nimpl From<topos_crypto::messages::Signature> for EcdsaSignature {\n    fn from(other: topos_crypto::messages::Signature) -> Self {\n        let mut ecdsa_signature = EcdsaSignature {\n            r: vec![0; 32],\n            s: vec![0; 32],\n            v: 0,\n        };\n        other.r.to_big_endian(&mut ecdsa_signature.r);\n        other.s.to_big_endian(&mut ecdsa_signature.s);\n        ecdsa_signature.v = other.v;\n\n        ecdsa_signature\n    }\n}\n"
  },
  {
    "path": "crates/topos-core/src/api/grpc/conversions/shared/v1/subnet.rs",
    "content": "use crate::uci::SUBNET_ID_LENGTH;\n\nuse super::v1::SubnetId;\nuse base64ct::{Base64, Encoding};\n\nimpl std::fmt::Display for SubnetId {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        write!(f, \"{}\", Base64::encode_string(&self.value))\n    }\n}\n\n#[derive(Debug, thiserror::Error)]\npub enum Error {\n    #[error(\"Unable to parse subnetId ({0})\")]\n    ValidationError(SubnetId),\n\n    #[error(\"Unable to parse UCI field ({0}))\")]\n    UCI(#[from] crate::uci::Error),\n\n    #[error(\"Missing mandatory field: {0}\")]\n    MissingField(&'static str),\n\n    #[error(\"Invalid or missing state_root\")]\n    InvalidStateRoot,\n\n    #[error(\"Invalid or missing tx_root_hash\")]\n    InvalidTxRootHash,\n\n    #[error(\"Invalid or missing receipts_root_hash\")]\n    InvalidReceiptsRootHash,\n}\n\nimpl From<[u8; SUBNET_ID_LENGTH]> for SubnetId {\n    fn from(value: [u8; SUBNET_ID_LENGTH]) -> Self {\n        SubnetId {\n            value: value.to_vec(),\n        }\n    }\n}\n\nimpl TryFrom<SubnetId> for [u8; SUBNET_ID_LENGTH] {\n    type Error = Error;\n\n    fn try_from(value: SubnetId) -> Result<Self, Self::Error> {\n        if value.value.len() != SUBNET_ID_LENGTH {\n            return Err(Error::ValidationError(value));\n        }\n        let mut id = [0; SUBNET_ID_LENGTH];\n\n        id.copy_from_slice(value.value.as_slice());\n\n        Ok(id)\n    }\n}\n\nimpl From<crate::uci::SubnetId> for SubnetId {\n    fn from(value: crate::uci::SubnetId) -> Self {\n        SubnetId {\n            value: value.as_array().to_vec(),\n        }\n    }\n}\n\nimpl TryFrom<SubnetId> for crate::uci::SubnetId {\n    type Error = Error;\n\n    fn try_from(value: SubnetId) -> Result<Self, Self::Error> {\n        if value.value.len() != SUBNET_ID_LENGTH {\n            return Err(Error::ValidationError(value));\n        }\n        let mut id = [0; SUBNET_ID_LENGTH];\n\n        id.copy_from_slice(value.value.as_slice());\n\n        
Ok(id.into())\n    }\n}\n"
  },
  {
    "path": "crates/topos-core/src/api/grpc/conversions/shared/v1/uuid.rs",
    "content": "use super::v1::Uuid;\n\nimpl From<(u64, u64)> for Uuid {\n    fn from((most_significant_bits, least_significant_bits): (u64, u64)) -> Self {\n        Self {\n            most_significant_bits,\n            least_significant_bits,\n        }\n    }\n}\n\nimpl From<Uuid> for uuid::Uuid {\n    fn from(proto: Uuid) -> Self {\n        Self::from_u64_pair(proto.most_significant_bits, proto.least_significant_bits)\n    }\n}\n\nimpl From<uuid::Uuid> for Uuid {\n    fn from(uuid: uuid::Uuid) -> Self {\n        uuid.as_u64_pair().into()\n    }\n}\n"
  },
  {
    "path": "crates/topos-core/src/api/grpc/conversions/shared/v1/validator_id.rs",
    "content": "use super::v1::ValidatorId;\nuse topos_crypto::messages::H160;\nuse topos_crypto::validator_id::{Error, VALIDATOR_ID_LENGTH};\n\nimpl std::fmt::Display for ValidatorId {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        write!(f, \"0x{}\", hex::encode(&self.value))\n    }\n}\n\nimpl From<topos_crypto::validator_id::ValidatorId> for ValidatorId {\n    fn from(other: topos_crypto::validator_id::ValidatorId) -> Self {\n        ValidatorId {\n            value: other.as_bytes().to_vec(),\n        }\n    }\n}\n\nimpl TryFrom<ValidatorId> for topos_crypto::validator_id::ValidatorId {\n    type Error = Error;\n\n    fn try_from(other: ValidatorId) -> Result<Self, Self::Error> {\n        if other.value.len() != VALIDATOR_ID_LENGTH {\n            return Err(Error::InvalidByteLength(hex::encode(other.value)));\n        }\n        let mut value = [0; VALIDATOR_ID_LENGTH];\n        value.copy_from_slice(other.value.as_slice());\n        Ok(H160::from_slice(&value).into())\n    }\n}\n\nimpl PartialEq<ValidatorId> for topos_crypto::validator_id::ValidatorId {\n    fn eq(&self, other: &ValidatorId) -> bool {\n        if other.value.len() != VALIDATOR_ID_LENGTH {\n            return false;\n        }\n        self.as_bytes() == &other.value[..VALIDATOR_ID_LENGTH]\n    }\n}\n"
  },
  {
    "path": "crates/topos-core/src/api/grpc/conversions/tce/v1/api.rs",
    "content": "use crate::api::grpc::tce::v1::{\n    watch_certificates_request::{Command, OpenStream},\n    watch_certificates_response::{CertificatePushed, Event, StreamOpened},\n    WatchCertificatesRequest, WatchCertificatesResponse,\n};\n\nmacro_rules! impl_command_conversion {\n    ($type: ident) => {\n        impl From<$type> for WatchCertificatesRequest {\n            fn from(command: $type) -> Self {\n                Self {\n                    request_id: Some(uuid::Uuid::new_v4().as_u64_pair().into()),\n                    command: Some(Command::$type(command)),\n                }\n            }\n        }\n    };\n}\n\nmacro_rules! impl_event_conversion {\n    ($type: ident) => {\n        impl From<$type> for WatchCertificatesResponse {\n            fn from(event: $type) -> Self {\n                Self {\n                    request_id: None,\n                    event: Some(Event::$type(event)),\n                }\n            }\n        }\n    };\n}\n\nimpl_command_conversion!(OpenStream);\n\nimpl_event_conversion!(StreamOpened);\nimpl_event_conversion!(CertificatePushed);\n"
  },
  {
    "path": "crates/topos-core/src/api/grpc/conversions/tce/v1/mod.rs",
    "content": "pub mod api;\npub mod synchronization;\n"
  },
  {
    "path": "crates/topos-core/src/api/grpc/conversions/tce/v1/synchronization.rs",
    "content": "use prost::{bytes::Bytes, Message};\n\nuse crate::api::grpc::tce::v1::{\n    CheckpointRequest, CheckpointResponse, FetchCertificatesRequest, FetchCertificatesResponse,\n};\n\nuse crate::api::grpc::ConversionError;\n\nmacro_rules! impl_to_vec_conversion {\n    ($($type: ident),*) => {\n        $(\n            impl From<$type> for Vec<u8> {\n                fn from(val: $type) -> Self {\n                    val.encode_to_vec()\n                }\n            }\n        )*\n    };\n}\n\nmacro_rules! impl_from_vec_conversion {\n    ($($type: ident),*) => {\n        $(\n            impl TryFrom<Vec<u8>> for $type {\n                type Error = ConversionError;\n                fn try_from(input: Vec<u8>) -> Result<Self, Self::Error> {\n                    let bytes = Bytes::from(input);\n\n                    Ok(Self::decode(bytes)?)\n                }\n            }\n        )*\n    };\n}\n\nimpl_to_vec_conversion!(\n    CheckpointRequest,\n    CheckpointResponse,\n    FetchCertificatesRequest,\n    FetchCertificatesResponse\n);\n\nimpl_from_vec_conversion!(\n    CheckpointResponse,\n    CheckpointRequest,\n    FetchCertificatesRequest,\n    FetchCertificatesResponse\n);\n"
  },
  {
    "path": "crates/topos-core/src/api/grpc/conversions/uci/v1/uci.rs",
    "content": "//!\n//! Protobuf generated/native Rust structures related conversions for GRPC API\n//!\nuse crate::api::grpc::shared::v1_conversions_subnet::Error;\nuse crate::api::grpc::uci::v1 as proto_v1;\n\nimpl TryFrom<proto_v1::Certificate> for crate::uci::Certificate {\n    type Error = Error;\n\n    fn try_from(certificate: proto_v1::Certificate) -> Result<Self, Self::Error> {\n        Ok(crate::uci::Certificate {\n            prev_id: certificate\n                .prev_id\n                .ok_or(Error::MissingField(\"certificate.prev_id\"))?\n                .value\n                .as_slice()\n                .try_into()?,\n            source_subnet_id: certificate\n                .source_subnet_id\n                .ok_or(Error::MissingField(\"certificate.source_subnet_id\"))?\n                .value\n                .as_slice()\n                .try_into()?,\n            state_root: certificate\n                .state_root\n                .try_into()\n                .map_err(|_| Error::InvalidStateRoot)?,\n            tx_root_hash: certificate\n                .tx_root_hash\n                .try_into()\n                .map_err(|_| Error::InvalidTxRootHash)?,\n            receipts_root_hash: certificate\n                .receipts_root_hash\n                .try_into()\n                .map_err(|_| Error::InvalidReceiptsRootHash)?,\n            target_subnets: certificate\n                .target_subnets\n                .into_iter()\n                .map(TryInto::try_into)\n                .collect::<Result<Vec<crate::uci::SubnetId>, _>>()?,\n            verifier: certificate.verifier,\n            id: certificate\n                .id\n                .ok_or(Error::MissingField(\"certificate.id\"))?\n                .value\n                .as_slice()\n                .try_into()?,\n            proof: certificate.proof.expect(\"valid proof\").value,\n            signature: certificate.signature.expect(\"valid frost signature\").value,\n        })\n  
  }\n}\n\nimpl From<crate::uci::Certificate> for proto_v1::Certificate {\n    fn from(certificate: crate::uci::Certificate) -> Self {\n        proto_v1::Certificate {\n            prev_id: Some(crate::api::grpc::shared::v1::CertificateId {\n                value: certificate.prev_id.into(),\n            }),\n            source_subnet_id: Some(crate::api::grpc::shared::v1::SubnetId {\n                value: certificate.source_subnet_id.into(),\n            }),\n            state_root: certificate.state_root.to_vec(),\n            tx_root_hash: certificate.tx_root_hash.to_vec(),\n            receipts_root_hash: certificate.receipts_root_hash.to_vec(),\n            verifier: certificate.verifier,\n            target_subnets: certificate\n                .target_subnets\n                .into_iter()\n                .map(|target_subnet| target_subnet.into())\n                .collect(),\n            id: Some(crate::api::grpc::shared::v1::CertificateId {\n                value: certificate.id.into(),\n            }),\n            proof: Some(crate::api::grpc::shared::v1::StarkProof {\n                value: certificate.proof,\n            }),\n            signature: Some(crate::api::grpc::shared::v1::Frost {\n                value: certificate.signature,\n            }),\n        }\n    }\n}\n\n#[test]\nfn test_proto_uci_certificate_conversion_id_random_0x() {\n    use crate::api::grpc::shared::v1::{CertificateId, Frost, StarkProof, SubnetId};\n    let valid_cert = proto_v1::Certificate {\n        prev_id: Some(CertificateId {\n            value: vec![\n                134, 103, 37, 44, 159, 78, 218, 73, 112, 17, 202, 189, 112, 180, 121, 0, 12, 128,\n                186, 116, 161, 18, 122, 129, 75, 151, 144, 95, 63, 203, 218, 69,\n            ],\n        }),\n        source_subnet_id: Some(SubnetId {\n            value: vec![\n                98, 139, 93, 91, 125, 115, 135, 224, 46, 222, 68, 33, 52, 2, 83, 179, 100, 2, 44,\n                97, 103, 55, 128, 90, 14, 40, 
56, 72, 66, 59, 0, 181,\n            ],\n        }),\n        state_root: vec![\n            145, 239, 242, 24, 12, 214, 83, 202, 223, 162, 240, 11, 146, 240, 28, 179, 163, 174,\n            70, 6, 216, 40, 150, 1, 195, 33, 156, 132, 21, 43, 6, 236,\n        ],\n        tx_root_hash: vec![\n            86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224,\n            27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33,\n        ],\n        receipts_root_hash: vec![\n            86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224,\n            27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33,\n        ],\n        target_subnets: Vec::new(),\n        verifier: 0,\n        id: Some(CertificateId {\n            value: vec![\n                48, 120, 230, 118, 216, 103, 205, 65, 12, 143, 205, 166, 153, 107, 194, 94, 158,\n                29, 135, 167, 231, 50, 238, 173, 96, 165, 27, 215, 255, 94, 18, 199,\n            ],\n        }),\n        proof: Some(StarkProof { value: Vec::new() }),\n        signature: Some(Frost {\n            value: vec![\n                76, 181, 52, 25, 163, 103, 87, 142, 229, 64, 163, 77, 11, 225, 135, 96, 181, 34,\n                168, 13, 152, 69, 90, 202, 11, 235, 122, 214, 103, 26, 31, 109, 94, 117, 53, 83,\n                195, 74, 47, 175, 189, 3, 134, 164, 186, 179, 73, 86, 202, 172, 213, 195, 160, 139,\n                240, 230, 103, 81, 227, 99, 241, 130, 157, 188,\n            ],\n        }),\n    };\n    if let Err(e) = crate::uci::Certificate::try_from(valid_cert) {\n        panic!(\"Unable to perform certificate conversion: {e}\");\n    };\n}\n\n#[test]\nfn test_proto_uci_certificate_conversion_id_starts_with_0x() {\n    use crate::api::grpc::shared::v1::{CertificateId, Frost, StarkProof, SubnetId};\n    let mut prev_id = vec![b'0', b'x'];\n    prev_id.append(\n        &mut 
hex::decode(\"aac03cadfff6846c9ce72956eee2498011dd7b08689565d6f29e25c0a967ef14\")\n            .expect(\"Valid id\"),\n    );\n    let id = \"504b5d01948bc777ba1510ba92a901f516408e4b2a1a5b97fed719430acc9ec9\";\n    let valid_cert = proto_v1::Certificate {\n        prev_id: Some(CertificateId { value: prev_id }),\n        id: Some(CertificateId {\n            value: hex::decode(id).expect(\"Valid id\"),\n        }),\n        source_subnet_id: Some(SubnetId::from([0u8; 32])),\n        state_root: [0u8; 32].to_vec(),\n        tx_root_hash: [0u8; 32].to_vec(),\n        receipts_root_hash: [0u8; 32].to_vec(),\n        proof: Some(StarkProof { value: Vec::new() }),\n        signature: Some(Frost { value: Vec::new() }),\n        ..Default::default()\n    };\n    let cert: crate::uci::Certificate = match crate::uci::Certificate::try_from(valid_cert) {\n        Ok(cert) => cert,\n        Err(e) => {\n            panic!(\"Unable to perform certificate conversion: {e}\");\n        }\n    };\n    println!(\n        \"First certificate converted prev_id={}, id={}\",\n        cert.prev_id, cert.id\n    );\n\n    let prev_id = \"0xFF4b5d01948bc777ba1510ba92a901f516408e4b2a1a5b97fed719430acc9ec9\"\n        .to_string()\n        .into_bytes();\n    let id = \"AA4b5d01948bc777ba1510ba92a901f516408e4b2a1a5b97fed719430acc9ec9\"\n        .to_string()\n        .into_bytes();\n    let valid_cert_2 = proto_v1::Certificate {\n        prev_id: Some(CertificateId { value: prev_id }),\n        id: Some(CertificateId { value: id }),\n        source_subnet_id: Some(SubnetId::from([0u8; 32])),\n        state_root: [0u8; 32].to_vec(),\n        tx_root_hash: [0u8; 32].to_vec(),\n        receipts_root_hash: [0u8; 32].to_vec(),\n        proof: Some(StarkProof { value: Vec::new() }),\n        signature: Some(Frost { value: Vec::new() }),\n        ..Default::default()\n    };\n    let cert_2: crate::uci::Certificate = match crate::uci::Certificate::try_from(valid_cert_2) {\n        Ok(cert) => cert,\n 
       Err(e) => {\n            panic!(\"Unable to perform certificate conversion: {e}\");\n        }\n    };\n\n    println!(\n        \"Second certificate converted prev_id={}, id={}\",\n        cert_2.prev_id, cert_2.id\n    );\n}\n"
  },
  {
    "path": "crates/topos-core/src/api/grpc/generated/topos.p2p.rs",
    "content": "/// Generated client implementations.\npub mod info_service_client {\n    #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]\n    use tonic::codegen::*;\n    use tonic::codegen::http::Uri;\n    #[derive(Debug, Clone)]\n    pub struct InfoServiceClient<T> {\n        inner: tonic::client::Grpc<T>,\n    }\n    impl InfoServiceClient<tonic::transport::Channel> {\n        /// Attempt to create a new client by connecting to a given endpoint.\n        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>\n        where\n            D: TryInto<tonic::transport::Endpoint>,\n            D::Error: Into<StdError>,\n        {\n            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;\n            Ok(Self::new(conn))\n        }\n    }\n    impl<T> InfoServiceClient<T>\n    where\n        T: tonic::client::GrpcService<tonic::body::BoxBody>,\n        T::Error: Into<StdError>,\n        T::ResponseBody: Body<Data = Bytes> + Send + 'static,\n        <T::ResponseBody as Body>::Error: Into<StdError> + Send,\n    {\n        pub fn new(inner: T) -> Self {\n            let inner = tonic::client::Grpc::new(inner);\n            Self { inner }\n        }\n        pub fn with_origin(inner: T, origin: Uri) -> Self {\n            let inner = tonic::client::Grpc::with_origin(inner, origin);\n            Self { inner }\n        }\n        pub fn with_interceptor<F>(\n            inner: T,\n            interceptor: F,\n        ) -> InfoServiceClient<InterceptedService<T, F>>\n        where\n            F: tonic::service::Interceptor,\n            T::ResponseBody: Default,\n            T: tonic::codegen::Service<\n                http::Request<tonic::body::BoxBody>,\n                Response = http::Response<\n                    <T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody,\n                >,\n            >,\n            <T as tonic::codegen::Service<\n                
http::Request<tonic::body::BoxBody>,\n            >>::Error: Into<StdError> + Send + Sync,\n        {\n            InfoServiceClient::new(InterceptedService::new(inner, interceptor))\n        }\n        /// Compress requests with the given encoding.\n        ///\n        /// This requires the server to support it otherwise it might respond with an\n        /// error.\n        #[must_use]\n        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {\n            self.inner = self.inner.send_compressed(encoding);\n            self\n        }\n        /// Enable decompressing responses.\n        #[must_use]\n        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {\n            self.inner = self.inner.accept_compressed(encoding);\n            self\n        }\n        /// Limits the maximum size of a decoded message.\n        ///\n        /// Default: `4MB`\n        #[must_use]\n        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {\n            self.inner = self.inner.max_decoding_message_size(limit);\n            self\n        }\n        /// Limits the maximum size of an encoded message.\n        ///\n        /// Default: `usize::MAX`\n        #[must_use]\n        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {\n            self.inner = self.inner.max_encoding_message_size(limit);\n            self\n        }\n    }\n}\n/// Generated server implementations.\npub mod info_service_server {\n    #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]\n    use tonic::codegen::*;\n    /// Generated trait containing gRPC methods that should be implemented for use with InfoServiceServer.\n    #[async_trait]\n    pub trait InfoService: Send + Sync + 'static {}\n    #[derive(Debug)]\n    pub struct InfoServiceServer<T: InfoService> {\n        inner: _Inner<T>,\n        accept_compression_encodings: EnabledCompressionEncodings,\n        send_compression_encodings: 
EnabledCompressionEncodings,\n        max_decoding_message_size: Option<usize>,\n        max_encoding_message_size: Option<usize>,\n    }\n    struct _Inner<T>(Arc<T>);\n    impl<T: InfoService> InfoServiceServer<T> {\n        pub fn new(inner: T) -> Self {\n            Self::from_arc(Arc::new(inner))\n        }\n        pub fn from_arc(inner: Arc<T>) -> Self {\n            let inner = _Inner(inner);\n            Self {\n                inner,\n                accept_compression_encodings: Default::default(),\n                send_compression_encodings: Default::default(),\n                max_decoding_message_size: None,\n                max_encoding_message_size: None,\n            }\n        }\n        pub fn with_interceptor<F>(\n            inner: T,\n            interceptor: F,\n        ) -> InterceptedService<Self, F>\n        where\n            F: tonic::service::Interceptor,\n        {\n            InterceptedService::new(Self::new(inner), interceptor)\n        }\n        /// Enable decompressing requests with the given encoding.\n        #[must_use]\n        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {\n            self.accept_compression_encodings.enable(encoding);\n            self\n        }\n        /// Compress responses with the given encoding, if the client supports it.\n        #[must_use]\n        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {\n            self.send_compression_encodings.enable(encoding);\n            self\n        }\n        /// Limits the maximum size of a decoded message.\n        ///\n        /// Default: `4MB`\n        #[must_use]\n        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {\n            self.max_decoding_message_size = Some(limit);\n            self\n        }\n        /// Limits the maximum size of an encoded message.\n        ///\n        /// Default: `usize::MAX`\n        #[must_use]\n        pub fn max_encoding_message_size(mut 
self, limit: usize) -> Self {\n            self.max_encoding_message_size = Some(limit);\n            self\n        }\n    }\n    impl<T, B> tonic::codegen::Service<http::Request<B>> for InfoServiceServer<T>\n    where\n        T: InfoService,\n        B: Body + Send + 'static,\n        B::Error: Into<StdError> + Send + 'static,\n    {\n        type Response = http::Response<tonic::body::BoxBody>;\n        type Error = std::convert::Infallible;\n        type Future = BoxFuture<Self::Response, Self::Error>;\n        fn poll_ready(\n            &mut self,\n            _cx: &mut Context<'_>,\n        ) -> Poll<std::result::Result<(), Self::Error>> {\n            Poll::Ready(Ok(()))\n        }\n        fn call(&mut self, req: http::Request<B>) -> Self::Future {\n            let inner = self.inner.clone();\n            match req.uri().path() {\n                _ => {\n                    Box::pin(async move {\n                        Ok(\n                            http::Response::builder()\n                                .status(200)\n                                .header(\"grpc-status\", \"12\")\n                                .header(\"content-type\", \"application/grpc\")\n                                .body(empty_body())\n                                .unwrap(),\n                        )\n                    })\n                }\n            }\n        }\n    }\n    impl<T: InfoService> Clone for InfoServiceServer<T> {\n        fn clone(&self) -> Self {\n            let inner = self.inner.clone();\n            Self {\n                inner,\n                accept_compression_encodings: self.accept_compression_encodings,\n                send_compression_encodings: self.send_compression_encodings,\n                max_decoding_message_size: self.max_decoding_message_size,\n                max_encoding_message_size: self.max_encoding_message_size,\n            }\n        }\n    }\n    impl<T: InfoService> Clone for _Inner<T> {\n        fn clone(&self) -> 
Self {\n            Self(Arc::clone(&self.0))\n        }\n    }\n    impl<T: std::fmt::Debug> std::fmt::Debug for _Inner<T> {\n        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n            write!(f, \"{:?}\", self.0)\n        }\n    }\n    impl<T: InfoService> tonic::server::NamedService for InfoServiceServer<T> {\n        const NAME: &'static str = \"topos.p2p.InfoService\";\n    }\n}\n"
  },
  {
    "path": "crates/topos-core/src/api/grpc/generated/topos.shared.v1.rs",
    "content": "#[derive(Copy, serde::Deserialize, serde::Serialize)]\n#[allow(clippy::derive_partial_eq_without_eq)]\n#[derive(Clone, PartialEq, ::prost::Message)]\npub struct Uuid {\n    #[prost(uint64, tag = \"1\")]\n    pub most_significant_bits: u64,\n    #[prost(uint64, tag = \"2\")]\n    pub least_significant_bits: u64,\n}\n#[derive(Eq, Hash, serde::Deserialize, serde::Serialize)]\n#[allow(clippy::derive_partial_eq_without_eq)]\n#[derive(Clone, PartialEq, ::prost::Message)]\npub struct SubnetId {\n    #[prost(bytes = \"vec\", tag = \"1\")]\n    pub value: ::prost::alloc::vec::Vec<u8>,\n}\n/// Id of the validator in the Topos protocol network\n/// This is the same as the validator's H160 address in the Ethereum compatible network\n#[derive(Eq, Hash, serde::Deserialize, serde::Serialize)]\n#[allow(clippy::derive_partial_eq_without_eq)]\n#[derive(Clone, PartialEq, ::prost::Message)]\npub struct ValidatorId {\n    /// The validator's H160 address\n    #[prost(bytes = \"vec\", tag = \"1\")]\n    pub value: ::prost::alloc::vec::Vec<u8>,\n}\n#[derive(Eq, Hash, serde::Deserialize, serde::Serialize)]\n#[allow(clippy::derive_partial_eq_without_eq)]\n#[derive(Clone, PartialEq, ::prost::Message)]\npub struct CertificateId {\n    #[prost(bytes = \"vec\", tag = \"1\")]\n    pub value: ::prost::alloc::vec::Vec<u8>,\n}\n/// Checkpoints are used to walk through streams\n#[allow(clippy::derive_partial_eq_without_eq)]\n#[derive(Clone, PartialEq, ::prost::Message)]\npub struct Checkpoints {}\n/// Nested message and enum types in `Checkpoints`.\npub mod checkpoints {\n    /// SourceCheckpoint represents a snapshot of multiple stream's positions regarding\n    /// one or multiple source subnets.\n    #[allow(clippy::derive_partial_eq_without_eq)]\n    #[derive(Clone, PartialEq, ::prost::Message)]\n    pub struct SourceCheckpoint {\n        #[prost(message, repeated, tag = \"1\")]\n        pub source_subnet_ids: ::prost::alloc::vec::Vec<super::SubnetId>,\n        #[prost(message, 
repeated, tag = \"2\")]\n        pub positions: ::prost::alloc::vec::Vec<super::positions::SourceStreamPosition>,\n    }\n    /// TargetCheckpoint represents a snapshot of multiple stream's positions regarding\n    /// one or multiple target subnets.\n    #[allow(clippy::derive_partial_eq_without_eq)]\n    #[derive(Clone, PartialEq, ::prost::Message)]\n    pub struct TargetCheckpoint {\n        #[prost(message, repeated, tag = \"1\")]\n        pub target_subnet_ids: ::prost::alloc::vec::Vec<super::SubnetId>,\n        #[prost(message, repeated, tag = \"2\")]\n        pub positions: ::prost::alloc::vec::Vec<super::positions::TargetStreamPosition>,\n    }\n}\n#[allow(clippy::derive_partial_eq_without_eq)]\n#[derive(Clone, PartialEq, ::prost::Message)]\npub struct Positions {}\n/// Nested message and enum types in `Positions`.\npub mod positions {\n    /// SourceStreamPosition represents a single point in a source stream.\n    /// It is defined by a source_subnet_id and a position, resolving to a certificate_id\n    #[derive(serde::Deserialize, serde::Serialize)]\n    #[allow(clippy::derive_partial_eq_without_eq)]\n    #[derive(Clone, PartialEq, ::prost::Message)]\n    pub struct SourceStreamPosition {\n        /// The source_subnet_id is a mandatory field for the SourceStreamPosition\n        #[prost(message, optional, tag = \"1\")]\n        pub source_subnet_id: ::core::option::Option<super::SubnetId>,\n        #[prost(uint64, tag = \"2\")]\n        pub position: u64,\n        #[prost(message, optional, tag = \"3\")]\n        pub certificate_id: ::core::option::Option<super::CertificateId>,\n    }\n    /// TargetStreamPosition represents a single point in a target stream regarding a source subnet.\n    /// It is defined by a target_subnet_id, source_subnet_id and a position, resolving to a certificate_id\n    #[allow(clippy::derive_partial_eq_without_eq)]\n    #[derive(Clone, PartialEq, ::prost::Message)]\n    pub struct TargetStreamPosition {\n        /// The 
source_subnet_id is a mandatory field for the TargetStreamPosition\n        #[prost(message, optional, tag = \"1\")]\n        pub source_subnet_id: ::core::option::Option<super::SubnetId>,\n        /// The target_subnet_id is a mandatory field for the TargetStreamPosition\n        #[prost(message, optional, tag = \"2\")]\n        pub target_subnet_id: ::core::option::Option<super::SubnetId>,\n        #[prost(uint64, tag = \"3\")]\n        pub position: u64,\n        #[prost(message, optional, tag = \"4\")]\n        pub certificate_id: ::core::option::Option<super::CertificateId>,\n    }\n}\n#[derive(Eq, Hash, serde::Deserialize, serde::Serialize)]\n#[allow(clippy::derive_partial_eq_without_eq)]\n#[derive(Clone, PartialEq, ::prost::Message)]\npub struct Frost {\n    #[prost(bytes = \"vec\", tag = \"1\")]\n    pub value: ::prost::alloc::vec::Vec<u8>,\n}\n#[derive(Eq, Hash, serde::Deserialize, serde::Serialize)]\n#[allow(clippy::derive_partial_eq_without_eq)]\n#[derive(Clone, PartialEq, ::prost::Message)]\npub struct StarkProof {\n    #[prost(bytes = \"vec\", tag = \"1\")]\n    pub value: ::prost::alloc::vec::Vec<u8>,\n}\n/// A signature using the ECDSA algorithm.\n/// Used to sign double echo protocol messages.\n#[derive(Eq, Hash, serde::Deserialize, serde::Serialize)]\n#[allow(clippy::derive_partial_eq_without_eq)]\n#[derive(Clone, PartialEq, ::prost::Message)]\npub struct EcdsaSignature {\n    #[prost(bytes = \"vec\", tag = \"1\")]\n    pub r: ::prost::alloc::vec::Vec<u8>,\n    #[prost(bytes = \"vec\", tag = \"2\")]\n    pub s: ::prost::alloc::vec::Vec<u8>,\n    #[prost(uint64, tag = \"3\")]\n    pub v: u64,\n}\n"
  },
  {
    "path": "crates/topos-core/src/api/grpc/generated/topos.tce.v1.rs",
    "content": "#[derive(serde::Deserialize, serde::Serialize)]\n#[allow(clippy::derive_partial_eq_without_eq)]\n#[derive(Clone, PartialEq, ::prost::Message)]\npub struct CheckpointRequest {\n    /// Provide a request_id to track response\n    #[prost(message, optional, tag = \"1\")]\n    pub request_id: ::core::option::Option<super::super::shared::v1::Uuid>,\n    #[prost(message, repeated, tag = \"2\")]\n    pub checkpoint: ::prost::alloc::vec::Vec<ProofOfDelivery>,\n    #[prost(uint64, tag = \"3\")]\n    pub limit_per_subnet: u64,\n}\n#[derive(serde::Deserialize, serde::Serialize)]\n#[allow(clippy::derive_partial_eq_without_eq)]\n#[derive(Clone, PartialEq, ::prost::Message)]\npub struct CheckpointResponse {\n    /// If the response is directly linked to a request this ID allow one to track it\n    #[prost(message, optional, tag = \"1\")]\n    pub request_id: ::core::option::Option<super::super::shared::v1::Uuid>,\n    #[prost(message, repeated, tag = \"2\")]\n    pub checkpoint_diff: ::prost::alloc::vec::Vec<CheckpointMapFieldEntry>,\n}\n#[derive(serde::Deserialize, serde::Serialize)]\n#[allow(clippy::derive_partial_eq_without_eq)]\n#[derive(Clone, PartialEq, ::prost::Message)]\npub struct CheckpointMapFieldEntry {\n    #[prost(string, tag = \"1\")]\n    pub key: ::prost::alloc::string::String,\n    #[prost(message, repeated, tag = \"2\")]\n    pub value: ::prost::alloc::vec::Vec<ProofOfDelivery>,\n}\n#[allow(clippy::derive_partial_eq_without_eq)]\n#[derive(Clone, PartialEq, ::prost::Message)]\npub struct FetchCertificatesRequest {\n    /// Provide a request_id to track response\n    #[prost(message, optional, tag = \"1\")]\n    pub request_id: ::core::option::Option<super::super::shared::v1::Uuid>,\n    #[prost(message, repeated, tag = \"2\")]\n    pub certificates: ::prost::alloc::vec::Vec<super::super::shared::v1::CertificateId>,\n}\n#[allow(clippy::derive_partial_eq_without_eq)]\n#[derive(Clone, PartialEq, ::prost::Message)]\npub struct 
FetchCertificatesResponse {\n    /// Provide a request_id to track response\n    #[prost(message, optional, tag = \"1\")]\n    pub request_id: ::core::option::Option<super::super::shared::v1::Uuid>,\n    #[prost(message, repeated, tag = \"2\")]\n    pub certificates: ::prost::alloc::vec::Vec<super::super::uci::v1::Certificate>,\n}\n#[derive(serde::Deserialize, serde::Serialize)]\n#[allow(clippy::derive_partial_eq_without_eq)]\n#[derive(Clone, PartialEq, ::prost::Message)]\npub struct ProofOfDelivery {\n    #[prost(message, optional, tag = \"1\")]\n    pub delivery_position: ::core::option::Option<\n        super::super::shared::v1::positions::SourceStreamPosition,\n    >,\n    #[prost(message, repeated, tag = \"2\")]\n    pub readies: ::prost::alloc::vec::Vec<SignedReady>,\n    #[prost(uint64, tag = \"3\")]\n    pub threshold: u64,\n}\n#[derive(serde::Deserialize, serde::Serialize)]\n#[allow(clippy::derive_partial_eq_without_eq)]\n#[derive(Clone, PartialEq, ::prost::Message)]\npub struct SignedReady {\n    #[prost(string, tag = \"1\")]\n    pub ready: ::prost::alloc::string::String,\n    #[prost(string, tag = \"2\")]\n    pub signature: ::prost::alloc::string::String,\n}\n/// Generated client implementations.\npub mod synchronizer_service_client {\n    #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]\n    use tonic::codegen::*;\n    use tonic::codegen::http::Uri;\n    #[derive(Debug, Clone)]\n    pub struct SynchronizerServiceClient<T> {\n        inner: tonic::client::Grpc<T>,\n    }\n    impl SynchronizerServiceClient<tonic::transport::Channel> {\n        /// Attempt to create a new client by connecting to a given endpoint.\n        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>\n        where\n            D: TryInto<tonic::transport::Endpoint>,\n            D::Error: Into<StdError>,\n        {\n            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;\n            Ok(Self::new(conn))\n     
   }\n    }\n    impl<T> SynchronizerServiceClient<T>\n    where\n        T: tonic::client::GrpcService<tonic::body::BoxBody>,\n        T::Error: Into<StdError>,\n        T::ResponseBody: Body<Data = Bytes> + Send + 'static,\n        <T::ResponseBody as Body>::Error: Into<StdError> + Send,\n    {\n        pub fn new(inner: T) -> Self {\n            let inner = tonic::client::Grpc::new(inner);\n            Self { inner }\n        }\n        pub fn with_origin(inner: T, origin: Uri) -> Self {\n            let inner = tonic::client::Grpc::with_origin(inner, origin);\n            Self { inner }\n        }\n        pub fn with_interceptor<F>(\n            inner: T,\n            interceptor: F,\n        ) -> SynchronizerServiceClient<InterceptedService<T, F>>\n        where\n            F: tonic::service::Interceptor,\n            T::ResponseBody: Default,\n            T: tonic::codegen::Service<\n                http::Request<tonic::body::BoxBody>,\n                Response = http::Response<\n                    <T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody,\n                >,\n            >,\n            <T as tonic::codegen::Service<\n                http::Request<tonic::body::BoxBody>,\n            >>::Error: Into<StdError> + Send + Sync,\n        {\n            SynchronizerServiceClient::new(InterceptedService::new(inner, interceptor))\n        }\n        /// Compress requests with the given encoding.\n        ///\n        /// This requires the server to support it otherwise it might respond with an\n        /// error.\n        #[must_use]\n        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {\n            self.inner = self.inner.send_compressed(encoding);\n            self\n        }\n        /// Enable decompressing responses.\n        #[must_use]\n        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {\n            self.inner = self.inner.accept_compressed(encoding);\n            
self\n        }\n        /// Limits the maximum size of a decoded message.\n        ///\n        /// Default: `4MB`\n        #[must_use]\n        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {\n            self.inner = self.inner.max_decoding_message_size(limit);\n            self\n        }\n        /// Limits the maximum size of an encoded message.\n        ///\n        /// Default: `usize::MAX`\n        #[must_use]\n        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {\n            self.inner = self.inner.max_encoding_message_size(limit);\n            self\n        }\n        pub async fn fetch_checkpoint(\n            &mut self,\n            request: impl tonic::IntoRequest<super::CheckpointRequest>,\n        ) -> std::result::Result<\n            tonic::Response<super::CheckpointResponse>,\n            tonic::Status,\n        > {\n            self.inner\n                .ready()\n                .await\n                .map_err(|e| {\n                    tonic::Status::new(\n                        tonic::Code::Unknown,\n                        format!(\"Service was not ready: {}\", e.into()),\n                    )\n                })?;\n            let codec = tonic::codec::ProstCodec::default();\n            let path = http::uri::PathAndQuery::from_static(\n                \"/topos.tce.v1.SynchronizerService/fetch_checkpoint\",\n            );\n            let mut req = request.into_request();\n            req.extensions_mut()\n                .insert(\n                    GrpcMethod::new(\n                        \"topos.tce.v1.SynchronizerService\",\n                        \"fetch_checkpoint\",\n                    ),\n                );\n            self.inner.unary(req, path, codec).await\n        }\n        pub async fn fetch_certificates(\n            &mut self,\n            request: impl tonic::IntoRequest<super::FetchCertificatesRequest>,\n        ) -> std::result::Result<\n            
tonic::Response<super::FetchCertificatesResponse>,\n            tonic::Status,\n        > {\n            self.inner\n                .ready()\n                .await\n                .map_err(|e| {\n                    tonic::Status::new(\n                        tonic::Code::Unknown,\n                        format!(\"Service was not ready: {}\", e.into()),\n                    )\n                })?;\n            let codec = tonic::codec::ProstCodec::default();\n            let path = http::uri::PathAndQuery::from_static(\n                \"/topos.tce.v1.SynchronizerService/fetch_certificates\",\n            );\n            let mut req = request.into_request();\n            req.extensions_mut()\n                .insert(\n                    GrpcMethod::new(\n                        \"topos.tce.v1.SynchronizerService\",\n                        \"fetch_certificates\",\n                    ),\n                );\n            self.inner.unary(req, path, codec).await\n        }\n    }\n}\n/// Generated server implementations.\npub mod synchronizer_service_server {\n    #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]\n    use tonic::codegen::*;\n    /// Generated trait containing gRPC methods that should be implemented for use with SynchronizerServiceServer.\n    #[async_trait]\n    pub trait SynchronizerService: Send + Sync + 'static {\n        async fn fetch_checkpoint(\n            &self,\n            request: tonic::Request<super::CheckpointRequest>,\n        ) -> std::result::Result<\n            tonic::Response<super::CheckpointResponse>,\n            tonic::Status,\n        >;\n        async fn fetch_certificates(\n            &self,\n            request: tonic::Request<super::FetchCertificatesRequest>,\n        ) -> std::result::Result<\n            tonic::Response<super::FetchCertificatesResponse>,\n            tonic::Status,\n        >;\n    }\n    #[derive(Debug)]\n    pub struct SynchronizerServiceServer<T: SynchronizerService> 
{\n        inner: _Inner<T>,\n        accept_compression_encodings: EnabledCompressionEncodings,\n        send_compression_encodings: EnabledCompressionEncodings,\n        max_decoding_message_size: Option<usize>,\n        max_encoding_message_size: Option<usize>,\n    }\n    struct _Inner<T>(Arc<T>);\n    impl<T: SynchronizerService> SynchronizerServiceServer<T> {\n        pub fn new(inner: T) -> Self {\n            Self::from_arc(Arc::new(inner))\n        }\n        pub fn from_arc(inner: Arc<T>) -> Self {\n            let inner = _Inner(inner);\n            Self {\n                inner,\n                accept_compression_encodings: Default::default(),\n                send_compression_encodings: Default::default(),\n                max_decoding_message_size: None,\n                max_encoding_message_size: None,\n            }\n        }\n        pub fn with_interceptor<F>(\n            inner: T,\n            interceptor: F,\n        ) -> InterceptedService<Self, F>\n        where\n            F: tonic::service::Interceptor,\n        {\n            InterceptedService::new(Self::new(inner), interceptor)\n        }\n        /// Enable decompressing requests with the given encoding.\n        #[must_use]\n        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {\n            self.accept_compression_encodings.enable(encoding);\n            self\n        }\n        /// Compress responses with the given encoding, if the client supports it.\n        #[must_use]\n        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {\n            self.send_compression_encodings.enable(encoding);\n            self\n        }\n        /// Limits the maximum size of a decoded message.\n        ///\n        /// Default: `4MB`\n        #[must_use]\n        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {\n            self.max_decoding_message_size = Some(limit);\n            self\n        }\n        /// Limits the 
maximum size of an encoded message.\n        ///\n        /// Default: `usize::MAX`\n        #[must_use]\n        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {\n            self.max_encoding_message_size = Some(limit);\n            self\n        }\n    }\n    impl<T, B> tonic::codegen::Service<http::Request<B>> for SynchronizerServiceServer<T>\n    where\n        T: SynchronizerService,\n        B: Body + Send + 'static,\n        B::Error: Into<StdError> + Send + 'static,\n    {\n        type Response = http::Response<tonic::body::BoxBody>;\n        type Error = std::convert::Infallible;\n        type Future = BoxFuture<Self::Response, Self::Error>;\n        fn poll_ready(\n            &mut self,\n            _cx: &mut Context<'_>,\n        ) -> Poll<std::result::Result<(), Self::Error>> {\n            Poll::Ready(Ok(()))\n        }\n        fn call(&mut self, req: http::Request<B>) -> Self::Future {\n            let inner = self.inner.clone();\n            match req.uri().path() {\n                \"/topos.tce.v1.SynchronizerService/fetch_checkpoint\" => {\n                    #[allow(non_camel_case_types)]\n                    struct fetch_checkpointSvc<T: SynchronizerService>(pub Arc<T>);\n                    impl<\n                        T: SynchronizerService,\n                    > tonic::server::UnaryService<super::CheckpointRequest>\n                    for fetch_checkpointSvc<T> {\n                        type Response = super::CheckpointResponse;\n                        type Future = BoxFuture<\n                            tonic::Response<Self::Response>,\n                            tonic::Status,\n                        >;\n                        fn call(\n                            &mut self,\n                            request: tonic::Request<super::CheckpointRequest>,\n                        ) -> Self::Future {\n                            let inner = Arc::clone(&self.0);\n                            let fut = async move 
{\n                                <T as SynchronizerService>::fetch_checkpoint(\n                                        &inner,\n                                        request,\n                                    )\n                                    .await\n                            };\n                            Box::pin(fut)\n                        }\n                    }\n                    let accept_compression_encodings = self.accept_compression_encodings;\n                    let send_compression_encodings = self.send_compression_encodings;\n                    let max_decoding_message_size = self.max_decoding_message_size;\n                    let max_encoding_message_size = self.max_encoding_message_size;\n                    let inner = self.inner.clone();\n                    let fut = async move {\n                        let inner = inner.0;\n                        let method = fetch_checkpointSvc(inner);\n                        let codec = tonic::codec::ProstCodec::default();\n                        let mut grpc = tonic::server::Grpc::new(codec)\n                            .apply_compression_config(\n                                accept_compression_encodings,\n                                send_compression_encodings,\n                            )\n                            .apply_max_message_size_config(\n                                max_decoding_message_size,\n                                max_encoding_message_size,\n                            );\n                        let res = grpc.unary(method, req).await;\n                        Ok(res)\n                    };\n                    Box::pin(fut)\n                }\n                \"/topos.tce.v1.SynchronizerService/fetch_certificates\" => {\n                    #[allow(non_camel_case_types)]\n                    struct fetch_certificatesSvc<T: SynchronizerService>(pub Arc<T>);\n                    impl<\n                        T: SynchronizerService,\n             
       > tonic::server::UnaryService<super::FetchCertificatesRequest>\n                    for fetch_certificatesSvc<T> {\n                        type Response = super::FetchCertificatesResponse;\n                        type Future = BoxFuture<\n                            tonic::Response<Self::Response>,\n                            tonic::Status,\n                        >;\n                        fn call(\n                            &mut self,\n                            request: tonic::Request<super::FetchCertificatesRequest>,\n                        ) -> Self::Future {\n                            let inner = Arc::clone(&self.0);\n                            let fut = async move {\n                                <T as SynchronizerService>::fetch_certificates(\n                                        &inner,\n                                        request,\n                                    )\n                                    .await\n                            };\n                            Box::pin(fut)\n                        }\n                    }\n                    let accept_compression_encodings = self.accept_compression_encodings;\n                    let send_compression_encodings = self.send_compression_encodings;\n                    let max_decoding_message_size = self.max_decoding_message_size;\n                    let max_encoding_message_size = self.max_encoding_message_size;\n                    let inner = self.inner.clone();\n                    let fut = async move {\n                        let inner = inner.0;\n                        let method = fetch_certificatesSvc(inner);\n                        let codec = tonic::codec::ProstCodec::default();\n                        let mut grpc = tonic::server::Grpc::new(codec)\n                            .apply_compression_config(\n                                accept_compression_encodings,\n                                send_compression_encodings,\n                         
   )\n                            .apply_max_message_size_config(\n                                max_decoding_message_size,\n                                max_encoding_message_size,\n                            );\n                        let res = grpc.unary(method, req).await;\n                        Ok(res)\n                    };\n                    Box::pin(fut)\n                }\n                _ => {\n                    Box::pin(async move {\n                        Ok(\n                            http::Response::builder()\n                                .status(200)\n                                .header(\"grpc-status\", \"12\")\n                                .header(\"content-type\", \"application/grpc\")\n                                .body(empty_body())\n                                .unwrap(),\n                        )\n                    })\n                }\n            }\n        }\n    }\n    impl<T: SynchronizerService> Clone for SynchronizerServiceServer<T> {\n        fn clone(&self) -> Self {\n            let inner = self.inner.clone();\n            Self {\n                inner,\n                accept_compression_encodings: self.accept_compression_encodings,\n                send_compression_encodings: self.send_compression_encodings,\n                max_decoding_message_size: self.max_decoding_message_size,\n                max_encoding_message_size: self.max_encoding_message_size,\n            }\n        }\n    }\n    impl<T: SynchronizerService> Clone for _Inner<T> {\n        fn clone(&self) -> Self {\n            Self(Arc::clone(&self.0))\n        }\n    }\n    impl<T: std::fmt::Debug> std::fmt::Debug for _Inner<T> {\n        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n            write!(f, \"{:?}\", self.0)\n        }\n    }\n    impl<T: SynchronizerService> tonic::server::NamedService\n    for SynchronizerServiceServer<T> {\n        const NAME: &'static str = 
\"topos.tce.v1.SynchronizerService\";\n    }\n}\n#[allow(clippy::derive_partial_eq_without_eq)]\n#[derive(Clone, PartialEq, ::prost::Message)]\npub struct SubmitCertificateRequest {\n    #[prost(message, optional, tag = \"1\")]\n    pub certificate: ::core::option::Option<super::super::uci::v1::Certificate>,\n}\n#[allow(clippy::derive_partial_eq_without_eq)]\n#[derive(Clone, PartialEq, ::prost::Message)]\npub struct SubmitCertificateResponse {}\n#[allow(clippy::derive_partial_eq_without_eq)]\n#[derive(Clone, PartialEq, ::prost::Message)]\npub struct GetSourceHeadRequest {\n    #[prost(message, optional, tag = \"1\")]\n    pub subnet_id: ::core::option::Option<super::super::shared::v1::SubnetId>,\n}\n#[allow(clippy::derive_partial_eq_without_eq)]\n#[derive(Clone, PartialEq, ::prost::Message)]\npub struct GetSourceHeadResponse {\n    #[prost(message, optional, tag = \"1\")]\n    pub position: ::core::option::Option<\n        super::super::shared::v1::positions::SourceStreamPosition,\n    >,\n    #[prost(message, optional, tag = \"2\")]\n    pub certificate: ::core::option::Option<super::super::uci::v1::Certificate>,\n}\n#[allow(clippy::derive_partial_eq_without_eq)]\n#[derive(Clone, PartialEq, ::prost::Message)]\npub struct GetLastPendingCertificatesRequest {\n    #[prost(message, repeated, tag = \"1\")]\n    pub subnet_ids: ::prost::alloc::vec::Vec<super::super::shared::v1::SubnetId>,\n}\n#[allow(clippy::derive_partial_eq_without_eq)]\n#[derive(Clone, PartialEq, ::prost::Message)]\npub struct LastPendingCertificate {\n    #[prost(message, optional, tag = \"1\")]\n    pub value: ::core::option::Option<super::super::uci::v1::Certificate>,\n    /// Pending certificate index (effectively total number of pending certificates)\n    #[prost(uint64, tag = \"2\")]\n    pub index: u64,\n}\n#[allow(clippy::derive_partial_eq_without_eq)]\n#[derive(Clone, PartialEq, ::prost::Message)]\npub struct GetLastPendingCertificatesResponse {\n    /// Bytes and array types (SubnetId) 
could not be key in the map type according to specifications,\n    /// so we use SubnetId hex encoded string with 0x prefix as key\n    #[prost(map = \"string, message\", tag = \"1\")]\n    pub last_pending_certificate: ::std::collections::HashMap<\n        ::prost::alloc::string::String,\n        LastPendingCertificate,\n    >,\n}\n#[allow(clippy::derive_partial_eq_without_eq)]\n#[derive(Clone, PartialEq, ::prost::Message)]\npub struct WatchCertificatesRequest {\n    /// Provide a request_id to track response\n    #[prost(message, optional, tag = \"1\")]\n    pub request_id: ::core::option::Option<super::super::shared::v1::Uuid>,\n    /// Define which command needs to be performed\n    #[prost(oneof = \"watch_certificates_request::Command\", tags = \"2\")]\n    pub command: ::core::option::Option<watch_certificates_request::Command>,\n}\n/// Nested message and enum types in `WatchCertificatesRequest`.\npub mod watch_certificates_request {\n    /// Sent to start receiving events and being able to send further command\n    #[allow(clippy::derive_partial_eq_without_eq)]\n    #[derive(Clone, PartialEq, ::prost::Message)]\n    pub struct OpenStream {\n        #[prost(message, optional, tag = \"1\")]\n        pub target_checkpoint: ::core::option::Option<\n            super::super::super::shared::v1::checkpoints::TargetCheckpoint,\n        >,\n        #[prost(message, optional, tag = \"2\")]\n        pub source_checkpoint: ::core::option::Option<\n            super::super::super::shared::v1::checkpoints::SourceCheckpoint,\n        >,\n    }\n    /// Define which command needs to be performed\n    #[allow(clippy::derive_partial_eq_without_eq)]\n    #[derive(Clone, PartialEq, ::prost::Oneof)]\n    pub enum Command {\n        #[prost(message, tag = \"2\")]\n        OpenStream(OpenStream),\n    }\n}\n#[allow(clippy::derive_partial_eq_without_eq)]\n#[derive(Clone, PartialEq, ::prost::Message)]\npub struct WatchCertificatesResponse {\n    /// If the response is directly 
linked to a request this ID allow one to track it\n    #[prost(message, optional, tag = \"1\")]\n    pub request_id: ::core::option::Option<super::super::shared::v1::Uuid>,\n    #[prost(oneof = \"watch_certificates_response::Event\", tags = \"2, 3\")]\n    pub event: ::core::option::Option<watch_certificates_response::Event>,\n}\n/// Nested message and enum types in `WatchCertificatesResponse`.\npub mod watch_certificates_response {\n    /// Sent by the TCE when the stream is ready to be used and\n    /// that certificates will start being pushed\n    #[allow(clippy::derive_partial_eq_without_eq)]\n    #[derive(Clone, PartialEq, ::prost::Message)]\n    pub struct StreamOpened {\n        #[prost(message, repeated, tag = \"1\")]\n        pub subnet_ids: ::prost::alloc::vec::Vec<\n            super::super::super::shared::v1::SubnetId,\n        >,\n    }\n    /// Target Certificate pushed from the TCE to the sequencer\n    #[allow(clippy::derive_partial_eq_without_eq)]\n    #[derive(Clone, PartialEq, ::prost::Message)]\n    pub struct CertificatePushed {\n        #[prost(message, optional, tag = \"1\")]\n        pub certificate: ::core::option::Option<\n            super::super::super::uci::v1::Certificate,\n        >,\n        #[prost(message, repeated, tag = \"2\")]\n        pub positions: ::prost::alloc::vec::Vec<\n            super::super::super::shared::v1::positions::TargetStreamPosition,\n        >,\n    }\n    #[allow(clippy::derive_partial_eq_without_eq)]\n    #[derive(Clone, PartialEq, ::prost::Oneof)]\n    pub enum Event {\n        #[prost(message, tag = \"2\")]\n        StreamOpened(StreamOpened),\n        #[prost(message, tag = \"3\")]\n        CertificatePushed(CertificatePushed),\n    }\n}\n/// Generated client implementations.\npub mod api_service_client {\n    #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]\n    use tonic::codegen::*;\n    use tonic::codegen::http::Uri;\n    #[derive(Debug, Clone)]\n    pub struct 
ApiServiceClient<T> {\n        inner: tonic::client::Grpc<T>,\n    }\n    impl ApiServiceClient<tonic::transport::Channel> {\n        /// Attempt to create a new client by connecting to a given endpoint.\n        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>\n        where\n            D: TryInto<tonic::transport::Endpoint>,\n            D::Error: Into<StdError>,\n        {\n            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;\n            Ok(Self::new(conn))\n        }\n    }\n    impl<T> ApiServiceClient<T>\n    where\n        T: tonic::client::GrpcService<tonic::body::BoxBody>,\n        T::Error: Into<StdError>,\n        T::ResponseBody: Body<Data = Bytes> + Send + 'static,\n        <T::ResponseBody as Body>::Error: Into<StdError> + Send,\n    {\n        pub fn new(inner: T) -> Self {\n            let inner = tonic::client::Grpc::new(inner);\n            Self { inner }\n        }\n        pub fn with_origin(inner: T, origin: Uri) -> Self {\n            let inner = tonic::client::Grpc::with_origin(inner, origin);\n            Self { inner }\n        }\n        pub fn with_interceptor<F>(\n            inner: T,\n            interceptor: F,\n        ) -> ApiServiceClient<InterceptedService<T, F>>\n        where\n            F: tonic::service::Interceptor,\n            T::ResponseBody: Default,\n            T: tonic::codegen::Service<\n                http::Request<tonic::body::BoxBody>,\n                Response = http::Response<\n                    <T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody,\n                >,\n            >,\n            <T as tonic::codegen::Service<\n                http::Request<tonic::body::BoxBody>,\n            >>::Error: Into<StdError> + Send + Sync,\n        {\n            ApiServiceClient::new(InterceptedService::new(inner, interceptor))\n        }\n        /// Compress requests with the given encoding.\n        ///\n        /// This requires the server to 
support it otherwise it might respond with an\n        /// error.\n        #[must_use]\n        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {\n            self.inner = self.inner.send_compressed(encoding);\n            self\n        }\n        /// Enable decompressing responses.\n        #[must_use]\n        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {\n            self.inner = self.inner.accept_compressed(encoding);\n            self\n        }\n        /// Limits the maximum size of a decoded message.\n        ///\n        /// Default: `4MB`\n        #[must_use]\n        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {\n            self.inner = self.inner.max_decoding_message_size(limit);\n            self\n        }\n        /// Limits the maximum size of an encoded message.\n        ///\n        /// Default: `usize::MAX`\n        #[must_use]\n        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {\n            self.inner = self.inner.max_encoding_message_size(limit);\n            self\n        }\n        pub async fn submit_certificate(\n            &mut self,\n            request: impl tonic::IntoRequest<super::SubmitCertificateRequest>,\n        ) -> std::result::Result<\n            tonic::Response<super::SubmitCertificateResponse>,\n            tonic::Status,\n        > {\n            self.inner\n                .ready()\n                .await\n                .map_err(|e| {\n                    tonic::Status::new(\n                        tonic::Code::Unknown,\n                        format!(\"Service was not ready: {}\", e.into()),\n                    )\n                })?;\n            let codec = tonic::codec::ProstCodec::default();\n            let path = http::uri::PathAndQuery::from_static(\n                \"/topos.tce.v1.APIService/SubmitCertificate\",\n            );\n            let mut req = request.into_request();\n            
req.extensions_mut()\n                .insert(GrpcMethod::new(\"topos.tce.v1.APIService\", \"SubmitCertificate\"));\n            self.inner.unary(req, path, codec).await\n        }\n        pub async fn get_source_head(\n            &mut self,\n            request: impl tonic::IntoRequest<super::GetSourceHeadRequest>,\n        ) -> std::result::Result<\n            tonic::Response<super::GetSourceHeadResponse>,\n            tonic::Status,\n        > {\n            self.inner\n                .ready()\n                .await\n                .map_err(|e| {\n                    tonic::Status::new(\n                        tonic::Code::Unknown,\n                        format!(\"Service was not ready: {}\", e.into()),\n                    )\n                })?;\n            let codec = tonic::codec::ProstCodec::default();\n            let path = http::uri::PathAndQuery::from_static(\n                \"/topos.tce.v1.APIService/GetSourceHead\",\n            );\n            let mut req = request.into_request();\n            req.extensions_mut()\n                .insert(GrpcMethod::new(\"topos.tce.v1.APIService\", \"GetSourceHead\"));\n            self.inner.unary(req, path, codec).await\n        }\n        /// / This RPC allows a client to get latest pending certificates for\n        /// / requested subnets (by their subnet id)\n        /// /\n        /// / Returns a map of subnet_id -> last pending certificate\n        /// / If there are no pending certificate for a subnet, returns None for that subnet id\n        pub async fn get_last_pending_certificates(\n            &mut self,\n            request: impl tonic::IntoRequest<super::GetLastPendingCertificatesRequest>,\n        ) -> std::result::Result<\n            tonic::Response<super::GetLastPendingCertificatesResponse>,\n            tonic::Status,\n        > {\n            self.inner\n                .ready()\n                .await\n                .map_err(|e| {\n                    tonic::Status::new(\n          
              tonic::Code::Unknown,\n                        format!(\"Service was not ready: {}\", e.into()),\n                    )\n                })?;\n            let codec = tonic::codec::ProstCodec::default();\n            let path = http::uri::PathAndQuery::from_static(\n                \"/topos.tce.v1.APIService/GetLastPendingCertificates\",\n            );\n            let mut req = request.into_request();\n            req.extensions_mut()\n                .insert(\n                    GrpcMethod::new(\n                        \"topos.tce.v1.APIService\",\n                        \"GetLastPendingCertificates\",\n                    ),\n                );\n            self.inner.unary(req, path, codec).await\n        }\n        /// This RPC allows a client to open a bidirectional stream with a TCE\n        pub async fn watch_certificates(\n            &mut self,\n            request: impl tonic::IntoStreamingRequest<\n                Message = super::WatchCertificatesRequest,\n            >,\n        ) -> std::result::Result<\n            tonic::Response<tonic::codec::Streaming<super::WatchCertificatesResponse>>,\n            tonic::Status,\n        > {\n            self.inner\n                .ready()\n                .await\n                .map_err(|e| {\n                    tonic::Status::new(\n                        tonic::Code::Unknown,\n                        format!(\"Service was not ready: {}\", e.into()),\n                    )\n                })?;\n            let codec = tonic::codec::ProstCodec::default();\n            let path = http::uri::PathAndQuery::from_static(\n                \"/topos.tce.v1.APIService/WatchCertificates\",\n            );\n            let mut req = request.into_streaming_request();\n            req.extensions_mut()\n                .insert(GrpcMethod::new(\"topos.tce.v1.APIService\", \"WatchCertificates\"));\n            self.inner.streaming(req, path, codec).await\n        }\n    }\n}\n/// Generated server 
implementations.\npub mod api_service_server {\n    #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]\n    use tonic::codegen::*;\n    /// Generated trait containing gRPC methods that should be implemented for use with ApiServiceServer.\n    #[async_trait]\n    pub trait ApiService: Send + Sync + 'static {\n        async fn submit_certificate(\n            &self,\n            request: tonic::Request<super::SubmitCertificateRequest>,\n        ) -> std::result::Result<\n            tonic::Response<super::SubmitCertificateResponse>,\n            tonic::Status,\n        >;\n        async fn get_source_head(\n            &self,\n            request: tonic::Request<super::GetSourceHeadRequest>,\n        ) -> std::result::Result<\n            tonic::Response<super::GetSourceHeadResponse>,\n            tonic::Status,\n        >;\n        /// / This RPC allows a client to get latest pending certificates for\n        /// / requested subnets (by their subnet id)\n        /// /\n        /// / Returns a map of subnet_id -> last pending certificate\n        /// / If there are no pending certificate for a subnet, returns None for that subnet id\n        async fn get_last_pending_certificates(\n            &self,\n            request: tonic::Request<super::GetLastPendingCertificatesRequest>,\n        ) -> std::result::Result<\n            tonic::Response<super::GetLastPendingCertificatesResponse>,\n            tonic::Status,\n        >;\n        /// Server streaming response type for the WatchCertificates method.\n        type WatchCertificatesStream: tonic::codegen::tokio_stream::Stream<\n                Item = std::result::Result<\n                    super::WatchCertificatesResponse,\n                    tonic::Status,\n                >,\n            >\n            + Send\n            + 'static;\n        /// This RPC allows a client to open a bidirectional stream with a TCE\n        async fn watch_certificates(\n            &self,\n            
request: tonic::Request<tonic::Streaming<super::WatchCertificatesRequest>>,\n        ) -> std::result::Result<\n            tonic::Response<Self::WatchCertificatesStream>,\n            tonic::Status,\n        >;\n    }\n    #[derive(Debug)]\n    pub struct ApiServiceServer<T: ApiService> {\n        inner: _Inner<T>,\n        accept_compression_encodings: EnabledCompressionEncodings,\n        send_compression_encodings: EnabledCompressionEncodings,\n        max_decoding_message_size: Option<usize>,\n        max_encoding_message_size: Option<usize>,\n    }\n    struct _Inner<T>(Arc<T>);\n    impl<T: ApiService> ApiServiceServer<T> {\n        pub fn new(inner: T) -> Self {\n            Self::from_arc(Arc::new(inner))\n        }\n        pub fn from_arc(inner: Arc<T>) -> Self {\n            let inner = _Inner(inner);\n            Self {\n                inner,\n                accept_compression_encodings: Default::default(),\n                send_compression_encodings: Default::default(),\n                max_decoding_message_size: None,\n                max_encoding_message_size: None,\n            }\n        }\n        pub fn with_interceptor<F>(\n            inner: T,\n            interceptor: F,\n        ) -> InterceptedService<Self, F>\n        where\n            F: tonic::service::Interceptor,\n        {\n            InterceptedService::new(Self::new(inner), interceptor)\n        }\n        /// Enable decompressing requests with the given encoding.\n        #[must_use]\n        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {\n            self.accept_compression_encodings.enable(encoding);\n            self\n        }\n        /// Compress responses with the given encoding, if the client supports it.\n        #[must_use]\n        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {\n            self.send_compression_encodings.enable(encoding);\n            self\n        }\n        /// Limits the maximum size of a 
decoded message.\n        ///\n        /// Default: `4MB`\n        #[must_use]\n        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {\n            self.max_decoding_message_size = Some(limit);\n            self\n        }\n        /// Limits the maximum size of an encoded message.\n        ///\n        /// Default: `usize::MAX`\n        #[must_use]\n        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {\n            self.max_encoding_message_size = Some(limit);\n            self\n        }\n    }\n    impl<T, B> tonic::codegen::Service<http::Request<B>> for ApiServiceServer<T>\n    where\n        T: ApiService,\n        B: Body + Send + 'static,\n        B::Error: Into<StdError> + Send + 'static,\n    {\n        type Response = http::Response<tonic::body::BoxBody>;\n        type Error = std::convert::Infallible;\n        type Future = BoxFuture<Self::Response, Self::Error>;\n        fn poll_ready(\n            &mut self,\n            _cx: &mut Context<'_>,\n        ) -> Poll<std::result::Result<(), Self::Error>> {\n            Poll::Ready(Ok(()))\n        }\n        fn call(&mut self, req: http::Request<B>) -> Self::Future {\n            let inner = self.inner.clone();\n            match req.uri().path() {\n                \"/topos.tce.v1.APIService/SubmitCertificate\" => {\n                    #[allow(non_camel_case_types)]\n                    struct SubmitCertificateSvc<T: ApiService>(pub Arc<T>);\n                    impl<\n                        T: ApiService,\n                    > tonic::server::UnaryService<super::SubmitCertificateRequest>\n                    for SubmitCertificateSvc<T> {\n                        type Response = super::SubmitCertificateResponse;\n                        type Future = BoxFuture<\n                            tonic::Response<Self::Response>,\n                            tonic::Status,\n                        >;\n                        fn call(\n                            &mut 
self,\n                            request: tonic::Request<super::SubmitCertificateRequest>,\n                        ) -> Self::Future {\n                            let inner = Arc::clone(&self.0);\n                            let fut = async move {\n                                <T as ApiService>::submit_certificate(&inner, request).await\n                            };\n                            Box::pin(fut)\n                        }\n                    }\n                    let accept_compression_encodings = self.accept_compression_encodings;\n                    let send_compression_encodings = self.send_compression_encodings;\n                    let max_decoding_message_size = self.max_decoding_message_size;\n                    let max_encoding_message_size = self.max_encoding_message_size;\n                    let inner = self.inner.clone();\n                    let fut = async move {\n                        let inner = inner.0;\n                        let method = SubmitCertificateSvc(inner);\n                        let codec = tonic::codec::ProstCodec::default();\n                        let mut grpc = tonic::server::Grpc::new(codec)\n                            .apply_compression_config(\n                                accept_compression_encodings,\n                                send_compression_encodings,\n                            )\n                            .apply_max_message_size_config(\n                                max_decoding_message_size,\n                                max_encoding_message_size,\n                            );\n                        let res = grpc.unary(method, req).await;\n                        Ok(res)\n                    };\n                    Box::pin(fut)\n                }\n                \"/topos.tce.v1.APIService/GetSourceHead\" => {\n                    #[allow(non_camel_case_types)]\n                    struct GetSourceHeadSvc<T: ApiService>(pub Arc<T>);\n                    impl<\n      
                  T: ApiService,\n                    > tonic::server::UnaryService<super::GetSourceHeadRequest>\n                    for GetSourceHeadSvc<T> {\n                        type Response = super::GetSourceHeadResponse;\n                        type Future = BoxFuture<\n                            tonic::Response<Self::Response>,\n                            tonic::Status,\n                        >;\n                        fn call(\n                            &mut self,\n                            request: tonic::Request<super::GetSourceHeadRequest>,\n                        ) -> Self::Future {\n                            let inner = Arc::clone(&self.0);\n                            let fut = async move {\n                                <T as ApiService>::get_source_head(&inner, request).await\n                            };\n                            Box::pin(fut)\n                        }\n                    }\n                    let accept_compression_encodings = self.accept_compression_encodings;\n                    let send_compression_encodings = self.send_compression_encodings;\n                    let max_decoding_message_size = self.max_decoding_message_size;\n                    let max_encoding_message_size = self.max_encoding_message_size;\n                    let inner = self.inner.clone();\n                    let fut = async move {\n                        let inner = inner.0;\n                        let method = GetSourceHeadSvc(inner);\n                        let codec = tonic::codec::ProstCodec::default();\n                        let mut grpc = tonic::server::Grpc::new(codec)\n                            .apply_compression_config(\n                                accept_compression_encodings,\n                                send_compression_encodings,\n                            )\n                            .apply_max_message_size_config(\n                                max_decoding_message_size,\n                    
            max_encoding_message_size,\n                            );\n                        let res = grpc.unary(method, req).await;\n                        Ok(res)\n                    };\n                    Box::pin(fut)\n                }\n                \"/topos.tce.v1.APIService/GetLastPendingCertificates\" => {\n                    #[allow(non_camel_case_types)]\n                    struct GetLastPendingCertificatesSvc<T: ApiService>(pub Arc<T>);\n                    impl<\n                        T: ApiService,\n                    > tonic::server::UnaryService<\n                        super::GetLastPendingCertificatesRequest,\n                    > for GetLastPendingCertificatesSvc<T> {\n                        type Response = super::GetLastPendingCertificatesResponse;\n                        type Future = BoxFuture<\n                            tonic::Response<Self::Response>,\n                            tonic::Status,\n                        >;\n                        fn call(\n                            &mut self,\n                            request: tonic::Request<\n                                super::GetLastPendingCertificatesRequest,\n                            >,\n                        ) -> Self::Future {\n                            let inner = Arc::clone(&self.0);\n                            let fut = async move {\n                                <T as ApiService>::get_last_pending_certificates(\n                                        &inner,\n                                        request,\n                                    )\n                                    .await\n                            };\n                            Box::pin(fut)\n                        }\n                    }\n                    let accept_compression_encodings = self.accept_compression_encodings;\n                    let send_compression_encodings = self.send_compression_encodings;\n                    let max_decoding_message_size = 
self.max_decoding_message_size;\n                    let max_encoding_message_size = self.max_encoding_message_size;\n                    let inner = self.inner.clone();\n                    let fut = async move {\n                        let inner = inner.0;\n                        let method = GetLastPendingCertificatesSvc(inner);\n                        let codec = tonic::codec::ProstCodec::default();\n                        let mut grpc = tonic::server::Grpc::new(codec)\n                            .apply_compression_config(\n                                accept_compression_encodings,\n                                send_compression_encodings,\n                            )\n                            .apply_max_message_size_config(\n                                max_decoding_message_size,\n                                max_encoding_message_size,\n                            );\n                        let res = grpc.unary(method, req).await;\n                        Ok(res)\n                    };\n                    Box::pin(fut)\n                }\n                \"/topos.tce.v1.APIService/WatchCertificates\" => {\n                    #[allow(non_camel_case_types)]\n                    struct WatchCertificatesSvc<T: ApiService>(pub Arc<T>);\n                    impl<\n                        T: ApiService,\n                    > tonic::server::StreamingService<super::WatchCertificatesRequest>\n                    for WatchCertificatesSvc<T> {\n                        type Response = super::WatchCertificatesResponse;\n                        type ResponseStream = T::WatchCertificatesStream;\n                        type Future = BoxFuture<\n                            tonic::Response<Self::ResponseStream>,\n                            tonic::Status,\n                        >;\n                        fn call(\n                            &mut self,\n                            request: tonic::Request<\n                                
tonic::Streaming<super::WatchCertificatesRequest>,\n                            >,\n                        ) -> Self::Future {\n                            let inner = Arc::clone(&self.0);\n                            let fut = async move {\n                                <T as ApiService>::watch_certificates(&inner, request).await\n                            };\n                            Box::pin(fut)\n                        }\n                    }\n                    let accept_compression_encodings = self.accept_compression_encodings;\n                    let send_compression_encodings = self.send_compression_encodings;\n                    let max_decoding_message_size = self.max_decoding_message_size;\n                    let max_encoding_message_size = self.max_encoding_message_size;\n                    let inner = self.inner.clone();\n                    let fut = async move {\n                        let inner = inner.0;\n                        let method = WatchCertificatesSvc(inner);\n                        let codec = tonic::codec::ProstCodec::default();\n                        let mut grpc = tonic::server::Grpc::new(codec)\n                            .apply_compression_config(\n                                accept_compression_encodings,\n                                send_compression_encodings,\n                            )\n                            .apply_max_message_size_config(\n                                max_decoding_message_size,\n                                max_encoding_message_size,\n                            );\n                        let res = grpc.streaming(method, req).await;\n                        Ok(res)\n                    };\n                    Box::pin(fut)\n                }\n                _ => {\n                    Box::pin(async move {\n                        Ok(\n                            http::Response::builder()\n                                .status(200)\n                                
.header(\"grpc-status\", \"12\")\n                                .header(\"content-type\", \"application/grpc\")\n                                .body(empty_body())\n                                .unwrap(),\n                        )\n                    })\n                }\n            }\n        }\n    }\n    impl<T: ApiService> Clone for ApiServiceServer<T> {\n        fn clone(&self) -> Self {\n            let inner = self.inner.clone();\n            Self {\n                inner,\n                accept_compression_encodings: self.accept_compression_encodings,\n                send_compression_encodings: self.send_compression_encodings,\n                max_decoding_message_size: self.max_decoding_message_size,\n                max_encoding_message_size: self.max_encoding_message_size,\n            }\n        }\n    }\n    impl<T: ApiService> Clone for _Inner<T> {\n        fn clone(&self) -> Self {\n            Self(Arc::clone(&self.0))\n        }\n    }\n    impl<T: std::fmt::Debug> std::fmt::Debug for _Inner<T> {\n        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n            write!(f, \"{:?}\", self.0)\n        }\n    }\n    impl<T: ApiService> tonic::server::NamedService for ApiServiceServer<T> {\n        const NAME: &'static str = \"topos.tce.v1.APIService\";\n    }\n}\n#[allow(clippy::derive_partial_eq_without_eq)]\n#[derive(Clone, PartialEq, ::prost::Message)]\npub struct StatusRequest {}\n#[allow(clippy::derive_partial_eq_without_eq)]\n#[derive(Clone, PartialEq, ::prost::Message)]\npub struct StatusResponse {\n    #[prost(bool, tag = \"1\")]\n    pub has_active_sample: bool,\n}\n/// Generated client implementations.\npub mod console_service_client {\n    #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]\n    use tonic::codegen::*;\n    use tonic::codegen::http::Uri;\n    #[derive(Debug, Clone)]\n    pub struct ConsoleServiceClient<T> {\n        inner: tonic::client::Grpc<T>,\n    }\n    impl 
ConsoleServiceClient<tonic::transport::Channel> {\n        /// Attempt to create a new client by connecting to a given endpoint.\n        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>\n        where\n            D: TryInto<tonic::transport::Endpoint>,\n            D::Error: Into<StdError>,\n        {\n            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;\n            Ok(Self::new(conn))\n        }\n    }\n    impl<T> ConsoleServiceClient<T>\n    where\n        T: tonic::client::GrpcService<tonic::body::BoxBody>,\n        T::Error: Into<StdError>,\n        T::ResponseBody: Body<Data = Bytes> + Send + 'static,\n        <T::ResponseBody as Body>::Error: Into<StdError> + Send,\n    {\n        pub fn new(inner: T) -> Self {\n            let inner = tonic::client::Grpc::new(inner);\n            Self { inner }\n        }\n        pub fn with_origin(inner: T, origin: Uri) -> Self {\n            let inner = tonic::client::Grpc::with_origin(inner, origin);\n            Self { inner }\n        }\n        pub fn with_interceptor<F>(\n            inner: T,\n            interceptor: F,\n        ) -> ConsoleServiceClient<InterceptedService<T, F>>\n        where\n            F: tonic::service::Interceptor,\n            T::ResponseBody: Default,\n            T: tonic::codegen::Service<\n                http::Request<tonic::body::BoxBody>,\n                Response = http::Response<\n                    <T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody,\n                >,\n            >,\n            <T as tonic::codegen::Service<\n                http::Request<tonic::body::BoxBody>,\n            >>::Error: Into<StdError> + Send + Sync,\n        {\n            ConsoleServiceClient::new(InterceptedService::new(inner, interceptor))\n        }\n        /// Compress requests with the given encoding.\n        ///\n        /// This requires the server to support it otherwise it might respond with an\n        /// 
error.\n        #[must_use]\n        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {\n            self.inner = self.inner.send_compressed(encoding);\n            self\n        }\n        /// Enable decompressing responses.\n        #[must_use]\n        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {\n            self.inner = self.inner.accept_compressed(encoding);\n            self\n        }\n        /// Limits the maximum size of a decoded message.\n        ///\n        /// Default: `4MB`\n        #[must_use]\n        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {\n            self.inner = self.inner.max_decoding_message_size(limit);\n            self\n        }\n        /// Limits the maximum size of an encoded message.\n        ///\n        /// Default: `usize::MAX`\n        #[must_use]\n        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {\n            self.inner = self.inner.max_encoding_message_size(limit);\n            self\n        }\n        pub async fn status(\n            &mut self,\n            request: impl tonic::IntoRequest<super::StatusRequest>,\n        ) -> std::result::Result<tonic::Response<super::StatusResponse>, tonic::Status> {\n            self.inner\n                .ready()\n                .await\n                .map_err(|e| {\n                    tonic::Status::new(\n                        tonic::Code::Unknown,\n                        format!(\"Service was not ready: {}\", e.into()),\n                    )\n                })?;\n            let codec = tonic::codec::ProstCodec::default();\n            let path = http::uri::PathAndQuery::from_static(\n                \"/topos.tce.v1.ConsoleService/Status\",\n            );\n            let mut req = request.into_request();\n            req.extensions_mut()\n                .insert(GrpcMethod::new(\"topos.tce.v1.ConsoleService\", \"Status\"));\n            self.inner.unary(req, path, 
codec).await\n        }\n    }\n}\n/// Generated server implementations.\npub mod console_service_server {\n    #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]\n    use tonic::codegen::*;\n    /// Generated trait containing gRPC methods that should be implemented for use with ConsoleServiceServer.\n    #[async_trait]\n    pub trait ConsoleService: Send + Sync + 'static {\n        async fn status(\n            &self,\n            request: tonic::Request<super::StatusRequest>,\n        ) -> std::result::Result<tonic::Response<super::StatusResponse>, tonic::Status>;\n    }\n    #[derive(Debug)]\n    pub struct ConsoleServiceServer<T: ConsoleService> {\n        inner: _Inner<T>,\n        accept_compression_encodings: EnabledCompressionEncodings,\n        send_compression_encodings: EnabledCompressionEncodings,\n        max_decoding_message_size: Option<usize>,\n        max_encoding_message_size: Option<usize>,\n    }\n    struct _Inner<T>(Arc<T>);\n    impl<T: ConsoleService> ConsoleServiceServer<T> {\n        pub fn new(inner: T) -> Self {\n            Self::from_arc(Arc::new(inner))\n        }\n        pub fn from_arc(inner: Arc<T>) -> Self {\n            let inner = _Inner(inner);\n            Self {\n                inner,\n                accept_compression_encodings: Default::default(),\n                send_compression_encodings: Default::default(),\n                max_decoding_message_size: None,\n                max_encoding_message_size: None,\n            }\n        }\n        pub fn with_interceptor<F>(\n            inner: T,\n            interceptor: F,\n        ) -> InterceptedService<Self, F>\n        where\n            F: tonic::service::Interceptor,\n        {\n            InterceptedService::new(Self::new(inner), interceptor)\n        }\n        /// Enable decompressing requests with the given encoding.\n        #[must_use]\n        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {\n            
self.accept_compression_encodings.enable(encoding);\n            self\n        }\n        /// Compress responses with the given encoding, if the client supports it.\n        #[must_use]\n        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {\n            self.send_compression_encodings.enable(encoding);\n            self\n        }\n        /// Limits the maximum size of a decoded message.\n        ///\n        /// Default: `4MB`\n        #[must_use]\n        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {\n            self.max_decoding_message_size = Some(limit);\n            self\n        }\n        /// Limits the maximum size of an encoded message.\n        ///\n        /// Default: `usize::MAX`\n        #[must_use]\n        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {\n            self.max_encoding_message_size = Some(limit);\n            self\n        }\n    }\n    impl<T, B> tonic::codegen::Service<http::Request<B>> for ConsoleServiceServer<T>\n    where\n        T: ConsoleService,\n        B: Body + Send + 'static,\n        B::Error: Into<StdError> + Send + 'static,\n    {\n        type Response = http::Response<tonic::body::BoxBody>;\n        type Error = std::convert::Infallible;\n        type Future = BoxFuture<Self::Response, Self::Error>;\n        fn poll_ready(\n            &mut self,\n            _cx: &mut Context<'_>,\n        ) -> Poll<std::result::Result<(), Self::Error>> {\n            Poll::Ready(Ok(()))\n        }\n        fn call(&mut self, req: http::Request<B>) -> Self::Future {\n            let inner = self.inner.clone();\n            match req.uri().path() {\n                \"/topos.tce.v1.ConsoleService/Status\" => {\n                    #[allow(non_camel_case_types)]\n                    struct StatusSvc<T: ConsoleService>(pub Arc<T>);\n                    impl<\n                        T: ConsoleService,\n                    > 
tonic::server::UnaryService<super::StatusRequest>\n                    for StatusSvc<T> {\n                        type Response = super::StatusResponse;\n                        type Future = BoxFuture<\n                            tonic::Response<Self::Response>,\n                            tonic::Status,\n                        >;\n                        fn call(\n                            &mut self,\n                            request: tonic::Request<super::StatusRequest>,\n                        ) -> Self::Future {\n                            let inner = Arc::clone(&self.0);\n                            let fut = async move {\n                                <T as ConsoleService>::status(&inner, request).await\n                            };\n                            Box::pin(fut)\n                        }\n                    }\n                    let accept_compression_encodings = self.accept_compression_encodings;\n                    let send_compression_encodings = self.send_compression_encodings;\n                    let max_decoding_message_size = self.max_decoding_message_size;\n                    let max_encoding_message_size = self.max_encoding_message_size;\n                    let inner = self.inner.clone();\n                    let fut = async move {\n                        let inner = inner.0;\n                        let method = StatusSvc(inner);\n                        let codec = tonic::codec::ProstCodec::default();\n                        let mut grpc = tonic::server::Grpc::new(codec)\n                            .apply_compression_config(\n                                accept_compression_encodings,\n                                send_compression_encodings,\n                            )\n                            .apply_max_message_size_config(\n                                max_decoding_message_size,\n                                max_encoding_message_size,\n                            );\n                        
let res = grpc.unary(method, req).await;\n                        Ok(res)\n                    };\n                    Box::pin(fut)\n                }\n                _ => {\n                    Box::pin(async move {\n                        Ok(\n                            http::Response::builder()\n                                .status(200)\n                                .header(\"grpc-status\", \"12\")\n                                .header(\"content-type\", \"application/grpc\")\n                                .body(empty_body())\n                                .unwrap(),\n                        )\n                    })\n                }\n            }\n        }\n    }\n    impl<T: ConsoleService> Clone for ConsoleServiceServer<T> {\n        fn clone(&self) -> Self {\n            let inner = self.inner.clone();\n            Self {\n                inner,\n                accept_compression_encodings: self.accept_compression_encodings,\n                send_compression_encodings: self.send_compression_encodings,\n                max_decoding_message_size: self.max_decoding_message_size,\n                max_encoding_message_size: self.max_encoding_message_size,\n            }\n        }\n    }\n    impl<T: ConsoleService> Clone for _Inner<T> {\n        fn clone(&self) -> Self {\n            Self(Arc::clone(&self.0))\n        }\n    }\n    impl<T: std::fmt::Debug> std::fmt::Debug for _Inner<T> {\n        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n            write!(f, \"{:?}\", self.0)\n        }\n    }\n    impl<T: ConsoleService> tonic::server::NamedService for ConsoleServiceServer<T> {\n        const NAME: &'static str = \"topos.tce.v1.ConsoleService\";\n    }\n}\n#[derive(serde::Deserialize, serde::Serialize)]\n#[allow(clippy::derive_partial_eq_without_eq)]\n#[derive(Clone, PartialEq, ::prost::Message)]\npub struct Gossip {\n    #[prost(message, optional, tag = \"1\")]\n    pub certificate: 
::core::option::Option<super::super::uci::v1::Certificate>,\n}\n#[derive(serde::Deserialize, serde::Serialize)]\n#[allow(clippy::derive_partial_eq_without_eq)]\n#[derive(Clone, PartialEq, ::prost::Message)]\npub struct Echo {\n    #[prost(message, optional, tag = \"1\")]\n    pub certificate_id: ::core::option::Option<super::super::shared::v1::CertificateId>,\n    #[prost(message, optional, tag = \"2\")]\n    pub signature: ::core::option::Option<super::super::shared::v1::EcdsaSignature>,\n    #[prost(message, optional, tag = \"3\")]\n    pub validator_id: ::core::option::Option<super::super::shared::v1::ValidatorId>,\n}\n#[derive(serde::Deserialize, serde::Serialize)]\n#[allow(clippy::derive_partial_eq_without_eq)]\n#[derive(Clone, PartialEq, ::prost::Message)]\npub struct Ready {\n    #[prost(message, optional, tag = \"1\")]\n    pub certificate_id: ::core::option::Option<super::super::shared::v1::CertificateId>,\n    #[prost(message, optional, tag = \"2\")]\n    pub signature: ::core::option::Option<super::super::shared::v1::EcdsaSignature>,\n    #[prost(message, optional, tag = \"3\")]\n    pub validator_id: ::core::option::Option<super::super::shared::v1::ValidatorId>,\n}\n#[derive(serde::Deserialize, serde::Serialize)]\n#[allow(clippy::derive_partial_eq_without_eq)]\n#[derive(Clone, PartialEq, ::prost::Message)]\npub struct DoubleEchoRequest {\n    #[prost(oneof = \"double_echo_request::Request\", tags = \"1, 2, 3\")]\n    pub request: ::core::option::Option<double_echo_request::Request>,\n}\n/// Nested message and enum types in `DoubleEchoRequest`.\npub mod double_echo_request {\n    #[derive(serde::Deserialize, serde::Serialize)]\n    #[allow(clippy::derive_partial_eq_without_eq)]\n    #[derive(Clone, PartialEq, ::prost::Oneof)]\n    pub enum Request {\n        #[prost(message, tag = \"1\")]\n        Gossip(super::Gossip),\n        #[prost(message, tag = \"2\")]\n        Echo(super::Echo),\n        #[prost(message, tag = \"3\")]\n        
Ready(super::Ready),\n    }\n}\n#[derive(serde::Deserialize, serde::Serialize)]\n#[allow(clippy::derive_partial_eq_without_eq)]\n#[derive(Clone, PartialEq, ::prost::Message)]\npub struct Batch {\n    #[prost(bytes = \"vec\", repeated, tag = \"1\")]\n    pub messages: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec<u8>>,\n}\n"
  },
  {
    "path": "crates/topos-core/src/api/grpc/generated/topos.uci.v1.rs",
    "content": "/// Certificate - main exchange item\n#[derive(Eq, Hash, serde::Deserialize, serde::Serialize)]\n#[allow(clippy::derive_partial_eq_without_eq)]\n#[derive(Clone, PartialEq, ::prost::Message)]\npub struct Certificate {\n    #[prost(message, optional, tag = \"1\")]\n    pub prev_id: ::core::option::Option<super::super::shared::v1::CertificateId>,\n    #[prost(message, optional, tag = \"2\")]\n    pub source_subnet_id: ::core::option::Option<super::super::shared::v1::SubnetId>,\n    #[prost(bytes = \"vec\", tag = \"3\")]\n    pub state_root: ::prost::alloc::vec::Vec<u8>,\n    #[prost(bytes = \"vec\", tag = \"4\")]\n    pub tx_root_hash: ::prost::alloc::vec::Vec<u8>,\n    #[prost(bytes = \"vec\", tag = \"5\")]\n    pub receipts_root_hash: ::prost::alloc::vec::Vec<u8>,\n    #[prost(message, repeated, tag = \"6\")]\n    pub target_subnets: ::prost::alloc::vec::Vec<super::super::shared::v1::SubnetId>,\n    #[prost(uint32, tag = \"7\")]\n    pub verifier: u32,\n    #[prost(message, optional, tag = \"8\")]\n    pub id: ::core::option::Option<super::super::shared::v1::CertificateId>,\n    #[prost(message, optional, tag = \"9\")]\n    pub proof: ::core::option::Option<super::super::shared::v1::StarkProof>,\n    #[prost(message, optional, tag = \"10\")]\n    pub signature: ::core::option::Option<super::super::shared::v1::Frost>,\n}\n#[allow(clippy::derive_partial_eq_without_eq)]\n#[derive(Clone, PartialEq, ::prost::Message)]\npub struct OptionalCertificate {\n    #[prost(message, optional, tag = \"1\")]\n    pub value: ::core::option::Option<Certificate>,\n}\n"
  },
  {
    "path": "crates/topos-core/src/api/grpc/mod.rs",
    "content": "use self::checkpoints::StreamPositionError;\n\nuse tonic::transport::Channel;\n\nuse self::tce::v1::synchronizer_service_client::SynchronizerServiceClient;\n\npub const FILE_DESCRIPTOR_SET: &[u8] = include_bytes!(\"generated/topos.bin\");\n\npub mod checkpoints;\n\npub trait GrpcClient {\n    type Output;\n\n    fn init(destination: Channel) -> Self::Output;\n}\n\nimpl GrpcClient for SynchronizerServiceClient<Channel> {\n    type Output = Self;\n\n    fn init(channel: Channel) -> Self::Output {\n        SynchronizerServiceClient::new(channel)\n    }\n}\n\n#[derive(thiserror::Error, Debug)]\npub enum ConversionError {\n    #[error(transparent)]\n    GrpcDecode(#[from] prost::DecodeError),\n\n    #[error(\"Missing mandatory field: {0}\")]\n    MissingField(&'static str),\n\n    #[error(transparent)]\n    StreamConversion(#[from] StreamPositionError),\n}\n#[allow(warnings)]\n#[rustfmt::skip]\n#[path = \"generated/topos.p2p.rs\"]\npub mod p2p;\n\n#[path = \"\"]\npub mod tce {\n    #[rustfmt::skip]\n    #[allow(warnings)]\n    #[path = \"generated/topos.tce.v1.rs\"]\n    pub mod v1;\n\n    #[path = \"conversions/tce/v1/mod.rs\"]\n    pub mod v1_conversions;\n}\n\n#[path = \"\"]\npub mod shared {\n    #[rustfmt::skip]\n    #[allow(warnings)]\n    #[path = \"generated/topos.shared.v1.rs\"]\n    pub mod v1;\n\n    #[path = \"conversions/shared/v1/uuid.rs\"]\n    pub mod v1_conversions_uuid;\n\n    #[path = \"conversions/shared/v1/subnet.rs\"]\n    pub mod v1_conversions_subnet;\n\n    #[path = \"conversions/shared/v1/certificate.rs\"]\n    pub mod v1_conversions_certificate;\n\n    #[path = \"conversions/shared/v1/signature.rs\"]\n    pub mod v1_conversions_signature;\n\n    #[path = \"conversions/shared/v1/validator_id.rs\"]\n    pub mod v1_conversions_validator_id;\n}\n\n#[path = \".\"]\npub mod uci {\n    #[rustfmt::skip]\n    #[allow(warnings)]\n    #[path = \"generated/topos.uci.v1.rs\"]\n    pub mod v1;\n\n    #[path = \"conversions/uci/v1/uci.rs\"]\n 
   pub mod v1_conversions;\n}\n"
  },
  {
    "path": "crates/topos-core/src/api/mod.rs",
    "content": "pub mod graphql;\npub mod grpc;\n"
  },
  {
    "path": "crates/topos-core/src/errors.rs",
    "content": "use crate::api::grpc::checkpoints::StreamPositionError;\n\n#[derive(Debug, thiserror::Error)]\npub enum GrpcParsingError {\n    #[error(\"Malformed gRPC object: {0}\")]\n    GrpcMalformedType(&'static str),\n    #[error(transparent)]\n    PositionParsing(#[from] StreamPositionError),\n}\n"
  },
  {
    "path": "crates/topos-core/src/lib.rs",
    "content": "#[cfg_attr(docsrs, doc(cfg(feature = \"uci\")))]\npub mod uci;\n\n#[cfg_attr(docsrs, doc(cfg(feature = \"api\")))]\npub mod api;\n\npub mod errors;\npub mod types;\n\n#[cfg(test)]\nmod test;\n"
  },
  {
    "path": "crates/topos-core/src/test.rs",
    "content": "use crate::types::stream::Position;\n\n#[test]\nfn test_position() {\n    let zero = Position::ZERO;\n\n    let serialized = bincode::serialize(&zero).unwrap();\n\n    let deserialized: Position = bincode::deserialize(&serialized).unwrap();\n\n    assert_eq!(zero, deserialized);\n\n    let one: u64 = 1;\n\n    let serialized = bincode::serialize(&one).unwrap();\n\n    let deserialized: Position = bincode::deserialize(&serialized).unwrap();\n\n    assert_eq!(one, deserialized);\n}\n\n#[test]\nfn position_from_integer() {\n    let position: Position = (0u64).into();\n\n    assert_eq!(*position, 0);\n}\n"
  },
  {
    "path": "crates/topos-core/src/types/stream.rs",
    "content": "use std::{fmt, ops::Deref};\n\nuse serde::{Deserialize, Serialize};\nuse thiserror::Error;\n\nuse crate::uci::SubnetId;\n\n/// Represents the place of a certificate in the stream of the Source Subnet\n///\n/// The `Source` Subnet is the subnet that produced the certificate.\n/// A certificate should and will have the same position in this stream\n/// no matter which node or component delivered it. The position is an\n/// aggregation of the precedence chain of a certificate, starting by the\n/// genesis certificate represented by a certificate which have a prev_id\n/// equal to the `INITIAL_CERTIFICATE_ID`\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]\npub struct CertificateSourceStreamPosition {\n    // Source subnet id\n    pub subnet_id: SubnetId,\n    // Source certificate position\n    pub position: Position,\n}\n\nimpl fmt::Display for CertificateSourceStreamPosition {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        write!(f, \"{}:{}\", self.subnet_id, self.position)\n    }\n}\n\nimpl CertificateSourceStreamPosition {\n    pub fn new<P: Into<Position>>(subnet_id: SubnetId, position: P) -> Self {\n        Self {\n            subnet_id,\n            position: position.into(),\n        }\n    }\n}\n\n/// Represents the place of a certificate in the stream of a Target Subnet\n///\n/// A `Target` Subnet is a subnet that was defined as target by the certificate.\n/// A certificate can have multiple target subnets, leading to multiple\n/// CertificateTargetStreamPosition for the same certificate but never more than\n/// one CertificateTargetStreamPosition per couple (target, source).\n///\n/// The position of a certificate in a target stream will be the same accross\n/// the entire network.\n#[derive(Debug, Deserialize, Serialize, Clone, Copy)]\npub struct CertificateTargetStreamPosition {\n    pub target_subnet_id: SubnetId,\n    pub source_subnet_id: SubnetId,\n    pub position: Position,\n}\n\nimpl 
CertificateTargetStreamPosition {\n    pub fn new<P: Into<Position>>(\n        target_subnet_id: SubnetId,\n        source_subnet_id: SubnetId,\n        position: P,\n    ) -> Self {\n        Self {\n            target_subnet_id,\n            source_subnet_id,\n            position: position.into(),\n        }\n    }\n}\n\n/// Certificate index in a stream of both source or target subnet\n#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Copy)]\npub struct Position(u64);\n\nimpl TryFrom<Position> for usize {\n    type Error = PositionError;\n\n    fn try_from(position: Position) -> Result<usize, Self::Error> {\n        position\n            .0\n            .try_into()\n            .map_err(|_| PositionError::InvalidPosition)\n    }\n}\n\nimpl TryFrom<usize> for Position {\n    type Error = PositionError;\n\n    fn try_from(value: usize) -> Result<Self, Self::Error> {\n        Ok(Self(\n            u64::try_from(value).map_err(|_| PositionError::InvalidPosition)?,\n        ))\n    }\n}\n\nimpl From<u64> for Position {\n    fn from(value: u64) -> Self {\n        Self(value)\n    }\n}\n\nimpl Deref for Position {\n    type Target = u64;\n\n    fn deref(&self) -> &Self::Target {\n        &self.0\n    }\n}\n\nimpl PartialEq<Position> for u64 {\n    fn eq(&self, other: &Position) -> bool {\n        *self == other.0\n    }\n}\n\nimpl PartialOrd for Position {\n    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {\n        self.0.partial_cmp(&other.0)\n    }\n}\n\nimpl fmt::Display for Position {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        write!(f, \"{}\", self.0)\n    }\n}\n\nimpl Position {\n    pub const ZERO: Self = Self(0);\n\n    pub fn increment(self) -> Result<Self, PositionError> {\n        match self {\n            Self::ZERO => Ok(Self(1)),\n            Self(value) => value\n                .checked_add(1)\n                .ok_or(PositionError::MaximumPositionReached)\n                .map(Self),\n      
  }\n    }\n}\n\n#[derive(Debug, Error)]\npub enum PositionError {\n    #[error(\"Maximum position reached for subnet\")]\n    MaximumPositionReached,\n\n    #[error(\"Invalid expected position\")]\n    InvalidExpectedPosition,\n\n    #[error(\"\")]\n    InvalidPosition,\n}\n"
  },
  {
    "path": "crates/topos-core/src/types.rs",
    "content": "use crate::uci::{Certificate, CertificateId};\nuse serde::{Deserialize, Serialize};\n\nuse crate::errors::GrpcParsingError;\n\nuse self::stream::CertificateSourceStreamPosition;\nuse crate::api::grpc::{\n    checkpoints::SourceStreamPosition,\n    tce::v1::{ProofOfDelivery as GrpcProofOfDelivery, SignedReady},\n};\n\npub mod stream;\n\npub type Ready = String;\npub type Signature = String;\n\npub use topos_crypto::validator_id::Error as ValidatorIdConversionError;\npub use topos_crypto::validator_id::ValidatorId;\n\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]\npub struct CertificateDelivered {\n    pub certificate: Certificate,\n    pub proof_of_delivery: ProofOfDelivery,\n}\n\nimpl AsRef<CertificateDelivered> for CertificateDelivered {\n    fn as_ref(&self) -> &Self {\n        self\n    }\n}\n\n/// Certificate's Proof of Delivery\n///\n/// This structure is used to prove that a certificate has been delivered.\n/// It contains the certificate's ID, the position of the certificate in the\n/// source stream, the list of Ready messages received and the threshold.\n/// The threshold is the number of Ready messages required to consider the\n/// certificate as delivered. For a certificate, multiple Proofs of Delivery\n/// can be created on the network, each one with a different list of Ready messages.\n///\n/// Two different Proofs of Delivery for the same Certificate can still be valid\n/// if their Ready messages are valid. 
Because of the threshold, a certificate\n/// can be considered as delivered even with a different set of Ready messages,\n/// it simply means that the node received a different set of Ready messages\n/// than the other nodes.\n#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]\npub struct ProofOfDelivery {\n    /// The certificate's ID\n    pub certificate_id: CertificateId,\n    /// The position of the certificate in the source stream\n    pub delivery_position: CertificateSourceStreamPosition,\n    /// The list of Ready messages used to proove the certificate's delivery\n    pub readies: Vec<(Ready, Signature)>,\n    /// The threshold of Ready messages required to consider the certificate as delivered\n    pub threshold: u64,\n}\n\nimpl From<SourceStreamPosition> for CertificateSourceStreamPosition {\n    fn from(value: SourceStreamPosition) -> Self {\n        Self {\n            subnet_id: value.source_subnet_id,\n            position: value.position.into(),\n        }\n    }\n}\n\nimpl TryFrom<GrpcProofOfDelivery> for ProofOfDelivery {\n    type Error = GrpcParsingError;\n    fn try_from(value: GrpcProofOfDelivery) -> Result<Self, Self::Error> {\n        let position: SourceStreamPosition = value\n            .delivery_position\n            .ok_or(GrpcParsingError::GrpcMalformedType(\"position\"))?\n            .try_into()?;\n\n        Ok(Self {\n            certificate_id: position\n                .certificate_id\n                .ok_or(GrpcParsingError::GrpcMalformedType(\n                    \"position.certificate_id\",\n                ))?,\n            delivery_position: position.into(),\n            readies: value\n                .readies\n                .into_iter()\n                .map(|v| (v.ready, v.signature))\n                .collect(),\n            threshold: value.threshold,\n        })\n    }\n}\n\nimpl From<ProofOfDelivery> for GrpcProofOfDelivery {\n    fn from(value: ProofOfDelivery) -> Self {\n        Self {\n            
delivery_position: Some(\n                SourceStreamPosition {\n                    source_subnet_id: value.delivery_position.subnet_id,\n                    position: *value.delivery_position.position,\n                    certificate_id: Some(value.certificate_id),\n                }\n                .into(),\n            ),\n            readies: value\n                .readies\n                .into_iter()\n                .map(|v| SignedReady {\n                    ready: v.0,\n                    signature: v.1,\n                })\n                .collect(),\n            threshold: value.threshold,\n        }\n    }\n}\n"
  },
  {
    "path": "crates/topos-core/src/uci/certificate.rs",
    "content": "use serde::{Deserialize, Serialize};\nuse std::borrow::Borrow;\nuse std::fmt::Debug;\n\nuse super::{\n    CertificateId, Error, Frost, ReceiptsRootHash, StarkProof, StateRoot, SubnetId, TxRootHash,\n    CERTIFICATE_ID_LENGTH, DUMMY_FROST_VERIF_DELAY, DUMMY_STARK_DELAY,\n};\n\n/// Certificate - main exchange item\n#[derive(Clone, Default, Serialize, Deserialize, PartialEq, Eq, Hash)]\npub struct Certificate {\n    pub id: CertificateId,\n    pub prev_id: CertificateId,\n    pub source_subnet_id: SubnetId,\n    pub state_root: StateRoot,\n    pub tx_root_hash: TxRootHash,\n    pub receipts_root_hash: ReceiptsRootHash,\n    pub target_subnets: Vec<SubnetId>,\n    pub verifier: u32,\n    pub proof: StarkProof,\n    pub signature: Frost,\n}\n\nimpl AsRef<Certificate> for Certificate {\n    fn as_ref(&self) -> &Self {\n        self\n    }\n}\n\nimpl Debug for Certificate {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        f.debug_struct(\"Certificate\")\n            .field(\"id\", &self.id.to_string())\n            .field(\"prev_id\", &self.prev_id.to_string())\n            .field(\"source_subnet_id\", &self.source_subnet_id.to_string())\n            .field(\n                \"state_root\",\n                &(\"0x\".to_string() + &hex::encode(self.state_root)),\n            )\n            .field(\n                \"tx_root_hash\",\n                &(\"0x\".to_string() + &hex::encode(self.tx_root_hash)),\n            )\n            .field(\n                \"receipts_root_hash\",\n                &(\"0x\".to_string() + &hex::encode(self.receipts_root_hash)),\n            )\n            .field(\n                \"target_subnets\",\n                &self\n                    .target_subnets\n                    .iter()\n                    .map(|ts| ts.to_string())\n                    .collect::<Vec<_>>(),\n            )\n            .field(\"verifier\", &self.verifier)\n            .field(\"proof\", &(\"0x\".to_string() + 
&hex::encode(&self.proof)))\n            .field(\n                \"signature\",\n                &(\"0x\".to_string() + &hex::encode(&self.signature)),\n            )\n            .finish()\n    }\n}\n\nimpl Certificate {\n    #[allow(clippy::too_many_arguments)]\n    pub fn new<P: Into<CertificateId>>(\n        prev_id: P,\n        source_subnet_id: SubnetId,\n        state_root: StateRoot,\n        tx_root_hash: TxRootHash,\n        receipts_root_hash: ReceiptsRootHash,\n        target_subnets: &[SubnetId],\n        verifier: u32,\n        proof: Vec<u8>,\n    ) -> Result<Certificate, Box<dyn std::error::Error>> {\n        let mut cert = Certificate {\n            id: [0; CERTIFICATE_ID_LENGTH].into(),\n            prev_id: prev_id.into(),\n            source_subnet_id,\n            state_root,\n            tx_root_hash,\n            receipts_root_hash,\n            target_subnets: target_subnets.into(),\n            verifier,\n            proof,\n            signature: Default::default(),\n        };\n\n        cert.id = Self::calculate_cert_id(&cert)?.into();\n        Ok(cert)\n    }\n\n    pub fn new_with_default_fields<P: Into<CertificateId>>(\n        prev_id: P,\n        source_subnet_id: SubnetId,\n        target_subnets: &[SubnetId],\n    ) -> Result<Certificate, Box<dyn std::error::Error>> {\n        let mut cert = Certificate {\n            id: [0; CERTIFICATE_ID_LENGTH].into(),\n            prev_id: prev_id.into(),\n            source_subnet_id,\n            state_root: Default::default(),\n            tx_root_hash: Default::default(),\n            receipts_root_hash: Default::default(),\n            target_subnets: target_subnets.into(),\n            verifier: 0,\n            proof: Default::default(),\n            signature: Default::default(),\n        };\n\n        cert.id = Self::calculate_cert_id(&cert)?.into();\n        Ok(cert)\n    }\n\n    pub fn check_signature(&self) -> Result<(), Error> {\n        
std::thread::sleep(DUMMY_FROST_VERIF_DELAY);\n        Ok(())\n    }\n\n    pub fn check_proof(&self) -> Result<(), Error> {\n        std::thread::sleep(DUMMY_STARK_DELAY);\n        Ok(())\n    }\n\n    /// Signs the hash of the certificate payload\n    pub fn update_signature(&mut self, private_key: &[u8]) -> Result<(), Error> {\n        self.signature =\n            topos_crypto::signatures::sign(private_key, self.get_payload().as_slice())?;\n        Ok(())\n    }\n\n    /// Get byte payload of the certificate\n    /// Excludes frost signature\n    pub fn get_payload(&self) -> Vec<u8> {\n        let mut buffer = Vec::new();\n        buffer.extend(self.id.as_array().as_ref());\n        buffer.extend_from_slice(self.prev_id.as_array().as_ref());\n        buffer.extend_from_slice(self.source_subnet_id.as_array().as_ref());\n        buffer.extend_from_slice(self.state_root.as_ref());\n        buffer.extend_from_slice(self.tx_root_hash.as_ref());\n        buffer.extend_from_slice(self.receipts_root_hash.as_ref());\n        for target_subnet in &self.target_subnets {\n            buffer.extend_from_slice(target_subnet.as_array().as_ref());\n        }\n        buffer.extend(self.verifier.to_be_bytes().as_ref());\n        buffer.extend(self.proof.as_slice());\n        buffer\n    }\n\n    // To get unique id, calculate certificate id of certificate object using keccak256,\n    // excluding cert_id and signature fields\n    fn calculate_cert_id(certificate: &Certificate) -> Result<[u8; CERTIFICATE_ID_LENGTH], Error> {\n        let mut buffer = Vec::new();\n        buffer.extend_from_slice(certificate.prev_id.as_array().as_ref());\n        buffer.extend_from_slice(certificate.source_subnet_id.as_array().as_ref());\n        buffer.extend_from_slice(certificate.state_root.as_ref());\n        buffer.extend_from_slice(certificate.tx_root_hash.as_ref());\n        buffer.extend_from_slice(certificate.receipts_root_hash.as_ref());\n        for target_subnet in 
&certificate.target_subnets {\n            buffer.extend_from_slice(target_subnet.as_array().as_ref());\n        }\n        buffer.extend_from_slice(certificate.verifier.to_be_bytes().as_ref());\n        buffer.extend_from_slice(certificate.proof.as_ref());\n        let hash = topos_crypto::hash::calculate_hash(buffer.borrow());\n        Ok(hash)\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::uci::SUBNET_ID_LENGTH;\n\n    use super::*;\n    const PREV_CERTIFICATE_ID: CertificateId =\n        CertificateId::from_array([1u8; CERTIFICATE_ID_LENGTH]);\n    const TARGET_SUBNET_ID: SubnetId = SubnetId::from_array([3u8; SUBNET_ID_LENGTH]);\n    const STATE_ROOT: StateRoot = [4u8; 32];\n    const TX_ROOT_HASH: TxRootHash = [5u8; 32];\n    const RECEIPTS_ROOT_HASH: ReceiptsRootHash = [6u8; 32];\n    const PRIVATE_TEST_KEY: &str =\n        \"5fb92d6e98884f76de468fa3f6278f8807c48bebc13595d45af5bdc4da702133\";\n\n    fn generate_dummy_cert(signing_key: &[u8]) -> Certificate {\n        let public_key =\n            topos_crypto::keys::derive_public_key(signing_key).expect(\"valid public key\");\n        let source_subnet_id: [u8; SUBNET_ID_LENGTH] = public_key[1..33].try_into().unwrap();\n\n        Certificate::new(\n            PREV_CERTIFICATE_ID,\n            source_subnet_id.into(),\n            STATE_ROOT,\n            TX_ROOT_HASH,\n            RECEIPTS_ROOT_HASH,\n            &[TARGET_SUBNET_ID],\n            2,\n            Default::default(),\n        )\n        .expect(\"Dummy certificate\")\n    }\n\n    #[test]\n    fn certificate_signatures() {\n        let private_test_key = hex::decode(PRIVATE_TEST_KEY).unwrap();\n\n        let mut dummy_cert = generate_dummy_cert(&private_test_key);\n        dummy_cert\n            .update_signature(private_test_key.as_slice())\n            .expect(\"valid signature update\");\n\n        topos_crypto::signatures::verify(\n            &dummy_cert.source_subnet_id.to_secp256k1_public_key(),\n            
dummy_cert.get_payload().as_slice(),\n            dummy_cert.signature.as_slice(),\n        )\n        .expect(\"valid signature check\")\n    }\n\n    #[test]\n    #[should_panic]\n    fn signature_verification_failed_corrupt_data() {\n        let private_test_key = hex::decode(PRIVATE_TEST_KEY).unwrap();\n        let mut dummy_cert = generate_dummy_cert(&private_test_key);\n\n        dummy_cert\n            .update_signature(private_test_key.as_slice())\n            .expect(\"valid signature update\");\n\n        dummy_cert.state_root[0] = 0xff;\n\n        let public_key = topos_crypto::keys::derive_public_key(private_test_key.as_slice())\n            .expect(\"valid public key\");\n\n        topos_crypto::signatures::verify(\n            &public_key,\n            dummy_cert.get_payload().as_slice(),\n            dummy_cert.signature.as_slice(),\n        )\n        .expect(\"invalid valid signature check\")\n    }\n\n    #[test]\n    #[should_panic]\n    fn signature_verification_failed_invalid_public_key() {\n        let private_test_key = hex::decode(PRIVATE_TEST_KEY).unwrap();\n        let mut dummy_cert = generate_dummy_cert(&private_test_key);\n\n        dummy_cert\n            .update_signature(private_test_key.as_slice())\n            .expect(\"valid signature update\");\n\n        dummy_cert.state_root[0] = 0xff;\n\n        let mut public_key = topos_crypto::keys::derive_public_key(private_test_key.as_slice())\n            .expect(\"valid public key\");\n        public_key[3] = 0xff;\n\n        topos_crypto::signatures::verify(\n            &dummy_cert.source_subnet_id.to_secp256k1_public_key(),\n            dummy_cert.get_payload().as_slice(),\n            dummy_cert.signature.as_slice(),\n        )\n        .expect(\"invalid valid signature check\")\n    }\n}\n"
  },
  {
    "path": "crates/topos-core/src/uci/certificate_id.rs",
    "content": "use serde::{Deserialize, Serialize};\nuse std::fmt::{Debug, Display};\nuse std::hash::Hash;\n\nuse super::{Error, CERTIFICATE_ID_LENGTH, HEX_CERTIFICATE_ID_LENGTH};\n\npub const INITIAL_CERTIFICATE_ID: CertificateId =\n    CertificateId::from_array([0u8; super::CERTIFICATE_ID_LENGTH]);\n\n#[derive(Serialize, Hash, Deserialize, Default, PartialEq, Eq, Clone, Copy)]\npub struct CertificateId {\n    id: [u8; CERTIFICATE_ID_LENGTH],\n}\n\nimpl Display for CertificateId {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        write!(f, \"0x{}\", hex::encode(self.id))\n    }\n}\n\nimpl Debug for CertificateId {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        write!(f, \"0x{}\", hex::encode(self.id))\n    }\n}\n\nimpl Ord for CertificateId {\n    fn cmp(&self, other: &Self) -> std::cmp::Ordering {\n        self.id.cmp(&other.id)\n    }\n}\n\nimpl PartialOrd for CertificateId {\n    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {\n        Some(self.id.cmp(&other.id))\n    }\n}\n\nimpl From<[u8; CERTIFICATE_ID_LENGTH]> for CertificateId {\n    fn from(value: [u8; CERTIFICATE_ID_LENGTH]) -> Self {\n        Self { id: value }\n    }\n}\n\nimpl From<CertificateId> for Vec<u8> {\n    fn from(value: CertificateId) -> Vec<u8> {\n        value.id.to_vec()\n    }\n}\n\nimpl TryFrom<&[u8]> for CertificateId {\n    type Error = Error;\n\n    fn try_from(value: &[u8]) -> Result<Self, Self::Error> {\n        let value = if value.starts_with(b\"0x\")\n            && (value.len() == (HEX_CERTIFICATE_ID_LENGTH + 2)\n                || value.len() == (CERTIFICATE_ID_LENGTH + 2))\n        {\n            &value[2..]\n        } else {\n            value\n        };\n\n        let length = value.len();\n\n        if length != CERTIFICATE_ID_LENGTH && length != HEX_CERTIFICATE_ID_LENGTH {\n            return Err(Error::ValidationError(format!(\n                \"invalid certificate id length 
{length} - should be {CERTIFICATE_ID_LENGTH} bytes \\\n                 array or hex encoded string of size {HEX_CERTIFICATE_ID_LENGTH}\"\n            )));\n        }\n\n        let mut id = [0; CERTIFICATE_ID_LENGTH];\n\n        if length == HEX_CERTIFICATE_ID_LENGTH {\n            let value = hex::decode(value).map_err(|_| {\n                Error::ValidationError(format!(\n                    \"invalid hex encoded certificate id string: {value:?}\"\n                ))\n            })?;\n\n            id.copy_from_slice(&value[..])\n        } else {\n            id.copy_from_slice(value);\n        }\n\n        Ok(Self { id })\n    }\n}\n\nimpl CertificateId {\n    pub const fn from_array(id: [u8; CERTIFICATE_ID_LENGTH]) -> Self {\n        Self { id }\n    }\n\n    pub const fn as_array(&self) -> &[u8; CERTIFICATE_ID_LENGTH] {\n        &self.id\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::CertificateId;\n\n    const CERTIFICATE_ID_WITH_PREFIX: &str =\n        \"0x11db8713a79c41625f4bb2221bd43ac4766fff23e78f82212f48713a6768e76a\";\n    const CERTIFICATE_ID_WITHOUT_PREFIX: &str =\n        \"11db8713a79c41625f4bb2221bd43ac4766fff23e78f82212f48713a6768e76a\";\n    const MALFORMATTED_CERTIFICATE_ID: &str = \"invalid_hex_string\";\n\n    #[test]\n    fn convert_cert_id_string_with_prefix() {\n        let certificate_id: CertificateId = CERTIFICATE_ID_WITH_PREFIX\n            .as_bytes()\n            .try_into()\n            .expect(\"Cannot convert to CertificateID\");\n\n        let expected_bytes: &[u8] = &[\n            0x11, 0xdb, 0x87, 0x13, 0xa7, 0x9c, 0x41, 0x62, 0x5f, 0x4b, 0xb2, 0x22, 0x1b, 0xd4,\n            0x3a, 0xc4, 0x76, 0x6f, 0xff, 0x23, 0xe7, 0x8f, 0x82, 0x21, 0x2f, 0x48, 0x71, 0x3a,\n            0x67, 0x68, 0xe7, 0x6a,\n        ];\n\n        assert_eq!(certificate_id.id.as_slice(), expected_bytes)\n    }\n\n    #[test]\n    fn convert_cert_id_string_without_prefix() {\n        let certificate_id: &[u8] =\n            
&hex::decode(CERTIFICATE_ID_WITHOUT_PREFIX).expect(\"Cannot convert to CertificateI\");\n\n        let certificate_id: CertificateId = certificate_id\n            .try_into()\n            .expect(\"Cannot transform bytes to CertificateId\");\n\n        let expected_bytes: &[u8] = &[\n            0x11, 0xdb, 0x87, 0x13, 0xa7, 0x9c, 0x41, 0x62, 0x5f, 0x4b, 0xb2, 0x22, 0x1b, 0xd4,\n            0x3a, 0xc4, 0x76, 0x6f, 0xff, 0x23, 0xe7, 0x8f, 0x82, 0x21, 0x2f, 0x48, 0x71, 0x3a,\n            0x67, 0x68, 0xe7, 0x6a,\n        ];\n\n        assert_eq!(certificate_id.id.as_slice(), expected_bytes)\n    }\n\n    #[test]\n    fn malformatted_cert_id() {\n        let certificate_id = CertificateId::try_from(MALFORMATTED_CERTIFICATE_ID.as_bytes());\n\n        assert!(certificate_id.is_err());\n    }\n}\n"
  },
  {
    "path": "crates/topos-core/src/uci/mod.rs",
    "content": "//! Universal Certificate Interface\n//!\n//! Data structures to support Certificates' exchange\n\npub use certificate::Certificate;\npub use certificate_id::CertificateId;\npub use subnet_id::SubnetId;\n\nuse std::fmt::Debug;\nuse std::time;\nuse thiserror::Error;\n\nmod certificate;\nmod certificate_id;\nmod subnet_id;\n\npub const CERTIFICATE_ID_LENGTH: usize = 32;\npub const HEX_CERTIFICATE_ID_LENGTH: usize = 64;\npub const SUBNET_ID_LENGTH: usize = 32;\npub use certificate_id::INITIAL_CERTIFICATE_ID;\n\npub type StarkProof = Vec<u8>;\npub type Frost = Vec<u8>;\npub type Address = [u8; 20];\npub type Amount = ethereum_types::U256;\npub type StateRoot = [u8; 32];\npub type TxRootHash = [u8; 32];\npub type ReceiptsRootHash = [u8; 32];\n\n/// Heavily checked on the gossip, so not abstracted\nconst DUMMY_FROST_VERIF_DELAY: time::Duration = time::Duration::from_millis(0);\n\n/// Zero second to abstract it by considering having a great machine\nconst DUMMY_STARK_DELAY: time::Duration = time::Duration::from_millis(0);\n\n#[derive(Debug, Error)]\npub enum Error {\n    #[error(\"certificate validation error: {0}\")]\n    ValidationError(String),\n\n    #[error(\"topos crypto error: {0}\")]\n    CryptoError(#[from] topos_crypto::Error),\n}\n"
  },
  {
    "path": "crates/topos-core/src/uci/subnet_id.rs",
    "content": "use serde::{Deserialize, Serialize};\nuse std::fmt::{Debug, Display};\nuse std::hash::Hash;\nuse std::str::FromStr;\n\nuse super::{Error, SUBNET_ID_LENGTH};\n\n#[derive(Serialize, Hash, Deserialize, Default, PartialEq, Eq, Clone, Copy)]\npub struct SubnetId {\n    id: [u8; SUBNET_ID_LENGTH],\n}\n\nimpl Display for SubnetId {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        write!(f, \"0x{}\", hex::encode(self.id))\n    }\n}\n\nimpl Debug for SubnetId {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        write!(f, \"0x{}\", hex::encode(self.id))\n    }\n}\n\nimpl Ord for SubnetId {\n    fn cmp(&self, other: &Self) -> std::cmp::Ordering {\n        self.id.cmp(&other.id)\n    }\n}\n\nimpl PartialOrd for SubnetId {\n    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {\n        Some(self.id.cmp(&other.id))\n    }\n}\n\nimpl From<[u8; SUBNET_ID_LENGTH]> for SubnetId {\n    fn from(value: [u8; SUBNET_ID_LENGTH]) -> Self {\n        Self { id: value }\n    }\n}\n\nimpl From<SubnetId> for [u8; SUBNET_ID_LENGTH] {\n    fn from(value: SubnetId) -> Self {\n        value.id\n    }\n}\n\nimpl From<SubnetId> for Vec<u8> {\n    fn from(value: SubnetId) -> Vec<u8> {\n        value.id.to_vec()\n    }\n}\n\nimpl TryFrom<&[u8]> for SubnetId {\n    type Error = Error;\n\n    fn try_from(value: &[u8]) -> Result<Self, Self::Error> {\n        if value.len() != SUBNET_ID_LENGTH {\n            return Err(Error::ValidationError(format!(\n                \"invalid subnet id of length {}, expected length {SUBNET_ID_LENGTH}\",\n                value.len()\n            )));\n        }\n\n        let mut id = [0; SUBNET_ID_LENGTH];\n        id.copy_from_slice(value);\n\n        Ok(Self { id })\n    }\n}\n\nimpl FromStr for SubnetId {\n    type Err = Error;\n\n    fn from_str(s: &str) -> Result<Self, Self::Err> {\n        let s = if s.starts_with(\"0x\") {\n            
hex::decode(&s[2..s.len()]).map_err(|e| {\n                Error::ValidationError(format!(\n                    \"could not decode subnet id hex encoded string '{s}' error: {e}\"\n                ))\n            })?\n        } else {\n            s.as_bytes().to_vec()\n        };\n\n        s.as_slice().try_into()\n    }\n}\n\nimpl PartialEq<[u8]> for SubnetId {\n    fn eq(&self, other: &[u8]) -> bool {\n        if let Ok(current) = Self::try_from(other) {\n            self.as_array().eq(current.as_array())\n        } else {\n            false\n        }\n    }\n}\n\nimpl SubnetId {\n    pub const fn from_array(id: [u8; SUBNET_ID_LENGTH]) -> Self {\n        Self { id }\n    }\n\n    pub const fn as_array(&self) -> &[u8; SUBNET_ID_LENGTH] {\n        &self.id\n    }\n\n    pub fn to_secp256k1_public_key(&self) -> [u8; 33] {\n        let mut public_key: [u8; 33] = [0; 33];\n        public_key[0] = 0x02;\n        public_key[1..(self.id.len() + 1)].copy_from_slice(&self.id[..]);\n        public_key\n    }\n}\n"
  },
  {
    "path": "crates/topos-core/tests/tce_layer.rs",
    "content": "use async_stream::stream;\nuse futures::{channel::oneshot, FutureExt};\nuse futures::{Stream, StreamExt};\nuse rstest::rstest;\nuse std::collections::HashMap;\nuse std::pin::Pin;\nuse std::time::Duration;\nuse test_log::test;\nuse tokio::sync::mpsc;\nuse tonic::transport::Endpoint;\nuse tonic::{transport::Server, Request, Response, Status, Streaming};\nuse topos_core::api::grpc::shared::v1::checkpoints::TargetCheckpoint;\nuse topos_core::api::grpc::shared::v1::positions::SourceStreamPosition;\nuse topos_core::api::grpc::shared::v1::{CertificateId, SubnetId};\nuse topos_core::api::grpc::tce::v1::api_service_server::{ApiService, ApiServiceServer};\nuse topos_core::api::grpc::tce::v1::synchronizer_service_client::SynchronizerServiceClient;\nuse topos_core::api::grpc::tce::v1::watch_certificates_request::{Command, OpenStream};\nuse topos_core::api::grpc::tce::v1::{\n    GetLastPendingCertificatesRequest, GetLastPendingCertificatesResponse, GetSourceHeadRequest,\n    GetSourceHeadResponse, LastPendingCertificate, SubmitCertificateRequest,\n    SubmitCertificateResponse, WatchCertificatesRequest, WatchCertificatesResponse,\n};\nuse topos_core::api::grpc::uci::v1::Certificate;\nuse topos_core::api::grpc::{shared, GrpcClient};\nuse uuid::Uuid;\n\nuse topos_test_sdk::constants::*;\n\n#[test(tokio::test)]\nasync fn create_tce_layer() {\n    struct TceServer;\n    use base64ct::{Base64, Encoding};\n\n    #[tonic::async_trait]\n    impl ApiService for TceServer {\n        type WatchCertificatesStream =\n            Pin<Box<dyn Stream<Item = Result<WatchCertificatesResponse, Status>> + Send + 'static>>;\n\n        async fn submit_certificate(\n            &self,\n            _request: Request<SubmitCertificateRequest>,\n        ) -> Result<Response<SubmitCertificateResponse>, tonic::Status> {\n            Ok(Response::new(SubmitCertificateResponse {}))\n        }\n\n        async fn get_source_head(\n            &self,\n            request: 
Request<GetSourceHeadRequest>,\n        ) -> Result<Response<GetSourceHeadResponse>, tonic::Status> {\n            let request = request.into_inner();\n            let return_certificate_id: CertificateId = CERTIFICATE_ID_2.into();\n            let return_prev_certificate_id: CertificateId = CERTIFICATE_ID_1.into();\n            Ok(Response::new(GetSourceHeadResponse {\n                position: Some(SourceStreamPosition {\n                    source_subnet_id: request.subnet_id.clone(),\n                    certificate_id: Some(return_certificate_id.clone()),\n                    position: 0,\n                }),\n                certificate: Some(Certificate {\n                    source_subnet_id: request.subnet_id,\n                    id: Some(return_certificate_id),\n                    prev_id: Some(return_prev_certificate_id),\n                    target_subnets: Vec::new(),\n                    ..Default::default()\n                }),\n            }))\n        }\n\n        async fn get_last_pending_certificates(\n            &self,\n            request: Request<GetLastPendingCertificatesRequest>,\n        ) -> Result<Response<GetLastPendingCertificatesResponse>, Status> {\n            let request = request.into_inner();\n            let subnet_ids = request.subnet_ids;\n\n            let return_certificate_id: CertificateId = CERTIFICATE_ID_2.into();\n            let return_prev_certificate_id: CertificateId = CERTIFICATE_ID_1.into();\n\n            let mut map = HashMap::new();\n            for subnet_id in subnet_ids {\n                map.insert(\n                    Base64::encode_string(&subnet_id.value),\n                    LastPendingCertificate {\n                        value: Some(Certificate {\n                            source_subnet_id: subnet_id.into(),\n                            id: Some(return_certificate_id.clone()),\n                            prev_id: Some(return_prev_certificate_id.clone()),\n                            
target_subnets: Vec::new(),\n                            ..Default::default()\n                        }),\n                        index: 0,\n                    },\n                );\n            }\n            Ok(Response::new(GetLastPendingCertificatesResponse {\n                last_pending_certificate: map,\n            }))\n        }\n\n        async fn watch_certificates(\n            &self,\n            request: Request<tonic::Streaming<WatchCertificatesRequest>>,\n        ) -> Result<Response<Self::WatchCertificatesStream>, tonic::Status> {\n            let mut stream: Streaming<_> = request.into_inner();\n            let (tx, mut rx) = mpsc::channel::<WatchCertificatesResponse>(10);\n\n            let output = stream! {\n                loop {\n                    tokio::select! {\n                        Some(_message) = stream.next() => {\n                            let tx = tx.clone();\n                            tokio::spawn(async move {\n                                let _ = tx.send(WatchCertificatesResponse {\n                                    request_id: Some(Uuid::new_v4().into()),\n                                    event: None\n\n                                }).await;\n\n                            });\n                        }\n\n                        Some(event) = rx.recv() => {\n                            yield Ok(event);\n                        }\n\n                    }\n                }\n            };\n\n            Ok(Response::new(\n                Box::pin(output) as Self::WatchCertificatesStream\n            ))\n        }\n    }\n\n    let (tx, rx) = oneshot::channel();\n    let svc = ApiServiceServer::new(TceServer);\n\n    let jh = tokio::spawn(async move {\n        Server::builder()\n            .add_service(svc)\n            .serve_with_shutdown(\"127.0.0.1:1340\".parse().unwrap(), rx.map(drop))\n            .await\n            .unwrap();\n    });\n\n    tokio::time::sleep(Duration::from_millis(100)).await;\n\n   
 let mut client = topos_core::api::grpc::tce::v1::api_service_client::ApiServiceClient::connect(\n        \"http://127.0.0.1:1340\",\n    )\n    .await\n    .unwrap();\n\n    let source_subnet_id: SubnetId = SOURCE_SUBNET_ID_1.into();\n\n    let prev_certificate_id: CertificateId = CERTIFICATE_ID_1.into();\n    let certificate_id: CertificateId = CERTIFICATE_ID_2.into();\n\n    let original_certificate = Certificate {\n        source_subnet_id: Some(source_subnet_id.clone()),\n        id: Some(certificate_id),\n        prev_id: Some(prev_certificate_id),\n        target_subnets: vec![],\n        ..Default::default()\n    };\n\n    // Submit one certificate\n    let response = client\n        .submit_certificate(SubmitCertificateRequest {\n            certificate: Some(original_certificate.clone()),\n        })\n        .await\n        .map(|r| r.into_inner())\n        .unwrap();\n    assert_eq!(response, SubmitCertificateResponse {});\n\n    // Test get source head certificate\n    let response = client\n        .get_source_head(GetSourceHeadRequest {\n            subnet_id: Some(source_subnet_id.clone()),\n        })\n        .await\n        .map(|r| r.into_inner())\n        .unwrap();\n    let expected_response = GetSourceHeadResponse {\n        certificate: Some(original_certificate.clone()),\n        position: Some(SourceStreamPosition {\n            source_subnet_id: Some(source_subnet_id.clone()),\n            certificate_id: original_certificate.id.clone(),\n            position: 0,\n        }),\n    };\n    assert_eq!(response, expected_response);\n\n    // Test last pending certificate\n    let response = client\n        .get_last_pending_certificates(GetLastPendingCertificatesRequest {\n            subnet_ids: vec![source_subnet_id.clone()],\n        })\n        .await\n        .map(|r| r.into_inner())\n        .unwrap();\n\n    let mut expected_last_pending_certificate_ids = HashMap::new();\n    expected_last_pending_certificate_ids.insert(\n        
Base64::encode_string(&source_subnet_id.value),\n        LastPendingCertificate {\n            value: Some(original_certificate.clone()),\n            index: 0,\n        },\n    );\n\n    let expected_response = GetLastPendingCertificatesResponse {\n        last_pending_certificate: expected_last_pending_certificate_ids,\n    };\n    assert_eq!(response, expected_response);\n\n    let command = Some(Command::OpenStream(OpenStream {\n        target_checkpoint: Some(TargetCheckpoint {\n            target_subnet_ids: vec![source_subnet_id.clone()],\n            positions: Vec::new(),\n        }),\n        source_checkpoint: None,\n    }));\n    let request_id: shared::v1::Uuid = Uuid::new_v4().into();\n    let first_request = WatchCertificatesRequest {\n        request_id: Some(request_id),\n        command,\n    };\n\n    let mut first_request_short: WatchCertificatesRequest = OpenStream {\n        target_checkpoint: Some(TargetCheckpoint {\n            target_subnet_ids: vec![source_subnet_id],\n            positions: Vec::new(),\n        }),\n        source_checkpoint: None,\n    }\n    .into();\n    first_request_short.request_id = Some(request_id);\n\n    assert_eq!(first_request, first_request_short);\n\n    let outbound = stream! {\n        yield first_request;\n    };\n\n    let mut stream = client\n        .watch_certificates(outbound)\n        .await\n        .map(|r| r.into_inner())\n        .unwrap();\n\n    let message = stream.message().await.unwrap();\n    assert!(matches!(message, Some(WatchCertificatesResponse { .. })));\n\n    tx.send(()).unwrap();\n    drop(stream);\n    jh.await.unwrap();\n}\n\n#[rstest]\n#[test(tokio::test)]\nasync fn create_grpc_client() {\n    let entrypoint = Endpoint::from_static(\"http://127.0.0.1:1340\").connect_lazy();\n\n    let _client = SynchronizerServiceClient::init(entrypoint);\n}\n"
  },
  {
    "path": "crates/topos-crypto/Cargo.toml",
    "content": "[package]\nname = \"topos-crypto\"\ndescription = \"Implementation of the Topos cryptography utility functions\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n[lints]\nworkspace = true\n\n[dependencies]\nsecp256k1.workspace = true\nbyteorder.workspace = true\nhex.workspace = true\nthiserror.workspace = true\nethers.workspace = true\nserde.workspace = true\n\nkeccak-hash = \"0.10.0\"\neth-keystore = \"0.5.0\"\n\n[dev-dependencies]\nrstest.workspace = true\ntopos-core = { path = \"../topos-core\", features = [\"api\", \"uci\"] }\nethers.workspace = true\n"
  },
  {
    "path": "crates/topos-crypto/src/hash.rs",
    "content": "use keccak_hash::keccak_256;\n\npub fn calculate_hash(data: &[u8]) -> [u8; 32] {\n    let mut hash: [u8; 32] = [0u8; 32];\n    keccak_256(data, &mut hash);\n    hash\n}\n"
  },
  {
    "path": "crates/topos-crypto/src/keys.rs",
    "content": "use crate::Error;\nuse secp256k1::{PublicKey, Secp256k1, SecretKey};\n\npub fn derive_public_key(private_key: &[u8]) -> Result<Vec<u8>, Error> {\n    let secret_key =\n        SecretKey::from_slice(private_key).map_err(|e| Error::InvalidKeyError(e.to_string()))?;\n    Ok(PublicKey::from_secret_key(&Secp256k1::new(), &secret_key)\n        .serialize()\n        .to_vec())\n}\n"
  },
  {
    "path": "crates/topos-crypto/src/keystore.rs",
    "content": "use crate::Error;\n/// Module for handling local topos node keystore\nuse std::path::Path;\n\npub const SUBNET_NODE_VALIDATOR_KEY_FILE_PATH: &str = \"/consensus/validator.key\";\n\npub fn read_private_key_from_file(\n    file_name: &std::path::PathBuf,\n    password: Option<String>,\n) -> Result<Vec<u8>, Error> {\n    let keypath = Path::new(file_name);\n    let private_key = if let Some(password) = password {\n        // Encrypted keystore in ethereum wallet format\n        eth_keystore::decrypt_key(keypath, password)?\n    } else {\n        let key = std::fs::read_to_string(keypath)?.trim().to_string();\n        hex::decode(key).map_err(|e| Error::InvalidKeyError(e.to_string()))?\n    };\n\n    Ok(private_key)\n}\n\npub fn get_keystore_path(subnet_data_dir: &str) -> std::path::PathBuf {\n    std::path::PathBuf::from(&(subnet_data_dir.to_string() + SUBNET_NODE_VALIDATOR_KEY_FILE_PATH))\n}\n"
  },
  {
    "path": "crates/topos-crypto/src/lib.rs",
    "content": "use thiserror::Error;\n\npub mod hash;\npub mod keys;\npub mod keystore;\npub mod messages;\npub mod signatures;\npub mod validator_id;\n\n#[derive(Debug, Error)]\npub enum Error {\n    #[error(\"Keystore error: {0}\")]\n    KeystoreError(#[from] eth_keystore::KeystoreError),\n\n    #[error(\"Keystore file io error: {0}\")]\n    KeystoreFileError(#[from] std::io::Error),\n\n    #[error(\"Invalid key error: {0}\")]\n    InvalidKeyError(String),\n\n    #[error(\"Elliptic curve error: {0}\")]\n    Secp256k1Error(#[from] secp256k1::Error),\n\n    #[error(\"Invalid signature: {0}\")]\n    InvalidSignature(String),\n}\n"
  },
  {
    "path": "crates/topos-crypto/src/messages.rs",
    "content": "use ethers::signers::Signer;\nuse ethers::signers::{LocalWallet, WalletError};\nuse ethers::types::{RecoveryMessage, SignatureError};\nuse ethers::utils::hash_message;\nuse std::str::FromStr;\nuse thiserror::Error;\n\npub use ethers::types::{Address, Signature, H160, U256};\n\n#[derive(Error, Debug)]\npub enum MessageSignerError {\n    #[error(\"Unable to parse private key\")]\n    PrivateKeyParsing,\n}\n\n#[derive(Debug)]\npub struct MessageSigner {\n    pub public_address: Address,\n    wallet: LocalWallet,\n}\n\nimpl FromStr for MessageSigner {\n    type Err = MessageSignerError;\n\n    fn from_str(s: &str) -> Result<Self, Self::Err> {\n        let decoded = hex::decode(s).map_err(|_| MessageSignerError::PrivateKeyParsing)?;\n\n        Self::new(&decoded[..])\n    }\n}\n\nimpl MessageSigner {\n    pub fn new(private_key: &[u8]) -> Result<Self, MessageSignerError> {\n        let wallet: LocalWallet = LocalWallet::from_bytes(private_key)\n            .map_err(|_| MessageSignerError::PrivateKeyParsing)?;\n\n        Ok(Self {\n            public_address: wallet.address(),\n            wallet,\n        })\n    }\n\n    pub fn sign_message(&self, payload: &[u8]) -> Result<Signature, WalletError> {\n        let hash = hash_message(payload);\n\n        LocalWallet::sign_hash(&self.wallet, hash)\n    }\n\n    pub fn verify_signature(\n        &self,\n        signature: Signature,\n        payload: &[u8],\n        public_key: Address,\n    ) -> Result<(), SignatureError> {\n        let message: RecoveryMessage = payload.into();\n\n        signature.verify(message, public_key)\n    }\n}\n"
  },
  {
    "path": "crates/topos-crypto/src/signatures.rs",
    "content": "use crate::Error;\nuse secp256k1::{Message, PublicKey, Secp256k1, SecretKey};\n\npub fn sign(private_key: &[u8], data: &[u8]) -> Result<Vec<u8>, crate::Error> {\n    let secp = Secp256k1::new();\n    let secret_key =\n        SecretKey::from_slice(private_key).map_err(|e| Error::InvalidKeyError(e.to_string()))?;\n    let hash = crate::hash::calculate_hash(data);\n    let message = Message::from_slice(&hash).map_err(Error::Secp256k1Error)?;\n    let signature = secp.sign_ecdsa(&message, &secret_key);\n\n    Ok(signature.serialize_compact().to_vec())\n}\n\npub fn verify(public_key: &[u8], data: &[u8], signature: &[u8]) -> Result<(), crate::Error> {\n    let secp = Secp256k1::new();\n    let public_key =\n        PublicKey::from_slice(public_key).map_err(|e| Error::InvalidKeyError(e.to_string()))?;\n    let signature = secp256k1::ecdsa::Signature::from_compact(signature)\n        .map_err(|e| Error::InvalidSignature(e.to_string()))?;\n\n    let hash = crate::hash::calculate_hash(data);\n    let message = Message::from_slice(&hash).map_err(Error::Secp256k1Error)?;\n    secp.verify_ecdsa(&message, &signature, &public_key)\n        .map_err(|e| Error::InvalidSignature(e.to_string()))\n}\n"
  },
  {
    "path": "crates/topos-crypto/src/validator_id.rs",
    "content": "use crate::messages::{Address, H160};\nuse serde::{Deserialize, Serialize};\nuse std::str::FromStr;\nuse thiserror::Error;\n\npub const VALIDATOR_ID_LENGTH: usize = 20;\n\n#[derive(Debug, Error)]\npub enum Error {\n    #[error(\"Failed to parse address string as H160\")]\n    ParseError,\n    #[error(\"Failed to convert byte array into H160: {0}\")]\n    InvalidByteLength(String),\n}\n\n#[derive(Clone, Copy, Default, Debug, Serialize, Deserialize, Eq, PartialEq, Hash)]\npub struct ValidatorId(H160);\n\nimpl ValidatorId {\n    pub fn as_bytes(&self) -> &[u8] {\n        self.0.as_bytes()\n    }\n\n    pub fn address(&self) -> Address {\n        self.0\n    }\n}\n\nimpl From<H160> for ValidatorId {\n    fn from(address: H160) -> Self {\n        ValidatorId(address)\n    }\n}\n\nimpl FromStr for ValidatorId {\n    type Err = Error;\n\n    fn from_str(address: &str) -> Result<Self, Self::Err> {\n        H160::from_str(address)\n            .map_err(|_| Error::ParseError)\n            .map(ValidatorId)\n    }\n}\n\nimpl std::fmt::Display for ValidatorId {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        write!(f, \"0x{}\", hex::encode(self.0))\n    }\n}\n"
  },
  {
    "path": "crates/topos-crypto/tests/messages.rs",
    "content": "use std::str::FromStr;\n\nuse rstest::*;\nuse topos_core::types::ValidatorId;\nuse topos_core::uci::CertificateId;\nuse topos_crypto::messages::MessageSigner;\n\n#[rstest]\npub fn test_signing_messages() {\n    let message_signer_sender =\n        MessageSigner::from_str(\"122f3ae6ade1fd136b292cea4f6243c7811160352c8821528547a1fe7c459daf\")\n            .unwrap();\n    let validator_id_sender = ValidatorId::from(message_signer_sender.public_address);\n    let certificate_id = CertificateId::from_array([0u8; 32]);\n\n    let mut payload = Vec::new();\n    payload.extend_from_slice(certificate_id.as_array());\n    payload.extend_from_slice(validator_id_sender.as_bytes());\n\n    let signature = message_signer_sender\n        .sign_message(&payload)\n        .expect(\"Cannot create Signature\");\n\n    let message_signer_receiver =\n        MessageSigner::from_str(\"a2e33a9bad88f7b7568228f51d5274c471a9217162d46f1533b6a290f0be1baf\")\n            .unwrap();\n\n    let verify = message_signer_receiver.verify_signature(\n        signature,\n        &payload,\n        validator_id_sender.address(),\n    );\n\n    assert!(verify.is_ok());\n}\n\n#[rstest]\npub fn fails_to_verify_with_own_public_address() {\n    let message_signer_sender =\n        MessageSigner::from_str(\"122f3ae6ade1fd136b292cea4f6243c7811160352c8821528547a1fe7c459daf\")\n            .unwrap();\n    let validator_id_sender = ValidatorId::from(message_signer_sender.public_address);\n    let certificate_id = CertificateId::from_array([0u8; 32]);\n\n    let mut payload = Vec::new();\n    payload.extend_from_slice(certificate_id.as_array());\n    payload.extend_from_slice(validator_id_sender.as_bytes());\n\n    let signature = message_signer_sender\n        .sign_message(&payload)\n        .expect(\"Cannot create Signature\");\n\n    let message_signer_receiver =\n        MessageSigner::from_str(\"a2e33a9bad88f7b7568228f51d5274c471a9217162d46f1533b6a290f0be1baf\")\n            .unwrap();\n    
let validator_id_receiver = ValidatorId::from(message_signer_receiver.public_address);\n\n    let verify = message_signer_receiver.verify_signature(\n        signature,\n        &payload,\n        validator_id_receiver.address(),\n    );\n\n    assert!(verify.is_err());\n}\n"
  },
  {
    "path": "crates/topos-metrics/Cargo.toml",
    "content": "[package]\nname = \"topos-metrics\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n[lints]\nworkspace = true\n\n[dependencies]\nlazy_static.workspace = true\nprometheus.workspace = true\n"
  },
  {
    "path": "crates/topos-metrics/src/api.rs",
    "content": "use prometheus::{register_int_counter_with_registry, IntCounter};\n\nuse lazy_static::lazy_static;\n\nuse crate::TOPOS_METRIC_REGISTRY;\n\nlazy_static! {\n    pub static ref API_GRPC_CERTIFICATE_RECEIVED_TOTAL: IntCounter =\n        register_int_counter_with_registry!(\n            \"api_grpc_certificate_received_total\",\n            \"Number of Certificates received from the gRPC API.\",\n            TOPOS_METRIC_REGISTRY\n        )\n        .unwrap();\n}\n"
  },
  {
    "path": "crates/topos-metrics/src/double_echo.rs",
    "content": "use prometheus::{\n    register_int_counter_with_registry, register_int_gauge_with_registry, IntCounter, IntGauge,\n};\n\nuse lazy_static::lazy_static;\n\nuse crate::TOPOS_METRIC_REGISTRY;\n\nlazy_static! {\n    pub static ref DOUBLE_ECHO_ACTIVE_TASKS_COUNT: IntGauge = register_int_gauge_with_registry!(\n        \"double_echo_active_tasks_count\",\n        \"Number of active tasks in the double echo.\",\n        TOPOS_METRIC_REGISTRY\n    )\n    .unwrap();\n    pub static ref DOUBLE_ECHO_COMMAND_CHANNEL_CAPACITY_TOTAL: IntCounter =\n        register_int_counter_with_registry!(\n            \"double_echo_command_channel_capacity_total\",\n            \"Number of time the double echo command channel was at capacity.\",\n            TOPOS_METRIC_REGISTRY\n        )\n        .unwrap();\n    pub static ref DOUBLE_ECHO_BUFFER_CAPACITY_TOTAL: IntCounter =\n        register_int_counter_with_registry!(\n            \"double_echo_buffer_capacity_total\",\n            \"Number of time the double echo buffer was at capacity.\",\n            TOPOS_METRIC_REGISTRY\n        )\n        .unwrap();\n    pub static ref DOUBLE_ECHO_CURRENT_BUFFER_SIZE: IntGauge = register_int_gauge_with_registry!(\n        \"double_echo_current_buffer_size\",\n        \"Current size of the double echo buffer.\",\n        TOPOS_METRIC_REGISTRY\n    )\n    .unwrap();\n    pub static ref DOUBLE_ECHO_BUFFERED_MESSAGE_COUNT: IntGauge =\n        register_int_gauge_with_registry!(\n            \"double_echo_buffered_message_count\",\n            \"Number of message buffered in the double echo buffer.\",\n            TOPOS_METRIC_REGISTRY\n        )\n        .unwrap();\n    pub static ref DOUBLE_ECHO_BROADCAST_CREATED_TOTAL: IntCounter =\n        register_int_counter_with_registry!(\n            \"double_echo_broadcast_created_total\",\n            \"Number of broadcast created.\",\n            TOPOS_METRIC_REGISTRY\n        )\n        .unwrap();\n    pub static ref 
DOUBLE_ECHO_BROADCAST_FINISHED_TOTAL: IntCounter =\n        register_int_counter_with_registry!(\n            \"double_echo_broadcast_finished_total\",\n            \"Number of broadcast finished.\",\n            TOPOS_METRIC_REGISTRY\n        )\n        .unwrap();\n}\n"
  },
  {
    "path": "crates/topos-metrics/src/lib.rs",
    "content": "use prometheus::{\n    register_histogram_with_registry, register_int_counter_with_registry, Encoder, Histogram,\n    IntCounter, Registry, TextEncoder,\n};\n\nuse lazy_static::lazy_static;\nuse std::collections::hash_map::HashMap;\n\nmod api;\nmod double_echo;\nmod p2p;\nmod storage;\n\n#[cfg(test)]\nmod tests;\n\npub use api::*;\npub use double_echo::*;\npub use p2p::*;\npub use storage::*;\n\nlazy_static! {\n    pub static ref TOPOS_METRIC_REGISTRY: Registry = Registry::new_custom(\n        Some(\"topos\".to_string()),\n        Some(HashMap::from([\n            (\n                \"run_id\".to_string(),\n                std::env::var(\"TOPOS_RUN_ID\")\n                    .ok()\n                    .unwrap_or(\"default\".to_string())\n            ),\n            (\n                \"run_number\".to_string(),\n                std::env::var(\"TOPOS_RUN_NUMBER\")\n                    .ok()\n                    .unwrap_or(\"default\".to_string())\n            )\n        ]))\n    )\n    .unwrap();\n    pub static ref CERTIFICATE_PROCESSING_TOTAL: IntCounter = register_int_counter_with_registry!(\n        \"certificate_processing_total\",\n        \"Number of certificate received.\",\n        TOPOS_METRIC_REGISTRY\n    )\n    .unwrap();\n    pub static ref CERTIFICATE_PROCESSING_FROM_GOSSIP_TOTAL: IntCounter =\n        register_int_counter_with_registry!(\n            \"certificate_processing_from_gossip_total\",\n            \"Number of certificate received from gossip.\",\n            TOPOS_METRIC_REGISTRY\n        )\n        .unwrap();\n    pub static ref CERTIFICATE_PROCESSING_FROM_API_TOTAL: IntCounter =\n        register_int_counter_with_registry!(\n            \"certificate_processing_from_api_total\",\n            \"Number of certificate received from api.\",\n            TOPOS_METRIC_REGISTRY\n        )\n        .unwrap();\n    pub static ref CERTIFICATE_DELIVERED_TOTAL: IntCounter = register_int_counter_with_registry!(\n        
\"certificate_delivered_total\",\n        \"Number of certificate delivered.\",\n        TOPOS_METRIC_REGISTRY\n    )\n    .unwrap();\n    pub static ref CERTIFICATE_DELIVERY_LATENCY: Histogram = register_histogram_with_registry!(\n        \"double_echo_delivery_latency\",\n        \"Latency to delivery.\",\n        prometheus::linear_buckets(0.1, 0.01, 500).unwrap(),\n        TOPOS_METRIC_REGISTRY\n    )\n    .unwrap();\n}\n\npub fn gather_metrics() -> String {\n    let mut buffer = Vec::new();\n    let encoder = TextEncoder::new();\n\n    // Gather the metrics.\n    let metric_families = prometheus::gather();\n    // Encode them to send.\n    encoder.encode(&metric_families, &mut buffer).unwrap();\n\n    let topos_metrics = TOPOS_METRIC_REGISTRY.gather();\n    encoder.encode(&topos_metrics, &mut buffer).unwrap();\n\n    String::from_utf8(buffer.clone()).unwrap()\n}\n\npub fn init_metrics() {\n    API_GRPC_CERTIFICATE_RECEIVED_TOTAL.reset();\n    P2P_EVENT_STREAM_CAPACITY_TOTAL.reset();\n    P2P_MESSAGE_RECEIVED_ON_GOSSIP_TOTAL.reset();\n    P2P_MESSAGE_RECEIVED_ON_ECHO_TOTAL.reset();\n    P2P_MESSAGE_RECEIVED_ON_READY_TOTAL.reset();\n    P2P_MESSAGE_SENT_ON_GOSSIPSUB_TOTAL.reset();\n    DOUBLE_ECHO_ACTIVE_TASKS_COUNT.set(0);\n    DOUBLE_ECHO_COMMAND_CHANNEL_CAPACITY_TOTAL.reset();\n    DOUBLE_ECHO_BUFFER_CAPACITY_TOTAL.reset();\n    DOUBLE_ECHO_CURRENT_BUFFER_SIZE.set(0);\n    DOUBLE_ECHO_BUFFERED_MESSAGE_COUNT.set(0);\n    DOUBLE_ECHO_BROADCAST_CREATED_TOTAL.reset();\n    DOUBLE_ECHO_BROADCAST_FINISHED_TOTAL.reset();\n    CERTIFICATE_PROCESSING_TOTAL.reset();\n    CERTIFICATE_PROCESSING_FROM_GOSSIP_TOTAL.reset();\n    CERTIFICATE_PROCESSING_FROM_API_TOTAL.reset();\n    CERTIFICATE_DELIVERED_TOTAL.reset();\n    STORAGE_COMMAND_CHANNEL_CAPACITY_TOTAL.reset();\n}\n"
  },
  {
    "path": "crates/topos-metrics/src/p2p.rs",
    "content": "use prometheus::{\n    register_histogram_with_registry, register_int_counter_vec_with_registry,\n    register_int_counter_with_registry, Histogram, IntCounter, IntCounterVec,\n};\n\nuse lazy_static::lazy_static;\n\nuse crate::TOPOS_METRIC_REGISTRY;\n\nlazy_static! {\n    pub static ref P2P_EVENT_STREAM_CAPACITY_TOTAL: IntCounter =\n        register_int_counter_with_registry!(\n            \"p2p_event_stream_capacity_total\",\n            \"Number of time the p2p event stream was almost at capacity.\",\n            TOPOS_METRIC_REGISTRY\n        )\n        .unwrap();\n    pub static ref P2P_DUPLICATE_MESSAGE_ID_RECEIVED_TOTAL: IntCounter =\n        register_int_counter_with_registry!(\n            \"p2p_duplicate_message_id_received_total\",\n            \"Number of time a duplicate message id was received.\",\n            TOPOS_METRIC_REGISTRY\n        )\n        .unwrap();\n    pub static ref P2P_MESSAGE_RECEIVED_ON_GOSSIP_TOTAL: IntCounter =\n        register_int_counter_with_registry!(\n            \"p2p_gossip_message_total\",\n            \"Number of gossip message received.\",\n            TOPOS_METRIC_REGISTRY\n        )\n        .unwrap();\n    pub static ref P2P_MESSAGE_RECEIVED_ON_ECHO_TOTAL: IntCounter =\n        register_int_counter_with_registry!(\n            \"p2p_echo_message_total\",\n            \"Number of echo message received.\",\n            TOPOS_METRIC_REGISTRY\n        )\n        .unwrap();\n    pub static ref P2P_MESSAGE_RECEIVED_ON_READY_TOTAL: IntCounter =\n        register_int_counter_with_registry!(\n            \"p2p_ready_message_total\",\n            \"Number of ready message received.\",\n            TOPOS_METRIC_REGISTRY\n        )\n        .unwrap();\n    pub static ref P2P_MESSAGE_SENT_ON_GOSSIPSUB_TOTAL: IntCounter =\n        register_int_counter_with_registry!(\n            \"p2p_gossipsub_message_sent_total\",\n            \"Number of gossipsub message sent.\",\n            TOPOS_METRIC_REGISTRY\n        )\n  
      .unwrap();\n    pub static ref P2P_GOSSIP_BATCH_SIZE: Histogram = register_histogram_with_registry!(\n        \"p2p_gossip_batch_size\",\n        \"Number of message sent in a gossip batch.\",\n        vec![1.0, 5.0, 10.0, 50.0, 100.0, 200.0, 500.0, 1000.0, 2000.0, 5000.0],\n        TOPOS_METRIC_REGISTRY\n    )\n    .unwrap();\n    pub static ref P2P_MESSAGE_DESERIALIZE_FAILURE_TOTAL: IntCounterVec =\n        register_int_counter_vec_with_registry!(\n            \"p2p_message_deserialize_failure_total\",\n            \"Number of message deserialization failure.\",\n            &[\"topic\"],\n            TOPOS_METRIC_REGISTRY\n        )\n        .unwrap();\n    pub static ref P2P_MESSAGE_SERIALIZE_FAILURE_TOTAL: IntCounterVec =\n        register_int_counter_vec_with_registry!(\n            \"p2p_message_serialize_failure_total\",\n            \"Number of message serialization failure.\",\n            &[\"topic\"],\n            TOPOS_METRIC_REGISTRY\n        )\n        .unwrap();\n}\n"
  },
  {
    "path": "crates/topos-metrics/src/storage.rs",
    "content": "use prometheus::{\n    register_histogram_with_registry, register_int_counter_with_registry,\n    register_int_gauge_with_registry, Histogram, IntCounter, IntGauge,\n};\n\nuse lazy_static::lazy_static;\n\nuse crate::TOPOS_METRIC_REGISTRY;\n\nlazy_static! {\n    pub static ref STORAGE_COMMAND_CHANNEL_CAPACITY_TOTAL: IntCounter =\n        register_int_counter_with_registry!(\n            \"storage_command_channel_capacity_total\",\n            \"Number of time the storage command channel was at capacity.\",\n            TOPOS_METRIC_REGISTRY\n        )\n        .unwrap();\n    pub static ref STORAGE_PENDING_CERTIFICATE_EXISTENCE_LATENCY: Histogram =\n        register_histogram_with_registry!(\n            \"storage_pending_certificate_existence_latency\",\n            \"Latency of the pending certificate existance check.\",\n            vec![0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1.0, 2.0, 5.0],\n            TOPOS_METRIC_REGISTRY\n        )\n        .unwrap();\n    pub static ref STORAGE_ADDING_PENDING_CERTIFICATE_LATENCY: Histogram =\n        register_histogram_with_registry!(\n            \"storage_adding_pending_certificate_latency\",\n            \"Latency of adding a pending certificate.\",\n            vec![0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1.0, 2.0, 5.0],\n            TOPOS_METRIC_REGISTRY\n        )\n        .unwrap();\n    pub static ref STORAGE_PENDING_POOL_COUNT: IntGauge = register_int_gauge_with_registry!(\n        \"storage_pending_pool_count\",\n        \"Number of certificates in the pending pool.\",\n        TOPOS_METRIC_REGISTRY\n    )\n    .unwrap();\n    pub static ref STORAGE_PRECEDENCE_POOL_COUNT: IntGauge = register_int_gauge_with_registry!(\n        \"storage_precedence_pool_count\",\n        \"Number of certificates in the precedence pool.\",\n        TOPOS_METRIC_REGISTRY\n    )\n    .unwrap();\n}\n"
  },
  {
    "path": "crates/topos-metrics/src/tests.rs",
    "content": "use crate::p2p;\n\n#[test]\nfn increment_echo_failure_ser() {\n    let m = &p2p::P2P_MESSAGE_SERIALIZE_FAILURE_TOTAL;\n\n    m.with_label_values(&[\"echo\"]).inc();\n\n    assert_eq!(m.get_metric_with_label_values(&[\"echo\"]).unwrap().get(), 1);\n    assert_eq!(m.get_metric_with_label_values(&[\"ready\"]).unwrap().get(), 0);\n}\n\n#[test]\nfn increment_echo_failure_des() {\n    let m = &p2p::P2P_MESSAGE_DESERIALIZE_FAILURE_TOTAL;\n\n    m.with_label_values(&[\"echo\"]).inc();\n\n    assert_eq!(m.get_metric_with_label_values(&[\"echo\"]).unwrap().get(), 1);\n    assert_eq!(m.get_metric_with_label_values(&[\"ready\"]).unwrap().get(), 0);\n}\n"
  },
  {
    "path": "crates/topos-node/Cargo.toml",
    "content": "[package]\nname = \"topos-node\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\ndescription = \"Runtime crate of a topos-node\"\n\n[lints]\nworkspace = true\n\n[dependencies]\ntopos-config = { path = \"../topos-config/\" }\ntopos-tce = { path = \"../topos-tce/\" }\ntopos-p2p = { path = \"../topos-p2p\" }\ntopos-sequencer = { path = \"../topos-sequencer\" }\ntopos-core = { workspace = true, features = [\"api\"] }\ntopos-certificate-spammer = { path = \"../topos-certificate-spammer\" }\ntopos-tce-broadcast = { path = \"../topos-tce-broadcast\", optional = true }\ntopos-wallet = { path = \"../topos-wallet\" }\ntopos-telemetry = { path = \"../topos-telemetry/\", features = [\"tracing\"] }\n\nasync-stream.workspace = true\nasync-trait.workspace = true\nclap.workspace = true\nhex.workspace = true\nfutures.workspace = true\nopentelemetry.workspace = true\nserde.workspace = true\nserde_json.workspace = true\ntokio = { workspace = true, features = [\"full\"] }\ntokio-util.workspace = true\ntonic.workspace = true\ntower.workspace = true\ntracing = { workspace = true, features = [\"log\"] }\ntracing-opentelemetry.workspace = true\ntracing-subscriber = { workspace = true, features = [\"env-filter\", \"json\", \"ansi\", \"fmt\"] }\nuuid.workspace = true\nrand.workspace = true\nreqwest.workspace = true\nthiserror.workspace = true\nopentelemetry-otlp = { workspace = true, features = [\"grpc-tonic\", \"metrics\", \"tls-roots\"] }\ndirs = \"5.0\"\ntracing-log = { version = \"0.1.3\", features = [\"env_logger\"] }\ntar = \"0.4.38\"\nflate2 =\"1.0.26\"\nurl = \"2.3.1\"\nonce_cell = \"1.17.1\"\nregex = \"1\"\nrlp = \"0.5.1\"\nopenssl = { version = \"0.10.61\", features = [\"vendored\"] }\n\n[dev-dependencies]\ntoml = \"0.7.4\"\ntopos-tce-broadcast = { path = \"../topos-tce-broadcast\" }\ntopos-tce-synchronizer = { path = \"../topos-tce-synchronizer\" }\ntopos-tce-gatekeeper = { path = \"../topos-tce-gatekeeper\" }\ntopos-tce-api = { path = \"../topos-tce-api\" 
}\ntopos-tce-storage = { path = \"../topos-tce-storage\" }\ntopos-test-sdk = { path = \"../topos-test-sdk\" }\nserde.workspace = true\nserde_json.workspace = true\ntest-log.workspace = true\nenv_logger.workspace = true\nrand.workspace = true\nfutures.workspace = true\nlibp2p = { workspace = true, features = [\"identify\"] }\nassert_cmd = \"2.0.6\"\ninsta = { version = \"1.21\", features = [\"json\", \"redactions\"] }\nrstest = { workspace = true, features = [\"async-timeout\"] }\ntempfile = \"3.8.0\"\npredicates = \"3.0.3\"\nsysinfo = \"0.29.11\"\nserial_test = {version = \"0.9.0\"}\n\n[features]\ndefault = []\n"
  },
  {
    "path": "crates/topos-node/build.rs",
    "content": "use std::process::Command;\n\nconst DEFAULT_VERSION: &str = \"detached\";\n\nfn main() {\n    // Set TOPOS_VERSION to HEAD short commit hash unless it's already set\n    if std::option_env!(\"TOPOS_VERSION\").is_none() {\n        let output = Command::new(\"git\")\n            .args([\"rev-parse\", \"--short\", \"HEAD\"])\n            .output()\n            .expect(\"failed to access the HEAD commit hash\");\n\n        let git_hash = String::from_utf8(output.stdout).unwrap();\n\n        let topos_version = if git_hash.is_empty() {\n            DEFAULT_VERSION\n        } else {\n            git_hash.as_str()\n        };\n\n        println!(\"cargo:rustc-env=TOPOS_VERSION={topos_version}\");\n    }\n}\n"
  },
  {
    "path": "crates/topos-node/src/lib.rs",
    "content": "//! Temporary lib exposition for backward topos CLI compatibility\nuse std::process::ExitStatus;\n\nuse futures::stream::FuturesUnordered;\nuse futures::StreamExt;\nuse opentelemetry::global;\nuse process::Errors;\nuse tokio::{\n    signal::{self, unix::SignalKind},\n    sync::mpsc,\n    task::JoinHandle,\n};\nuse tokio_util::sync::CancellationToken;\nuse topos_config::{\n    genesis::Genesis,\n    node::{NodeConfig, NodeRole},\n};\nuse topos_telemetry::tracing::setup_tracing;\nuse topos_wallet::SecretManager;\nuse tracing::{debug, error, info};\nuse tracing_subscriber::util::TryInitError;\n\nmod process;\n\n#[derive(Debug, thiserror::Error)]\npub enum Error {\n    #[error(transparent)]\n    GenesisFile(#[from] topos_config::genesis::Error),\n\n    #[error(\"Unable to setup tracing logger: {0}\")]\n    Tracing(#[from] TryInitError),\n\n    #[error(transparent)]\n    IO(#[from] std::io::Error),\n\n    #[error(\n        \"The role in the config file expect to have a sequencer config defined, none was found\"\n    )]\n    MissingSequencerConfig,\n\n    #[error(\"An Edge config was expected to be found in the config file\")]\n    MissingEdgeConfig,\n\n    #[error(\"A TCE config was expected to be found in the config file\")]\n    MissingTCEConfig,\n}\n\npub async fn start(\n    verbose: u8,\n    no_color: bool,\n    otlp_agent: Option<String>,\n    otlp_service_name: Option<String>,\n    no_edge_process: bool,\n    config: NodeConfig,\n) -> Result<(), Error> {\n    // Setup instrumentation if both otlp agent and otlp service name\n    // are provided as arguments\n    setup_tracing(\n        verbose,\n        no_color,\n        otlp_agent,\n        otlp_service_name,\n        env!(\"TOPOS_VERSION\"),\n    )?;\n\n    info!(\n        \"⚙️ Read the configuration from {}/config.toml\",\n        config.node_path.display()\n    );\n\n    debug!(\"TceConfig: {:?}\", config);\n\n    let config_ref = &config;\n    let genesis: Genesis = 
config_ref.try_into().map_err(|error| {\n        info!(\n            \"Could not load genesis.json file on path {} \\n Please make sure to have a valid \\\n             genesis.json file for your subnet in the {}/subnet/{} folder.\",\n            config.genesis_path.display(),\n            config.home_path.display(),\n            &config.base.subnet\n        );\n\n        error\n    })?;\n\n    // Get secrets\n    let keys: SecretManager = config_ref.into();\n\n    info!(\n        \"🧢 New joiner: {} for the \\\"{}\\\" subnet as {:?}\",\n        config.base.name, config.base.subnet, config.base.role\n    );\n\n    let shutdown_token = CancellationToken::new();\n    let shutdown_trigger = shutdown_token.clone();\n\n    let (shutdown_sender, shutdown_receiver) = mpsc::channel(1);\n\n    let mut processes = spawn_processes(\n        no_edge_process,\n        config,\n        genesis,\n        shutdown_sender,\n        keys,\n        shutdown_token,\n    )?;\n\n    let mut sigterm_stream = signal::unix::signal(SignalKind::terminate())?;\n\n    tokio::select! 
{\n        _ = sigterm_stream.recv() => {\n            info!(\"Received SIGTERM, shutting down application...\");\n            shutdown(shutdown_trigger, shutdown_receiver).await;\n        }\n        _ = signal::ctrl_c() => {\n            info!(\"Received ctrl_c, shutting down application...\");\n            shutdown( shutdown_trigger, shutdown_receiver).await;\n        }\n        Some(result) = processes.next() => {\n            shutdown(shutdown_trigger, shutdown_receiver).await;\n            processes.clear();\n            match result {\n                Ok(Ok(status)) => {\n                    if let Some(0) = status.code() {\n                        info!(\"Terminating with success error code\");\n                    } else {\n                        info!(\"Terminating with error status: {:?}\", status);\n                        std::process::exit(1);\n                    }\n                }\n                Ok(Err(e)) => {\n                    error!(\"Terminating with error: {e}\");\n                    std::process::exit(1);\n                }\n                Err(e) => {\n                    error!(\"Terminating with error: {e}\");\n                    std::process::exit(1);\n                }\n            }\n        }\n    };\n\n    Ok(())\n}\n\nfn spawn_processes(\n    no_edge_process: bool,\n    mut config: NodeConfig,\n    genesis: Genesis,\n    shutdown_sender: mpsc::Sender<()>,\n    keys: SecretManager,\n    shutdown_token: CancellationToken,\n) -> Result<FuturesUnordered<JoinHandle<Result<ExitStatus, Errors>>>, Error> {\n    let processes = FuturesUnordered::new();\n\n    // Edge node\n    if no_edge_process {\n        info!(\"Using external edge node, skip running of local edge instance...\")\n    } else {\n        let edge_config = config.edge.take().ok_or(Error::MissingEdgeConfig)?;\n        let edge_bin_config = config.edge_bin.take().ok_or(Error::MissingEdgeConfig)?;\n\n        let data_dir = config.node_path.clone();\n\n        info!(\n      
      \"Spawning edge process with genesis file: {}, data directory: {}, additional edge \\\n             arguments: {:?}\",\n            config.genesis_path.display(),\n            data_dir.display(),\n            edge_config.args\n        );\n\n        processes.push(process::spawn_edge_process(\n            edge_bin_config.binary_path(),\n            data_dir,\n            config.genesis_path.clone(),\n            edge_config.args,\n        ));\n    }\n\n    // Sequencer\n    if matches!(config.base.role, NodeRole::Sequencer) {\n        let sequencer_config = config\n            .sequencer\n            .take()\n            .ok_or(Error::MissingSequencerConfig)?;\n\n        info!(\n            \"Running sequencer with configuration {:?}\",\n            sequencer_config\n        );\n        processes.push(process::spawn_sequencer_process(\n            sequencer_config,\n            &keys,\n            (shutdown_token.clone(), shutdown_sender.clone()),\n        ));\n    }\n\n    // TCE\n    if config.base.subnet == \"topos\" {\n        let tce_config = config.tce.ok_or(Error::MissingTCEConfig)?;\n        info!(\"Running topos TCE service...\",);\n\n        processes.push(process::spawn_tce_process(\n            tce_config,\n            keys,\n            genesis,\n            (shutdown_token.clone(), shutdown_sender.clone()),\n        ));\n    }\n\n    drop(shutdown_sender);\n    Ok(processes)\n}\n\nasync fn shutdown(trigger: CancellationToken, mut termination: mpsc::Receiver<()>) {\n    trigger.cancel();\n    // Wait that all sender get dropped\n    info!(\"Waiting that all components dropped\");\n    let _ = termination.recv().await;\n    info!(\"Shutdown procedure finished, exiting...\");\n    // Shutdown tracing\n    global::shutdown_tracer_provider();\n}\n"
  },
  {
    "path": "crates/topos-node/src/main.rs",
    "content": "#[tokio::main]\nasync fn main() -> Result<(), Box<dyn std::error::Error>> {\n    Ok(())\n}\n"
  },
  {
    "path": "crates/topos-node/src/process.rs",
    "content": "use std::collections::HashMap;\nuse std::path::PathBuf;\nuse std::process::ExitStatus;\nuse thiserror::Error;\nuse tokio::{spawn, sync::mpsc, task::JoinHandle};\nuse tokio_util::sync::CancellationToken;\nuse topos_config::edge::command::CommandConfig;\nuse topos_config::sequencer::SequencerConfig;\nuse topos_config::tce::broadcast::ReliableBroadcastParams;\nuse topos_config::tce::{AuthKey, StorageConfiguration, TceConfig};\nuse topos_p2p::Multiaddr;\nuse topos_sequencer::SequencerConfiguration;\nuse topos_wallet::SecretManager;\nuse tracing::{debug, error, warn};\n\nuse topos_config::genesis::Genesis;\n\n#[derive(Error, Debug)]\npub enum Errors {\n    #[error(\"TCE error\")]\n    TceFailure,\n    #[error(\"Sequencer error\")]\n    SequencerFailure,\n    #[error(\"Edge error: {0}\")]\n    EdgeTerminated(#[from] std::io::Error),\n}\n\npub(crate) fn spawn_sequencer_process(\n    config: SequencerConfig,\n    keys: &SecretManager,\n    shutdown: (CancellationToken, mpsc::Sender<()>),\n) -> JoinHandle<Result<ExitStatus, Errors>> {\n    let config = SequencerConfiguration {\n        subnet_id: config.subnet_id,\n        public_key: keys.validator_pubkey(),\n        subnet_jsonrpc_http: config.subnet_jsonrpc_http,\n        subnet_jsonrpc_ws: config.subnet_jsonrpc_ws,\n        subnet_contract_address: config.subnet_contract_address,\n        tce_grpc_endpoint: config.tce_grpc_endpoint,\n        signing_key: keys.validator.clone().unwrap(),\n        verifier: 0,\n        start_block: config.start_block,\n    };\n\n    debug!(\"Sequencer args: {config:?}\");\n    spawn(async move {\n        topos_sequencer::run(config, shutdown).await.map_err(|e| {\n            error!(\"Sequencer failure: {e:?}\");\n            Errors::SequencerFailure\n        })\n    })\n}\n\npub(crate) fn spawn_tce_process(\n    mut config: TceConfig,\n    keys: SecretManager,\n    genesis: Genesis,\n    shutdown: (CancellationToken, mpsc::Sender<()>),\n) -> JoinHandle<Result<ExitStatus, 
Errors>> {\n    config.boot_peers = genesis\n        .boot_peers(Some(topos_p2p::constants::TCE_BOOTNODE_PORT))\n        .into_iter()\n        .chain(config.parse_boot_peers())\n        .collect::<Vec<_>>();\n    config.auth_key = keys.network.map(AuthKey::PrivateKey);\n    config.signing_key = keys.validator.map(AuthKey::PrivateKey);\n    config.p2p.is_bootnode = if let Some(AuthKey::PrivateKey(ref k)) = config.auth_key {\n        let peer_id = topos_p2p::utils::keypair_from_protobuf_encoding(&k[..])\n            .public()\n            .to_peer_id();\n\n        config.boot_peers.iter().any(|(p, _)| p == &peer_id)\n    } else {\n        false\n    };\n\n    config.validators = genesis.validators().expect(\"Cannot parse validators\");\n    config.tce_params = ReliableBroadcastParams::new(config.validators.len());\n\n    if let Some(socket) = config.libp2p_api_addr {\n        warn!(\n            \"`libp2p_api_addr` is deprecated in favor of `listen_addresses` and \\\n             `public_addresses` and will be removed in the next version. 
In order to keep your \\\n             node running, `libp2p_api_addr` will be used.\"\n        );\n\n        let addr: Multiaddr = format!(\"/ip4/{}/tcp/{}\", socket.ip(), socket.port())\n            .parse()\n            .expect(\"Unable to generate Multiaddr from `libp2p_api_addr`\");\n\n        config.p2p.listen_addresses = vec![addr.clone()];\n        config.p2p.public_addresses = vec![addr];\n    }\n\n    config.version = env!(\"TOPOS_VERSION\");\n    config.storage = StorageConfiguration::RocksDB(Some(config.db_path.clone()));\n\n    debug!(\"TCE args: {config:?}\");\n    spawn(async move {\n        topos_tce::launch(&config, shutdown).await.map_err(|e| {\n            error!(\"TCE process terminated: {e:?}\");\n            Errors::TceFailure\n        })\n    })\n}\n\npub fn spawn_edge_process(\n    edge_path: PathBuf,\n    data_dir: PathBuf,\n    genesis_path: PathBuf,\n    edge_args: HashMap<String, String>,\n) -> JoinHandle<Result<ExitStatus, Errors>> {\n    spawn(async move {\n        CommandConfig::new(edge_path)\n            .server(&data_dir, &genesis_path, edge_args)\n            .spawn()\n            .await\n            .map_err(Errors::EdgeTerminated)\n    })\n}\n"
  },
  {
    "path": "crates/topos-p2p/Cargo.toml",
    "content": "[package]\nname = \"topos-p2p\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n[lints]\nworkspace = true\n\n[dependencies]\nasync-trait.workspace = true\nbincode.workspace = true\nbytes.workspace = true\nfutures.workspace = true\nhex.workspace = true\nhttp-body = \"0.4.5\"\nhttp-body-util = \"0.1.0-rc.3\"\nhttp.workspace = true\nlazy_static.workspace = true\nlibp2p = { workspace = true, features = [\"macros\", \"gossipsub\", \"tcp\", \"dns\", \"tokio\", \"request-response\", \"identify\", \"kad\", \"serde\", \"yamux\", \"secp256k1\"] }\npin-project = \"1.1.3\"\nprometheus-client.workspace = true\nrand.workspace = true\nserde = { workspace = true, features = [\"derive\"] }\nsmallvec = \"1.11.1\"\nthiserror.workspace = true\ntokio = { workspace = true, features = [\"full\"] }\ntokio-stream.workspace = true\ntokio-util.workspace = true\ntonic = {workspace = true, features = [\"tls\", \"tls-roots\"]}\ntopos-metrics = { path = \"../topos-metrics/\" }\ntower.workspace = true\ntracing = { workspace = true, features = [\"attributes\"] }\nuuid.workspace = true\nvoid = \"1\"\nhyper.workspace = true\nprost.workspace = true\n\ntopos-core = { path = \"../topos-core/\" }\nip_network = \"0.4.1\"\n\n[dev-dependencies]\nlibp2p-swarm-test = \"0.3.0\"\ntest-log.workspace = true\nenv_logger.workspace = true\nrstest = { workspace = true, features = [\"async-timeout\"] }\ntracing-subscriber.workspace = true\ntopos-test-sdk = { path = \"../topos-test-sdk/\" }\nrand.workspace = true\n"
  },
  {
    "path": "crates/topos-p2p/src/behaviour/discovery.rs",
    "content": "use std::borrow::Cow;\nuse std::pin::Pin;\nuse std::task::Poll;\nuse std::time::Duration;\n\nuse crate::error::P2PError;\nuse crate::{config::DiscoveryConfig, error::CommandExecutionError};\n\nuse libp2p::kad::{\n    BootstrapOk, BootstrapResult, Event as KademliaEvent, ProgressStep, QueryId, QueryResult,\n};\nuse libp2p::swarm::ToSwarm;\nuse libp2p::{\n    identity::Keypair,\n    kad::{store::MemoryStore, Behaviour, BucketInserts, Config},\n    swarm::NetworkBehaviour,\n    Multiaddr, PeerId,\n};\nuse tokio::sync::oneshot;\nuse tracing::{debug, error, info};\n\nuse super::HealthStatus;\n\npub type PendingRecordRequest = oneshot::Sender<Result<Vec<Multiaddr>, CommandExecutionError>>;\n\n/// DiscoveryBehaviour is responsible to discover and manage connections with peers\npub(crate) struct DiscoveryBehaviour {\n    /// The inner kademlia behaviour\n    pub(crate) inner: Behaviour<MemoryStore>,\n    /// The current bootstrap query id used to track the progress of the bootstrap\n    /// and to avoid to start a new bootstrap query if the previous one is still in progress\n    pub(crate) current_bootstrap_query_id: Option<QueryId>,\n    /// The next bootstrap query interval used to schedule the next bootstrap query\n    pub(crate) next_bootstrap_query: Option<Pin<Box<tokio::time::Interval>>>,\n    /// The health status of the discovery behaviour\n    pub(crate) health_status: HealthStatus,\n}\n\nimpl DiscoveryBehaviour {\n    pub fn create(\n        config: &DiscoveryConfig,\n        peer_key: Keypair,\n        discovery_protocol: Cow<'static, [u8]>,\n        known_peers: &[(PeerId, Multiaddr)],\n        _with_mdns: bool,\n    ) -> Self {\n        let local_peer_id = peer_key.public().to_peer_id();\n        let kademlia_config = Config::default()\n            .set_replication_factor(config.replication_factor)\n            .set_kbucket_inserts(BucketInserts::Manual)\n            .set_replication_interval(config.replication_interval)\n            
.set_publication_interval(config.publication_interval)\n            .set_provider_publication_interval(config.provider_publication_interval)\n            .to_owned();\n\n        let mut kademlia = Behaviour::with_config(\n            local_peer_id,\n            MemoryStore::new(local_peer_id),\n            kademlia_config,\n        );\n\n        for known_peer in known_peers {\n            info!(\n                \"Adding the known peer:{} reachable at {}\",\n                &known_peer.0, &known_peer.1\n            );\n            let x = kademlia.add_address(&known_peer.0, known_peer.1.clone());\n            info!(\n                \"Adding the known peer:{} reachable at {} - {:?}\",\n                &known_peer.0, &known_peer.1, x\n            );\n        }\n\n        Self {\n            inner: kademlia,\n            current_bootstrap_query_id: None,\n            // If the `discovery` behaviour is created without known_peers\n            // The bootstrap query interval is disabled only when the local\n            // node is a lonely bootnode, other nodes will join it.\n            next_bootstrap_query: if known_peers.is_empty() {\n                None\n            } else {\n                let mut interval = tokio::time::interval(config.bootstrap_interval);\n                interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);\n\n                Some(Box::pin(interval))\n            },\n            health_status: if known_peers.is_empty() {\n                HealthStatus::Healthy\n            } else {\n                HealthStatus::Initializing\n            },\n        }\n    }\n\n    /// Start the kademlia bootstrap process if it is not already in progress.\n    /// The bootstrap process is used to discover new peers in the network.\n    /// The bootstrap process starts by sending a `FIND_NODE` query of the local PeerId in the DHT.\n    /// Then multiple random PeerId are created in order to randomly walk the network.\n    pub fn 
bootstrap(&mut self) -> Result<(), P2PError> {\n        if self.current_bootstrap_query_id.is_none() {\n            let query_id = self.inner.bootstrap()?;\n            debug!(\"Started kademlia bootstrap query with query_id: {query_id:?}\");\n            self.current_bootstrap_query_id = Some(query_id);\n        }\n\n        Ok(())\n    }\n\n    /// Change the interval of the next bootstrap queries\n    pub async fn change_interval(&mut self, duration: Duration) -> Result<(), P2PError> {\n        if let Some(interval) = self.next_bootstrap_query.as_mut() {\n            let mut new_interval = tokio::time::interval(duration);\n            // Delay the next tick\n            new_interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);\n            // ignore first tick\n            _ = new_interval.tick().await;\n            interval.set(new_interval);\n        }\n\n        Ok(())\n    }\n}\n\nimpl NetworkBehaviour for DiscoveryBehaviour {\n    type ConnectionHandler = <Behaviour<MemoryStore> as NetworkBehaviour>::ConnectionHandler;\n\n    type ToSwarm = KademliaEvent;\n\n    fn handle_established_inbound_connection(\n        &mut self,\n        connection_id: libp2p::swarm::ConnectionId,\n        peer: PeerId,\n        local_addr: &Multiaddr,\n        remote_addr: &Multiaddr,\n    ) -> Result<libp2p::swarm::THandler<Self>, libp2p::swarm::ConnectionDenied> {\n        self.inner.handle_established_inbound_connection(\n            connection_id,\n            peer,\n            local_addr,\n            remote_addr,\n        )\n    }\n\n    fn handle_established_outbound_connection(\n        &mut self,\n        connection_id: libp2p::swarm::ConnectionId,\n        peer: PeerId,\n        addr: &Multiaddr,\n        role_override: libp2p::core::Endpoint,\n    ) -> Result<libp2p::swarm::THandler<Self>, libp2p::swarm::ConnectionDenied> {\n        self.inner\n            .handle_established_outbound_connection(connection_id, peer, addr, role_override)\n    
}\n\n    fn on_swarm_event(&mut self, event: libp2p::swarm::FromSwarm) {\n        self.inner.on_swarm_event(event)\n    }\n\n    fn on_connection_handler_event(\n        &mut self,\n        peer_id: PeerId,\n        connection_id: libp2p::swarm::ConnectionId,\n        event: libp2p::swarm::THandlerOutEvent<Self>,\n    ) {\n        self.inner\n            .on_connection_handler_event(peer_id, connection_id, event)\n    }\n\n    fn poll(\n        &mut self,\n        cx: &mut std::task::Context<'_>,\n    ) -> Poll<libp2p::swarm::ToSwarm<Self::ToSwarm, libp2p::swarm::THandlerInEvent<Self>>> {\n        // Poll the kademlia bootstrap interval future in order to define if we need to call the\n        // `bootstrap`\n        if let Some(next_bootstrap_query) = self.next_bootstrap_query.as_mut() {\n            if next_bootstrap_query.poll_tick(cx).is_ready() {\n                if let Err(error) = self.bootstrap() {\n                    error!(\"Error while create bootstrap query: {error:?}\");\n                }\n            }\n        }\n\n        if let Poll::Ready(event) = self.inner.poll(cx) {\n            match event {\n                // When a Bootstrap query ends, we reset the `query_id`\n                ToSwarm::GenerateEvent(KademliaEvent::OutboundQueryProgressed {\n                    id,\n                    result:\n                        result @ QueryResult::Bootstrap(BootstrapResult::Ok(BootstrapOk {\n                            num_remaining: 0,\n                            ..\n                        })),\n                    step: step @ ProgressStep { last: true, .. 
},\n                    stats,\n                }) if Some(&id) == self.current_bootstrap_query_id.as_ref() => {\n                    if let Some(interval) = self.next_bootstrap_query.as_mut() {\n                        interval.reset();\n                    };\n\n                    self.current_bootstrap_query_id = None;\n                    debug!(\"Kademlia bootstrap completed with query_id: {id:?}\");\n\n                    return Poll::Ready(ToSwarm::GenerateEvent(\n                        KademliaEvent::OutboundQueryProgressed {\n                            id,\n                            result,\n                            stats,\n                            step,\n                        },\n                    ));\n                }\n                event => {\n                    return Poll::Ready(event);\n                }\n            }\n        }\n\n        Poll::Pending\n    }\n\n    fn handle_pending_inbound_connection(\n        &mut self,\n        connection_id: libp2p::swarm::ConnectionId,\n        local_addr: &Multiaddr,\n        remote_addr: &Multiaddr,\n    ) -> Result<(), libp2p::swarm::ConnectionDenied> {\n        self.inner\n            .handle_pending_inbound_connection(connection_id, local_addr, remote_addr)\n    }\n\n    fn handle_pending_outbound_connection(\n        &mut self,\n        connection_id: libp2p::swarm::ConnectionId,\n        maybe_peer: Option<PeerId>,\n        addresses: &[Multiaddr],\n        effective_role: libp2p::core::Endpoint,\n    ) -> Result<Vec<Multiaddr>, libp2p::swarm::ConnectionDenied> {\n        self.inner.handle_pending_outbound_connection(\n            connection_id,\n            maybe_peer,\n            addresses,\n            effective_role,\n        )\n    }\n}\n"
  },
  {
    "path": "crates/topos-p2p/src/behaviour/gossip.rs",
    "content": "use std::collections::hash_map::DefaultHasher;\nuse std::collections::HashSet;\nuse std::hash::{Hash, Hasher};\nuse std::{\n    collections::{HashMap, VecDeque},\n    env,\n    task::Poll,\n    time::Duration,\n};\n\nuse libp2p::swarm::{ConnectionClosed, FromSwarm};\nuse libp2p::PeerId;\nuse libp2p::{\n    gossipsub::{self, IdentTopic, Message, MessageAuthenticity},\n    identity::Keypair,\n    swarm::{NetworkBehaviour, THandlerInEvent, ToSwarm},\n};\nuse prost::Message as ProstMessage;\nuse topos_core::api::grpc::tce::v1::Batch;\nuse topos_metrics::P2P_GOSSIP_BATCH_SIZE;\nuse tracing::{debug, error, warn};\n\nuse crate::error::P2PError;\nuse crate::{constants, event::ComposedEvent, TOPOS_ECHO, TOPOS_GOSSIP, TOPOS_READY};\n\nuse super::HealthStatus;\n\nconst MAX_BATCH_SIZE: usize = 10;\n\npub struct Behaviour {\n    batch_size: usize,\n    gossipsub: gossipsub::Behaviour,\n    pending: HashMap<&'static str, VecDeque<Vec<u8>>>,\n    tick: tokio::time::Interval,\n    /// List of connected peers per topic.\n    connected_peer: HashMap<&'static str, HashSet<PeerId>>,\n    /// The health status of the gossip behaviour\n    pub(crate) health_status: HealthStatus,\n}\n\nimpl Behaviour {\n    pub fn publish(\n        &mut self,\n        topic: &'static str,\n        message: Vec<u8>,\n    ) -> Result<usize, &'static str> {\n        match topic {\n            TOPOS_GOSSIP => {\n                if let Ok(msg_id) = self.gossipsub.publish(IdentTopic::new(topic), message) {\n                    debug!(\"Published on topos_gossip: {:?}\", msg_id);\n                }\n            }\n            TOPOS_ECHO | TOPOS_READY => self.pending.entry(topic).or_default().push_back(message),\n            _ => return Err(\"Invalid topic\"),\n        }\n\n        Ok(0)\n    }\n\n    pub fn subscribe(&mut self) -> Result<(), P2PError> {\n        self.gossipsub\n            .subscribe(&gossipsub::IdentTopic::new(TOPOS_GOSSIP))?;\n\n        self.gossipsub\n            
.subscribe(&gossipsub::IdentTopic::new(TOPOS_ECHO))?;\n\n        self.gossipsub\n            .subscribe(&gossipsub::IdentTopic::new(TOPOS_READY))?;\n\n        Ok(())\n    }\n\n    pub async fn new(peer_key: Keypair) -> Self {\n        let batch_size = env::var(\"TOPOS_GOSSIP_BATCH_SIZE\")\n            .map(|v| v.parse::<usize>())\n            .unwrap_or(Ok(MAX_BATCH_SIZE))\n            .unwrap();\n        let gossipsub = gossipsub::ConfigBuilder::default()\n            .max_transmit_size(2 * 1024 * 1024)\n            .validation_mode(gossipsub::ValidationMode::Strict)\n            .message_id_fn(|msg_id| {\n                // Content based id\n                let mut s = DefaultHasher::new();\n                msg_id.data.hash(&mut s);\n                gossipsub::MessageId::from(s.finish().to_be_bytes())\n            })\n            .build()\n            .unwrap();\n\n        let gossipsub = gossipsub::Behaviour::new_with_metrics(\n            MessageAuthenticity::Signed(peer_key),\n            gossipsub,\n            constants::METRIC_REGISTRY\n                .lock()\n                .await\n                .sub_registry_with_prefix(\"libp2p_gossipsub\"),\n            Default::default(),\n        )\n        .unwrap();\n\n        Self {\n            batch_size,\n            gossipsub,\n            pending: [\n                (TOPOS_ECHO, VecDeque::new()),\n                (TOPOS_READY, VecDeque::new()),\n            ]\n            .into_iter()\n            .collect(),\n            tick: tokio::time::interval(Duration::from_millis(\n                env::var(\"TOPOS_GOSSIP_INTERVAL\")\n                    .map(|v| v.parse::<u64>())\n                    .unwrap_or(Ok(100))\n                    .unwrap(),\n            )),\n\n            connected_peer: Default::default(),\n            health_status: Default::default(),\n        }\n    }\n}\n\nimpl NetworkBehaviour for Behaviour {\n    type ConnectionHandler = <gossipsub::Behaviour as 
NetworkBehaviour>::ConnectionHandler;\n\n    type ToSwarm = ComposedEvent;\n\n    fn handle_established_inbound_connection(\n        &mut self,\n        connection_id: libp2p::swarm::ConnectionId,\n        peer: libp2p::PeerId,\n        local_addr: &libp2p::Multiaddr,\n        remote_addr: &libp2p::Multiaddr,\n    ) -> Result<libp2p::swarm::THandler<Self>, libp2p::swarm::ConnectionDenied> {\n        self.gossipsub.handle_established_inbound_connection(\n            connection_id,\n            peer,\n            local_addr,\n            remote_addr,\n        )\n    }\n\n    fn handle_established_outbound_connection(\n        &mut self,\n        connection_id: libp2p::swarm::ConnectionId,\n        peer: libp2p::PeerId,\n        addr: &libp2p::Multiaddr,\n        role_override: libp2p::core::Endpoint,\n    ) -> Result<libp2p::swarm::THandler<Self>, libp2p::swarm::ConnectionDenied> {\n        self.gossipsub.handle_established_outbound_connection(\n            connection_id,\n            peer,\n            addr,\n            role_override,\n        )\n    }\n\n    fn on_swarm_event(&mut self, event: libp2p::swarm::FromSwarm) {\n        if let FromSwarm::ConnectionClosed(ConnectionClosed {\n            peer_id,\n            connection_id,\n            endpoint,\n            remaining_established,\n            ..\n        }) = &event\n        {\n            debug!(\n                \"Connection closed: {:?} {:?} {:?} {:?}\",\n                peer_id, connection_id, endpoint, remaining_established\n            );\n\n            for (_, topic) in self.connected_peer.iter_mut() {\n                topic.remove(peer_id);\n            }\n        }\n\n        self.gossipsub.on_swarm_event(event)\n    }\n\n    fn on_connection_handler_event(\n        &mut self,\n        peer_id: libp2p::PeerId,\n        connection_id: libp2p::swarm::ConnectionId,\n        event: libp2p::swarm::THandlerOutEvent<Self>,\n    ) {\n        self.gossipsub\n            
.on_connection_handler_event(peer_id, connection_id, event)\n    }\n\n    fn poll(\n        &mut self,\n        cx: &mut std::task::Context<'_>,\n    ) -> Poll<ToSwarm<Self::ToSwarm, THandlerInEvent<Self>>> {\n        if self.tick.poll_tick(cx).is_ready() {\n            // Publish batch\n            for (topic, queue) in self.pending.iter_mut() {\n                if !queue.is_empty() {\n                    let num_of_message = queue.len().min(self.batch_size);\n                    let batch = Batch {\n                        messages: queue.drain(0..num_of_message).collect(),\n                    };\n\n                    debug!(\"Publishing {} {}\", batch.messages.len(), topic);\n                    let msg = batch.encode_to_vec();\n                    P2P_GOSSIP_BATCH_SIZE.observe(batch.messages.len() as f64);\n                    match self.gossipsub.publish(IdentTopic::new(*topic), msg) {\n                        Ok(message_id) => debug!(\"Published {} {}\", topic, message_id),\n                        Err(error) => error!(\"Failed to publish {}: {}\", topic, error),\n                    }\n                }\n            }\n        }\n\n        match self.gossipsub.poll(cx) {\n            Poll::Pending => return Poll::Pending,\n            Poll::Ready(ToSwarm::GenerateEvent(event)) => match event {\n                gossipsub::Event::Message {\n                    propagation_source,\n                    message_id,\n                    message:\n                        Message {\n                            source,\n                            data,\n                            topic,\n                            ..\n                        },\n                } => match topic.as_str() {\n                    TOPOS_GOSSIP => {\n                        return Poll::Ready(ToSwarm::GenerateEvent(ComposedEvent::Gossipsub(\n                            crate::event::GossipEvent::Message {\n                                topic: TOPOS_GOSSIP,\n                          
      message: data,\n                                source,\n                            },\n                        )))\n                    }\n                    TOPOS_ECHO => {\n                        return Poll::Ready(ToSwarm::GenerateEvent(ComposedEvent::Gossipsub(\n                            crate::event::GossipEvent::Message {\n                                topic: TOPOS_ECHO,\n                                message: data,\n                                source,\n                            },\n                        )))\n                    }\n                    TOPOS_READY => {\n                        return Poll::Ready(ToSwarm::GenerateEvent(ComposedEvent::Gossipsub(\n                            crate::event::GossipEvent::Message {\n                                topic: TOPOS_READY,\n                                message: data,\n                                source,\n                            },\n                        )))\n                    }\n                    _ => {}\n                },\n                gossipsub::Event::Subscribed { peer_id, topic } => {\n                    debug!(\"{peer_id} subscribed to {:?}\", topic);\n\n                    // If the behaviour isn't already healthy we check if this event\n                    // triggers a switch to healthy\n                    if self.health_status != HealthStatus::Healthy\n                        && self.gossipsub.topics().all(|topic| {\n                            self.gossipsub.mesh_peers(topic).peekable().peek().is_some()\n                        })\n                    {\n                        self.health_status = HealthStatus::Healthy;\n                    }\n                }\n                gossipsub::Event::Unsubscribed { peer_id, topic } => {\n                    debug!(\"{peer_id} unsubscribed from {:?}\", topic);\n                }\n                gossipsub::Event::GossipsubNotSupported { peer_id } => {\n                    debug!(\"Gossipsub not supported 
by {:?}\", peer_id);\n                }\n            },\n            Poll::Ready(ToSwarm::ListenOn { opts }) => {\n                return Poll::Ready(ToSwarm::ListenOn { opts })\n            }\n            Poll::Ready(ToSwarm::RemoveListener { id }) => {\n                return Poll::Ready(ToSwarm::RemoveListener { id })\n            }\n            Poll::Ready(ToSwarm::Dial { opts }) => return Poll::Ready(ToSwarm::Dial { opts }),\n            Poll::Ready(ToSwarm::NotifyHandler {\n                peer_id,\n                handler,\n                event,\n            }) => {\n                return Poll::Ready(ToSwarm::NotifyHandler {\n                    peer_id,\n                    handler,\n                    event,\n                })\n            }\n            Poll::Ready(ToSwarm::CloseConnection {\n                peer_id,\n                connection,\n            }) => {\n                return Poll::Ready(ToSwarm::CloseConnection {\n                    peer_id,\n                    connection,\n                })\n            }\n            Poll::Ready(ToSwarm::ExternalAddrExpired(addr)) => {\n                return Poll::Ready(ToSwarm::ExternalAddrExpired(addr))\n            }\n            Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)) => {\n                return Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr))\n            }\n            Poll::Ready(ToSwarm::NewExternalAddrCandidate(addr)) => {\n                return Poll::Ready(ToSwarm::NewExternalAddrCandidate(addr))\n            }\n            Poll::Ready(event) => {\n                warn!(\"Unhandled event in gossip behaviour: {:?}\", event);\n            }\n        }\n\n        Poll::Pending\n    }\n}\n"
  },
  {
    "path": "crates/topos-p2p/src/behaviour/grpc/connection.rs",
    "content": "use std::future::IntoFuture;\n\nuse futures::{future::BoxFuture, FutureExt};\nuse libp2p::{swarm::ConnectionId, Multiaddr};\nuse tokio::sync::oneshot;\nuse tonic::transport::Channel;\n\nuse super::{\n    error::{OutboundConnectionError, OutboundError},\n    RequestId,\n};\n\n/// Connection struct which represents a connection between two nodes\n/// It contains the connection id, the address of the node, the request id\n/// and the gRPC channel which is used to communicate with the node.\n#[derive(Debug)]\npub(crate) struct Connection {\n    /// The connection id\n    pub(crate) id: ConnectionId,\n\n    /// The address of the node\n    pub(crate) address: Option<Multiaddr>,\n\n    /// The request id that is served by this connection\n    pub(crate) request_id: Option<RequestId>,\n\n    /// The gRPC channel used to communicate with the node\n    pub(crate) channel: Option<Channel>,\n}\n\n/// Connection request struct which is used to open a connection to a node\npub(crate) struct OutboundConnectionRequest {\n    pub(crate) request_id: RequestId,\n    pub(crate) notifier: oneshot::Sender<Result<Channel, OutboundError>>,\n    pub(crate) protocol: String,\n}\n\n/// Struct which is used to represent a connected channel connection\n#[derive(Debug)]\npub struct OutboundConnectedConnection {\n    #[allow(dead_code)]\n    pub(crate) request_id: RequestId,\n    // TODO: Remove unused when gRPC behaviour is activated\n    #[allow(unused)]\n    pub(crate) channel: tonic::transport::Channel,\n}\n\n/// Enum that represents the different states of an outbound connection\n#[derive(Debug)]\npub enum OutboundConnection {\n    Connected(OutboundConnectedConnection),\n    Pending {\n        request_id: RequestId,\n    },\n    Opening {\n        request_id: RequestId,\n        receiver: oneshot::Receiver<Result<Channel, OutboundError>>,\n    },\n}\n\nimpl IntoFuture for OutboundConnection {\n    type Output = Result<OutboundConnectedConnection, 
OutboundConnectionError>;\n\n    type IntoFuture = BoxFuture<'static, Self::Output>;\n\n    fn into_future(self) -> Self::IntoFuture {\n        async move {\n            match self {\n                // The outbound connection is already opened\n                OutboundConnection::Connected(connected) => Ok(connected),\n                // The outbound connection is in pending\n                OutboundConnection::Pending { request_id } => {\n                    Err(OutboundConnectionError::AlreadyNegotiating)\n                }\n                // The connection is in opening state so we need to proceed to connect\n                OutboundConnection::Opening {\n                    request_id,\n                    receiver,\n                } => {\n                    let channel = receiver.await??;\n\n                    Ok(OutboundConnectedConnection {\n                        request_id,\n                        channel,\n                    })\n                }\n            }\n        }\n        .boxed()\n    }\n}\n"
  },
  {
    "path": "crates/topos-p2p/src/behaviour/grpc/error.rs",
    "content": "use std::sync::Arc;\n\nuse tokio::sync::oneshot;\n\n#[derive(Debug, thiserror::Error)]\npub enum OutboundError {\n    #[error(\"Unable to Dial\")]\n    DialFailure,\n    #[error(\"Peer doesn't support the protocol: {0}\")]\n    UnsupportedProtocol(String),\n    #[error(transparent)]\n    GrpcChannel(#[from] Arc<tonic::transport::Error>),\n    #[error(\"Outbound connection timeout\")]\n    Timeout,\n}\n\n#[derive(thiserror::Error, Debug)]\npub enum OutboundConnectionError {\n    #[error(transparent)]\n    Outbound(#[from] OutboundError),\n    #[error(transparent)]\n    ConnectionCanceled(#[from] oneshot::error::RecvError),\n    #[error(\"This connection is already negotiating with another client\")]\n    AlreadyNegotiating,\n}\n"
  },
  {
    "path": "crates/topos-p2p/src/behaviour/grpc/event.rs",
    "content": "use libp2p::{swarm::ConnectionId, PeerId};\nuse tonic::transport::Channel;\n\nuse super::{OutboundError, RequestId};\n\n#[derive(Debug)]\npub enum Event {\n    OutboundFailure {\n        peer_id: PeerId,\n        request_id: RequestId,\n        error: OutboundError,\n    },\n\n    OutboundSuccess {\n        peer_id: PeerId,\n        request_id: RequestId,\n        #[allow(unused)]\n        channel: Channel,\n    },\n\n    InboundNegotiatedConnection {\n        request_id: RequestId,\n        connection_id: ConnectionId,\n    },\n\n    OutboundNegotiatedConnection {\n        peer_id: PeerId,\n        request_id: RequestId,\n    },\n}\n"
  },
  {
    "path": "crates/topos-p2p/src/behaviour/grpc/handler/event.rs",
    "content": "use crate::behaviour::grpc::RequestId;\n\nuse super::ProtocolRequest;\n\n#[derive(Debug)]\npub enum Event {\n    InboundNegotiatedStream {\n        request_id: RequestId,\n        stream: libp2p::Stream,\n    },\n    OutboundNegotiatedStream {\n        request_id: RequestId,\n        stream: libp2p::Stream,\n    },\n    UnsupportedProtocol(RequestId, String),\n    OutboundTimeout(ProtocolRequest),\n}\n"
  },
  {
    "path": "crates/topos-p2p/src/behaviour/grpc/handler/protocol.rs",
    "content": "use std::collections::HashSet;\n\nuse libp2p::{core::UpgradeInfo, InboundUpgrade, OutboundUpgrade, Stream};\n\n/// UpgradeProtocol for gRPC Connection\n///\n/// This protocol is used to upgrade the connection to a gRPC connection.\n/// It is used by the `Handler` to upgrade the connection to a gRPC connection.\n/// The gRPC protocol is defined as constant but can be updated to manage different\n/// versions of the protocol.\n///\n/// The `UpgradeInfo` trait is implemented to provide the protocol information.\n/// The `OutboundUpgrade` and `InboundUpgrade` traits are implemented to provide\n/// the upgrade of the connection. The upgrade is done by returning the socket\n/// wrapped in a `Future`.\n#[derive(Debug)]\npub struct GrpcUpgradeProtocol {\n    pub(crate) protocols: HashSet<String>,\n}\n\nimpl UpgradeInfo for GrpcUpgradeProtocol {\n    type Info = String;\n\n    type InfoIter = std::collections::hash_set::IntoIter<Self::Info>;\n\n    fn protocol_info(&self) -> Self::InfoIter {\n        self.protocols.clone().into_iter()\n    }\n}\n\nimpl OutboundUpgrade<Stream> for GrpcUpgradeProtocol {\n    type Output = Stream;\n\n    type Error = std::io::Error;\n\n    type Future = futures::future::Ready<Result<Self::Output, Self::Error>>;\n\n    fn upgrade_outbound(self, socket: Stream, _info: Self::Info) -> Self::Future {\n        futures::future::ready(Ok(socket))\n    }\n}\n\nimpl InboundUpgrade<Stream> for GrpcUpgradeProtocol {\n    type Output = Stream;\n\n    type Error = std::io::Error;\n\n    type Future = futures::future::Ready<Result<Self::Output, Self::Error>>;\n\n    fn upgrade_inbound(self, socket: Stream, _info: Self::Info) -> Self::Future {\n        futures::future::ready(Ok(socket))\n    }\n}\n"
  },
  {
    "path": "crates/topos-p2p/src/behaviour/grpc/handler.rs",
    "content": "use std::{\n    collections::{HashSet, VecDeque},\n    sync::{\n        atomic::{AtomicU64, Ordering},\n        Arc,\n    },\n    task::Poll,\n};\n\nuse libp2p::swarm::{\n    handler::{ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound},\n    ConnectionHandler, ConnectionHandlerEvent, SubstreamProtocol,\n};\nuse tracing::{debug, warn};\n\nuse self::protocol::GrpcUpgradeProtocol;\n\nuse super::RequestId;\n\npub(crate) mod event;\nuse event::Event;\npub(crate) mod protocol;\n\n#[derive(Debug)]\npub struct ProtocolRequest {\n    pub(crate) request_id: RequestId,\n    pub(crate) protocol: String,\n}\n\n/// Handler for gRPC connections\npub struct Handler {\n    /// Next inbound request id\n    inbound_request_id: Arc<AtomicU64>,\n    /// Pending events to send\n    pending_events: VecDeque<Event>,\n    /// Optional outbound request id\n    outbound_request_id: Option<ProtocolRequest>,\n    protocols: HashSet<String>,\n    keep_alive: bool,\n}\n\nimpl Handler {\n    pub(crate) fn new(inbound_request_id: Arc<AtomicU64>, protocols: HashSet<String>) -> Self {\n        Self {\n            inbound_request_id,\n            pending_events: VecDeque::new(),\n            outbound_request_id: None,\n            protocols,\n            keep_alive: true,\n        }\n    }\n}\n\nimpl ConnectionHandler for Handler {\n    type FromBehaviour = ProtocolRequest;\n\n    type ToBehaviour = event::Event;\n\n    type InboundProtocol = GrpcUpgradeProtocol;\n\n    type OutboundProtocol = GrpcUpgradeProtocol;\n\n    type InboundOpenInfo = RequestId;\n\n    type OutboundOpenInfo = ProtocolRequest;\n\n    fn listen_protocol(&self) -> SubstreamProtocol<Self::InboundProtocol, Self::InboundOpenInfo> {\n        let id = self.inbound_request_id.fetch_add(1, Ordering::Relaxed);\n\n        SubstreamProtocol::new(\n            GrpcUpgradeProtocol {\n                protocols: self.protocols.clone(),\n            },\n            RequestId(id),\n        )\n   
 }\n\n    fn connection_keep_alive(&self) -> bool {\n        self.keep_alive\n    }\n\n    fn on_behaviour_event(&mut self, request: Self::FromBehaviour) {\n        let request_id = request.request_id;\n        if let Some(prev) = self.outbound_request_id.replace(request) {\n            warn!(\n                \"Received new outbound request id {:?} while previous request id {:?} is still \\\n                 pending\",\n                request_id, prev.request_id\n            );\n        }\n    }\n\n    fn on_connection_event(\n        &mut self,\n        event: ConnectionEvent<\n            Self::InboundProtocol,\n            Self::OutboundProtocol,\n            Self::InboundOpenInfo,\n            Self::OutboundOpenInfo,\n        >,\n    ) {\n        match event {\n            // New Inbound stream\n            ConnectionEvent::FullyNegotiatedInbound(FullyNegotiatedInbound { protocol, info }) => {\n                self.pending_events\n                    .push_back(Event::InboundNegotiatedStream {\n                        request_id: info,\n                        stream: protocol,\n                    })\n            }\n            // New Outbound stream\n            ConnectionEvent::FullyNegotiatedOutbound(FullyNegotiatedOutbound {\n                protocol,\n                info,\n            }) => self\n                .pending_events\n                .push_back(Event::OutboundNegotiatedStream {\n                    request_id: info.request_id,\n                    stream: protocol,\n                }),\n            ConnectionEvent::DialUpgradeError(DialUpgradeError {\n                info,\n                error: libp2p::swarm::StreamUpgradeError::Timeout,\n            }) => {\n                self.pending_events.push_back(Event::OutboundTimeout(info));\n\n                // Closing the connection handler\n                self.keep_alive = false;\n            }\n            ConnectionEvent::DialUpgradeError(DialUpgradeError {\n                info,\n         
       error: libp2p::swarm::StreamUpgradeError::NegotiationFailed,\n            }) => {\n                self.pending_events\n                    .push_back(Event::UnsupportedProtocol(info.request_id, info.protocol));\n\n                // Closing the connection handler\n                self.keep_alive = false;\n            }\n            ConnectionEvent::DialUpgradeError(_)\n            | ConnectionEvent::AddressChange(_)\n            | ConnectionEvent::ListenUpgradeError(_)\n            | ConnectionEvent::LocalProtocolsChange(_)\n            | ConnectionEvent::RemoteProtocolsChange(_) => (),\n            event => warn!(\"Unhandled connection event: {:?}\", event),\n        }\n    }\n    #[allow(deprecated)]\n    fn poll(\n        &mut self,\n        cx: &mut std::task::Context<'_>,\n    ) -> std::task::Poll<\n        libp2p::swarm::ConnectionHandlerEvent<\n            Self::OutboundProtocol,\n            Self::OutboundOpenInfo,\n            Self::ToBehaviour,\n        >,\n    > {\n        if let Some(event) = self.pending_events.pop_front() {\n            return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(event));\n        }\n\n        if let Some(request) = self.outbound_request_id.take() {\n            debug!(\n                \"Starting outbound request SubstreamProtocol for {}\",\n                request.request_id\n            );\n            let mut protocols = self.protocols.clone();\n            protocols.insert(request.protocol.clone());\n            return Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest {\n                protocol: SubstreamProtocol::new(GrpcUpgradeProtocol { protocols }, request),\n            });\n        }\n\n        Poll::Pending\n    }\n}\n"
  },
  {
    "path": "crates/topos-p2p/src/behaviour/grpc/proxy.rs",
    "content": "use std::{\n    io,\n    pin::Pin,\n    task::{Context, Poll},\n};\n\nuse futures::Stream;\nuse pin_project::pin_project;\nuse tokio::sync::mpsc;\n\nuse super::stream::GrpcStream;\n\n/// Proxy for gRPC connection with the local service.\n#[pin_project]\npub(crate) struct GrpcProxy {\n    #[pin]\n    rx: mpsc::UnboundedReceiver<io::Result<GrpcStream>>,\n}\n\nimpl GrpcProxy {\n    pub(crate) fn new(rx: mpsc::UnboundedReceiver<io::Result<GrpcStream>>) -> Self {\n        Self { rx }\n    }\n}\n\nimpl Stream for GrpcProxy {\n    type Item = io::Result<GrpcStream>;\n\n    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {\n        self.project().rx.as_mut().poll_recv(cx)\n    }\n}\n"
  },
  {
    "path": "crates/topos-p2p/src/behaviour/grpc/stream.rs",
    "content": "use std::{\n    io,\n    pin::Pin,\n    sync::{Arc, Mutex},\n    task::{Context, Poll},\n};\n\nuse futures::{AsyncRead as FuturesAsyncRead, AsyncWrite as FuturesAsyncWrite, Future};\nuse http::Uri;\nuse libp2p::{swarm::ConnectionId, PeerId};\nuse pin_project::pin_project;\nuse tokio::{\n    io::{AsyncRead, AsyncWrite, ReadBuf},\n    sync::mpsc,\n};\nuse tonic::transport::{server::Connected, Channel, Endpoint};\nuse tower::{BoxError, Service};\n\n/// Manage a gRPC Stream linked to an open [`libp2p::Stream`]\n#[pin_project]\npub(crate) struct GrpcStream {\n    #[pin]\n    stream: libp2p::Stream,\n    peer_id: PeerId,\n    connection_id: libp2p::swarm::ConnectionId,\n}\n\n/// Outbound GrpcStream initialization struct\n#[pin_project]\nstruct InitializedGrpcOutboundStream {\n    #[pin]\n    stream_rx: Arc<Mutex<mpsc::Receiver<Result<GrpcStream, BoxError>>>>,\n}\n\n/// Fully negotiated Outbound GrpcStream\n#[pin_project]\nstruct NegotiatedGrpcOutboundStream {\n    #[pin]\n    stream_rx: Arc<Mutex<mpsc::Receiver<Result<GrpcStream, BoxError>>>>,\n}\n\nimpl Future for NegotiatedGrpcOutboundStream {\n    type Output = Result<GrpcStream, BoxError>;\n\n    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {\n        let stream = self.project();\n        let mut fut = stream\n            .stream_rx\n            .lock()\n            .expect(\"Failed to lock gRPC Outbound Stream Receiver\");\n\n        fut.poll_recv(cx).map(|option| option.unwrap())\n    }\n}\n\nimpl Service<Uri> for InitializedGrpcOutboundStream {\n    type Response = GrpcStream;\n\n    type Error = BoxError;\n\n    type Future = NegotiatedGrpcOutboundStream;\n\n    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {\n        Poll::Ready(Ok(()))\n    }\n\n    fn call(&mut self, req: Uri) -> Self::Future {\n        NegotiatedGrpcOutboundStream {\n            stream_rx: self.stream_rx.clone(),\n        }\n    }\n}\n\nimpl GrpcStream {\n    pub 
fn new(stream: libp2p::Stream, peer_id: PeerId, connection_id: ConnectionId) -> Self {\n        Self {\n            stream,\n            peer_id,\n            connection_id,\n        }\n    }\n\n    /// Transform the GrpcStream into a [`tonic::transport::Channel`]\n    pub async fn into_channel(self) -> Result<Channel, tonic::transport::Error> {\n        let (sender, receiver) = mpsc::channel(1);\n\n        let connection = InitializedGrpcOutboundStream {\n            stream_rx: Arc::new(Mutex::new(receiver)),\n        };\n\n        let fut = async move {\n            Endpoint::try_from(\"http://[::]:50051\")\n                .unwrap()\n                .connect_with_connector(connection)\n                .await\n        };\n\n        let (channel, send_result) = tokio::join!(fut, sender.send(Ok(self)));\n\n        channel\n    }\n}\n\nimpl Connected for GrpcStream {\n    type ConnectInfo = ();\n\n    fn connect_info(&self) -> Self::ConnectInfo {}\n}\n\nimpl AsyncRead for GrpcStream {\n    fn poll_read(\n        self: Pin<&mut Self>,\n        cx: &mut Context<'_>,\n        buf: &mut ReadBuf<'_>,\n    ) -> Poll<io::Result<()>> {\n        let unfilled = buf.initialize_unfilled();\n\n        self.project()\n            .stream\n            .poll_read(cx, unfilled)\n            .map_ok(|len| buf.advance(len))\n    }\n}\n\nimpl AsyncWrite for GrpcStream {\n    fn poll_write(\n        self: Pin<&mut Self>,\n        cx: &mut Context<'_>,\n        buf: &[u8],\n    ) -> Poll<Result<usize, io::Error>> {\n        self.project().stream.poll_write(cx, buf)\n    }\n\n    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {\n        self.project().stream.poll_flush(cx)\n    }\n\n    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {\n        self.project().stream.poll_close(cx)\n    }\n}\n"
  },
  {
    "path": "crates/topos-p2p/src/behaviour/grpc.rs",
    "content": "use std::{\n    collections::{HashMap, HashSet, VecDeque},\n    io,\n    sync::{atomic::AtomicU64, Arc},\n    task::{Context, Poll},\n};\n\nuse futures::{future::BoxFuture, stream::FuturesUnordered, FutureExt, StreamExt};\nuse handler::Handler;\nuse libp2p::{\n    core::ConnectedPoint,\n    swarm::{\n        derive_prelude::{ConnectionEstablished, ListenerId, NewListener},\n        dial_opts::DialOpts,\n        ConnectionClosed, DialError, DialFailure, FromSwarm, NetworkBehaviour, ToSwarm,\n    },\n    Multiaddr, PeerId,\n};\nuse smallvec::SmallVec;\nuse std::fmt::Display;\nuse tokio::sync::{mpsc, oneshot};\nuse tonic::transport::{server::Router, Channel};\nuse tracing::{debug, info, warn};\n\nuse crate::GrpcRouter;\n\nuse self::{\n    connection::{\n        Connection, OutboundConnectedConnection, OutboundConnection, OutboundConnectionRequest,\n    },\n    error::OutboundError,\n    handler::ProtocolRequest,\n    stream::GrpcStream,\n};\npub(crate) use event::Event;\n\npub(crate) mod connection;\npub mod error;\npub mod event;\npub(crate) mod handler;\nmod proxy;\nmod stream;\n\n#[derive(Default)]\npub struct GrpcContext {\n    server: Option<GrpcRouter>,\n    client: HashSet<String>,\n}\n\nimpl GrpcContext {\n    pub(crate) fn into_parts(mut self) -> (Option<Router>, (HashSet<String>, HashSet<String>)) {\n        let (server, inbound_protocols) = self\n            .server\n            .map(|server| (Some(server.server), server.protocols))\n            .unwrap_or((None, HashSet::new()));\n\n        if self.client.is_empty() {\n            self.client = inbound_protocols.clone();\n        }\n\n        (server, (inbound_protocols, self.client))\n    }\n\n    pub fn with_router(mut self, router: GrpcRouter) -> Self {\n        self.server = Some(router);\n\n        self\n    }\n\n    pub fn add_client_protocol<S: ToString>(mut self, protocol: S) -> Self {\n        self.client.insert(protocol.to_string());\n\n        self\n    }\n\n    pub fn 
with_client_protocols(mut self, protocols: HashSet<String>) -> Self {\n        self.client = protocols;\n\n        self\n    }\n}\n\n/// The request id used to identify a gRPC request\n#[derive(Debug, Clone, Copy, PartialEq, Eq)]\npub struct RequestId(pub(crate) u64);\n\nimpl Display for RequestId {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        write!(f, \"{}\", self.0)\n    }\n}\n\ntype ChannelNegotiationFuture =\n    BoxFuture<'static, (Result<Channel, tonic::transport::Error>, RequestId, PeerId)>;\n\n/// gRPC behaviour for libp2p\n///\n/// Allows opening gRPC connections to peers and to accept incoming gRPC connections.\n/// It also handles the negotiation of the gRPC channel. Once the channel is established,\n/// the behaviour will return a [`GrpcStream`] that can be used to send and receive gRPC messages.\n/// A gRPC Router is optional because as a client or light client I need to be able to open a connection\n/// to a peer without having a gRPC service to expose.\npub(crate) struct Behaviour {\n    /// The optional gRPC service to expose\n    service: Option<Router>,\n    /// The next request id to use\n    next_request_id: RequestId,\n    /// The next inbound request id to use\n    next_inbound_request_id: Arc<AtomicU64>,\n    /// The list of connected peers with the associated gRPC channel\n    connected: HashMap<PeerId, SmallVec<[Connection; 2]>>,\n    /// The list of known addresses for each peer managed by `add_address` and `remove_address`\n    addresses: HashMap<PeerId, SmallVec<[Multiaddr; 6]>>,\n    /// The optional inbound stream to receive gRPC connections\n    inbound_stream: Option<mpsc::UnboundedSender<io::Result<stream::GrpcStream>>>,\n    /// The list of pending outbound connections\n    pending_outbound_connections: HashMap<PeerId, OutboundConnectionRequest>,\n    /// The list of pending events to send to the swarm\n    pending_events: VecDeque<ToSwarm<Event, ProtocolRequest>>,\n    /// The list of pending 
channel negotiation futures\n    pending_negotiated_channels: FuturesUnordered<ChannelNegotiationFuture>,\n    inbound_protocols: HashSet<String>,\n    outbound_protocols: HashSet<String>,\n}\n\nimpl Behaviour {\n    // TODO: Remove unused when gRPC behaviour is activated\n    pub fn new(service: GrpcContext) -> Self {\n        let (service, (inbound_protocols, outbound_protocols)) = service.into_parts();\n\n        Self {\n            service,\n            inbound_protocols,\n            outbound_protocols,\n            connected: HashMap::new(),\n            addresses: HashMap::new(),\n            inbound_stream: None,\n            next_request_id: RequestId(1),\n            next_inbound_request_id: Arc::new(AtomicU64::new(0)),\n            pending_outbound_connections: HashMap::new(),\n            pending_events: VecDeque::new(),\n            pending_negotiated_channels: FuturesUnordered::new(),\n        }\n    }\n\n    /// Adds a known address for a peer that can be used for\n    /// dialing attempts by the `Swarm`\n    ///\n    /// Addresses added in this way are only removed by `remove_address`.\n    #[cfg(test)]\n    pub fn add_address(&mut self, peer: &PeerId, address: Multiaddr) {\n        self.addresses.entry(*peer).or_default().push(address);\n    }\n\n    /// Removes an address of a peer previously added via `add_address`.\n    #[cfg(test)]\n    #[allow(unused)]\n    pub fn remove_address(&mut self, peer: &PeerId, address: &Multiaddr) {\n        let mut last = false;\n        if let Some(addresses) = self.addresses.get_mut(peer) {\n            addresses.retain(|a| a != address);\n            last = addresses.is_empty();\n        }\n        if last {\n            self.addresses.remove(peer);\n        }\n    }\n\n    /// Ask the behaviour to create a new outbound connection for the given peer.\n    ///\n    /// The return value is an [`OutboundConnection`] that can be used to check the status of the\n    /// connection. 
If the connection is pending, the request id is returned. If the connection\n    /// is established, the gRPC channel is returned.\n    // TODO: Remove unused when gRPC behaviour is activated\n    #[allow(unused)]\n    pub fn open_outbound_connection(\n        &mut self,\n        peer_id: &PeerId,\n        protocol: String,\n    ) -> OutboundConnection {\n        // If there is a pending outbound connection for this peer\n        // return the request id\n        if let Some(request) = self.pending_outbound_connections.get(peer_id) {\n            return OutboundConnection::Pending {\n                request_id: request.request_id,\n            };\n        }\n\n        if let Some(connections) = self.connected.get_mut(peer_id) {\n            match connections.first() {\n                Some(Connection {\n                    id,\n                    address,\n                    request_id: Some(request_id),\n                    channel: Some(channel),\n                }) => OutboundConnection::Connected(OutboundConnectedConnection {\n                    request_id: *request_id,\n                    channel: channel.clone(),\n                }),\n                Some(Connection {\n                    id,\n                    address,\n                    request_id: Some(request_id),\n                    channel,\n                }) => {\n                    debug!(\n                        \"Peer already connected but no channel yet, waiting for channel \\\n                         negotiation\"\n                    );\n\n                    OutboundConnection::Pending {\n                        request_id: *request_id,\n                    }\n                }\n                Some(_) => self.open_connection(peer_id, protocol),\n                _ => {\n                    debug!(\"No connection for this peer {}\", peer_id);\n                    self.open_connection(peer_id, protocol)\n                }\n            }\n        } else {\n            
debug!(\"Buffering sender as no available connection to peer {peer_id} yet\");\n            self.open_connection(peer_id, protocol)\n        }\n    }\n\n    /// Return the next outbound request id\n    fn next_request_id(&mut self) -> RequestId {\n        let request_id = self.next_request_id;\n        self.next_request_id.0 += 1;\n\n        request_id\n    }\n\n    /// Try to open a connection with the given peer.\n    fn open_connection(&mut self, peer_id: &PeerId, protocol: String) -> OutboundConnection {\n        info!(\"Opening gRPC outbound connection to peer {peer_id}\");\n\n        let (notifier, receiver) = oneshot::channel();\n        let request_id = self.next_request_id();\n\n        self.pending_outbound_connections\n            .entry(*peer_id)\n            .or_insert_with(|| OutboundConnectionRequest {\n                request_id,\n                notifier,\n                protocol,\n            });\n\n        self.pending_events.push_back(ToSwarm::Dial {\n            opts: DialOpts::peer_id(*peer_id).build(),\n        });\n\n        OutboundConnection::Opening {\n            request_id,\n            receiver,\n        }\n    }\n\n    /// Handle the [`ConnectionEstablished`] event coming from the [`Swarm`]\n    /// and try to open a gRPC channel using a [`ConnectionHandler`].\n    fn on_connection_established(\n        &mut self,\n        ConnectionEstablished {\n            peer_id,\n            connection_id,\n            endpoint,\n            failed_addresses,\n            other_established,\n        }: ConnectionEstablished,\n    ) {\n        let address = match endpoint {\n            ConnectedPoint::Dialer { address, .. } => Some(address.clone()),\n            ConnectedPoint::Listener { .. 
} => None,\n        };\n\n        let connection = Connection {\n            id: connection_id,\n            address,\n            request_id: None,\n            channel: None,\n        };\n\n        self.connected.entry(peer_id).or_default().push(connection);\n\n        // If there is no current established connection it means that it's the\n        // first connection with that peer\n        if other_established == 0 {\n            self.try_connect(&peer_id);\n        }\n    }\n\n    /// Starts the gRPC service if not already started\n    fn on_new_listener(&mut self, listener_id: ListenerId) {\n        if let Some(service) = self.service.take() {\n            let (tx, rx) = mpsc::unbounded_channel();\n            self.inbound_stream = Some(tx);\n            // TODO: TP-758: Switch to serve_with_incoming_shutdown at some point\n            tokio::spawn(service.serve_with_incoming(proxy::GrpcProxy::new(rx)));\n            info!(\"New gRPC proxy started and listening on {listener_id:?}\");\n        } else {\n            warn!(\n                \"Tried to instantiate a gRPC proxy on {listener_id:?} but the service is missing \\\n                 (already spawn or unprovided)\"\n            );\n        }\n    }\n\n    /// On [`ConnectionClosed`] we clean up the `connected` state of the behaviour.\n    fn on_connection_closed(\n        &mut self,\n        ConnectionClosed {\n            peer_id,\n            connection_id,\n            endpoint,\n            remaining_established,\n        }: ConnectionClosed,\n    ) {\n        debug!(\"Connection {connection_id} closed with peer {peer_id}\");\n        if let Some(connections) = self.connected.get_mut(&peer_id) {\n            connections.retain(|conn| conn.id != connection_id);\n            if connections.is_empty() {\n                self.connected.remove(&peer_id);\n            }\n        }\n    }\n\n    /// Handle the [`DialFailure`] event coming from the [`Swarm`]\n    fn on_dial_failure(\n        &mut self,\n     
   DialFailure {\n            peer_id,\n            error,\n            connection_id,\n        }: DialFailure,\n    ) {\n        if let Some(peer_id) = peer_id {\n            match error {\n                DialError::DialPeerConditionFalse(_) => {\n                    self.try_connect(&peer_id);\n                }\n                _ => {\n                    if let Some(OutboundConnectionRequest {\n                        request_id,\n                        notifier,\n                        protocol,\n                    }) = self.pending_outbound_connections.remove(&peer_id)\n                    {\n                        self.pending_events.push_back(ToSwarm::GenerateEvent(\n                            Event::OutboundFailure {\n                                peer_id,\n                                request_id,\n                                error: OutboundError::DialFailure,\n                            },\n                        ));\n\n                        let _ = notifier.send(Err(OutboundError::DialFailure));\n                    }\n                }\n            }\n        }\n    }\n\n    /// Try to connect an opened outbound connection with a [`ConnectionHandler`]\n    /// in order to handle the request.\n    fn try_connect(&mut self, peer_id: &PeerId) {\n        if let Some(connections) = self.connected.get_mut(peer_id) {\n            let connection = connections.first_mut();\n            if let Some(connection) = connection {\n                if let Some(OutboundConnectionRequest {\n                    request_id,\n                    notifier,\n                    protocol,\n                }) = self.pending_outbound_connections.get(peer_id)\n                {\n                    debug!(\"gRPC Outbound connection established with {peer_id}\");\n                    self.pending_events.push_back(ToSwarm::NotifyHandler {\n                        peer_id: *peer_id,\n                        handler: 
libp2p::swarm::NotifyHandler::One(connection.id),\n                        event: ProtocolRequest {\n                            request_id: *request_id,\n                            protocol: protocol.clone(),\n                        },\n                    });\n                }\n            }\n        }\n    }\n}\n\nimpl NetworkBehaviour for Behaviour {\n    type ConnectionHandler = Handler;\n\n    type ToSwarm = Event;\n\n    fn handle_established_inbound_connection(\n        &mut self,\n        _connection_id: libp2p::swarm::ConnectionId,\n        peer: PeerId,\n        local_addr: &libp2p::Multiaddr,\n        remote_addr: &libp2p::Multiaddr,\n    ) -> Result<libp2p::swarm::THandler<Self>, libp2p::swarm::ConnectionDenied> {\n        Ok(Handler::new(\n            self.next_inbound_request_id.clone(),\n            self.inbound_protocols.clone(),\n        ))\n    }\n\n    fn handle_established_outbound_connection(\n        &mut self,\n        _connection_id: libp2p::swarm::ConnectionId,\n        peer: PeerId,\n        addr: &libp2p::Multiaddr,\n        role_override: libp2p::core::Endpoint,\n    ) -> Result<libp2p::swarm::THandler<Self>, libp2p::swarm::ConnectionDenied> {\n        Ok(Handler::new(\n            self.next_inbound_request_id.clone(),\n            self.outbound_protocols.clone(),\n        ))\n    }\n\n    fn handle_pending_outbound_connection(\n        &mut self,\n        _connection_id: libp2p::swarm::ConnectionId,\n        maybe_peer: Option<PeerId>,\n        _addresses: &[libp2p::Multiaddr],\n        _effective_role: libp2p::core::Endpoint,\n    ) -> Result<Vec<libp2p::Multiaddr>, libp2p::swarm::ConnectionDenied> {\n        let peer_id = match maybe_peer {\n            None => return Ok(vec![]),\n            Some(peer_id) => peer_id,\n        };\n\n        let mut addresses = Vec::new();\n        if let Some(connections) = self.connected.get(&peer_id) {\n            addresses.extend(connections.iter().filter_map(|c| c.address.clone()));\n        
}\n\n        if let Some(more) = self.addresses.get(&peer_id) {\n            addresses.extend(more.into_iter().cloned());\n        }\n\n        Ok(addresses)\n    }\n\n    fn on_connection_handler_event(\n        &mut self,\n        peer_id: PeerId,\n        connection_id: libp2p::swarm::ConnectionId,\n        event: libp2p::swarm::THandlerOutEvent<Self>,\n    ) {\n        match event {\n            handler::event::Event::OutboundTimeout(request) => {\n                debug!(\n                    \"Outbound timeout for request {} with peer {peer_id}\",\n                    request.request_id\n                );\n                self.pending_events\n                    .push_back(ToSwarm::GenerateEvent(Event::OutboundFailure {\n                        peer_id,\n                        request_id: request.request_id,\n                        error: OutboundError::Timeout,\n                    }));\n\n                if let Some(connection_request) = self.pending_outbound_connections.remove(&peer_id)\n                {\n                    _ = connection_request\n                        .notifier\n                        .send(Err(OutboundError::Timeout))\n                }\n            }\n            handler::event::Event::UnsupportedProtocol(request_id, protocol) => {\n                debug!(\n                    \"Unsupported protocol {protocol} for request {request_id} with peer {peer_id}\"\n                );\n                self.pending_events\n                    .push_back(ToSwarm::GenerateEvent(Event::OutboundFailure {\n                        peer_id,\n                        request_id,\n                        error: OutboundError::UnsupportedProtocol(protocol.clone()),\n                    }));\n\n                if let Some(connection_request) = self.pending_outbound_connections.remove(&peer_id)\n                {\n                    _ = connection_request\n                        .notifier\n                        
.send(Err(OutboundError::UnsupportedProtocol(protocol)))\n                }\n            }\n            handler::event::Event::InboundNegotiatedStream { request_id, stream } => {\n                debug!(\"Inbound stream negotiated for request {request_id} with peer {peer_id}\",);\n                if let Some(sender) = &mut self.inbound_stream {\n                    _ = sender.send(Ok(GrpcStream::new(stream, peer_id, connection_id)));\n                    self.pending_events.push_back(ToSwarm::GenerateEvent(\n                        Event::InboundNegotiatedConnection {\n                            request_id,\n                            connection_id,\n                        },\n                    ));\n                }\n            }\n            handler::event::Event::OutboundNegotiatedStream { request_id, stream } => {\n                debug!(\"Outbound stream negotiated for request {request_id} with peer {peer_id}\",);\n                let stream = GrpcStream::new(stream, peer_id, connection_id);\n\n                let future = stream\n                    .into_channel()\n                    .map(move |channel| (channel, request_id, peer_id))\n                    .boxed();\n\n                self.pending_negotiated_channels.push(future);\n            }\n        }\n    }\n\n    fn on_swarm_event(&mut self, event: FromSwarm) {\n        match event {\n            FromSwarm::ConnectionEstablished(connection_established) => {\n                self.on_connection_established(connection_established)\n            }\n            FromSwarm::NewListener(NewListener { listener_id }) => {\n                self.on_new_listener(listener_id)\n            }\n            FromSwarm::ConnectionClosed(connection_closed) => {\n                self.on_connection_closed(connection_closed)\n            }\n            FromSwarm::DialFailure(dial_failure) => self.on_dial_failure(dial_failure),\n            FromSwarm::AddressChange(_)\n            | FromSwarm::ExpiredListenAddr(_)\n      
      | FromSwarm::ExternalAddrConfirmed(_)\n            | FromSwarm::ExternalAddrExpired(_)\n            | FromSwarm::ListenFailure(_)\n            | FromSwarm::ListenerClosed(_)\n            | FromSwarm::ListenerError(_)\n            | FromSwarm::NewExternalAddrCandidate(_)\n            | FromSwarm::NewListenAddr(_) => (),\n            event => debug!(\"Unhandled event from swarm (grpc): {:?}\", event),\n        }\n    }\n\n    fn poll(\n        &mut self,\n        cx: &mut Context<'_>,\n    ) -> Poll<libp2p::swarm::ToSwarm<Self::ToSwarm, libp2p::swarm::THandlerInEvent<Self>>> {\n        // Sending event to both `Swarm` and `ConnectionHandler`\n        if let Some(ev) = self.pending_events.pop_front() {\n            return Poll::Ready(ev);\n        }\n\n        // When channel has been negotiated by the [`ConnectionHandler`] we need\n        // to update the [`Connection`] with the channel.\n        match self.pending_negotiated_channels.poll_next_unpin(cx) {\n            Poll::Ready(Some((Ok(channel), request_id, peer_id))) => {\n                debug!(\"gRPC channel ready for {} {}\", peer_id, request_id);\n                if let Some(conns) = self.connected.get_mut(&peer_id) {\n                    for conn in conns {\n                        if let Some(conn_request_id) = &conn.request_id {\n                            if request_id == *conn_request_id {\n                                conn.channel = Some(channel.clone());\n\n                                break;\n                            }\n                        }\n                    }\n                }\n\n                // Notifying the channel to the initial sender\n                if let Some(req) = self.pending_outbound_connections.remove(&peer_id) {\n                    let _ = req.notifier.send(Ok(channel.clone()));\n                    self.pending_events.push_back(ToSwarm::GenerateEvent(\n                        Event::OutboundNegotiatedConnection {\n                            request_id: 
req.request_id,\n                            peer_id,\n                        },\n                    ));\n                }\n\n                return Poll::Ready(ToSwarm::GenerateEvent(Event::OutboundSuccess {\n                    peer_id,\n                    request_id,\n                    channel,\n                }));\n            }\n\n            Poll::Ready(Some((Err(error), request_id, peer_id))) => {\n                debug!(\"Received error from channel negotiation {:?}\", error);\n                let error = Arc::new(error);\n                if let Some(req) = self.pending_outbound_connections.remove(&peer_id) {\n                    let _ = req\n                        .notifier\n                        .send(Err(OutboundError::GrpcChannel(error.clone())));\n                }\n\n                return Poll::Ready(ToSwarm::GenerateEvent(Event::OutboundFailure {\n                    peer_id,\n                    request_id,\n                    error: OutboundError::GrpcChannel(error),\n                }));\n            }\n            _ => {}\n        }\n\n        Poll::Pending\n    }\n}\n"
  },
  {
    "path": "crates/topos-p2p/src/behaviour/peer_info.rs",
    "content": "use libp2p::{\n    identify::Behaviour as Identify, identify::Config as IdentifyConfig,\n    identify::Event as IdentifyEvent, identity::Keypair, swarm::NetworkBehaviour,\n};\n\n#[derive(NetworkBehaviour)]\n#[behaviour(to_swarm = \"IdentifyEvent\")]\npub struct PeerInfoBehaviour {\n    identify: Identify,\n}\n\nimpl PeerInfoBehaviour {\n    pub(crate) fn new(identify_protocol: &'static str, peer_key: &Keypair) -> PeerInfoBehaviour {\n        let ident_config = IdentifyConfig::new(identify_protocol.to_string(), peer_key.public());\n\n        let identify = Identify::new(ident_config);\n\n        Self { identify }\n    }\n}\n"
  },
  {
    "path": "crates/topos-p2p/src/behaviour/topos.rs",
    "content": "// #[derive(NetworkBehaviour)]\n// #[behaviour(out_event = \"ToposOut\", event_process = true)]\n// pub struct ToposBehaviour {}\n"
  },
  {
    "path": "crates/topos-p2p/src/behaviour.rs",
    "content": "use self::{discovery::DiscoveryBehaviour, peer_info::PeerInfoBehaviour};\nuse crate::event::ComposedEvent;\nuse libp2p::swarm::NetworkBehaviour;\n\npub(crate) mod discovery;\npub(crate) mod gossip;\npub(crate) mod grpc;\npub(crate) mod peer_info;\npub(crate) mod topos;\n\n/// Represents the health status of a behaviour inside the p2p layer\n#[derive(Debug, Default, PartialEq, Eq)]\npub(crate) enum HealthStatus {\n    #[default]\n    Initializing,\n    Healthy,\n    Unhealthy,\n    Killing,\n    #[allow(unused)]\n    Recovering,\n}\n\n#[derive(NetworkBehaviour)]\n#[behaviour(to_swarm = \"ComposedEvent\")]\npub(crate) struct Behaviour {\n    /// Periodically pings and identifies the nodes we are connected to,\n    /// and store information in a cache.\n    pub(crate) peer_info: PeerInfoBehaviour,\n\n    /// DiscoveryBehaviour which handle every aspect of the node discovery\n    pub(crate) discovery: DiscoveryBehaviour,\n\n    /// Gossip behaviour which handle the gossipsub protocol\n    pub(crate) gossipsub: gossip::Behaviour,\n\n    /// Custom gRPC behaviour which handle the different TOPOS gRPC protocols\n    pub(crate) grpc: grpc::Behaviour,\n}\n"
  },
  {
    "path": "crates/topos-p2p/src/client.rs",
    "content": "use futures::future::BoxFuture;\nuse libp2p::PeerId;\nuse tokio::sync::{\n    mpsc::{self, error::SendError},\n    oneshot,\n};\nuse tonic::server::NamedService;\nuse topos_core::api::grpc::GrpcClient;\n\nuse crate::{\n    error::{CommandExecutionError, P2PError},\n    utils::GrpcOverP2P,\n    Command,\n};\n\n#[derive(Clone)]\npub struct NetworkClient {\n    pub retry_ttl: u64,\n    pub local_peer_id: PeerId,\n    pub sender: mpsc::Sender<Command>,\n    pub grpc_over_p2p: GrpcOverP2P,\n    pub shutdown_channel: mpsc::Sender<oneshot::Sender<()>>,\n}\n\nimpl NetworkClient {\n    pub async fn connected_peers(&self) -> Result<Vec<PeerId>, P2PError> {\n        let (sender, receiver) = oneshot::channel();\n        Self::send_command_with_receiver(&self.sender, Command::ConnectedPeers { sender }, receiver)\n            .await\n    }\n\n    pub async fn random_known_peer(&self) -> Result<PeerId, P2PError> {\n        let (sender, receiver) = oneshot::channel();\n        Self::send_command_with_receiver(\n            &self.sender,\n            Command::RandomKnownPeer { sender },\n            receiver,\n        )\n        .await\n    }\n\n    pub fn publish<T: std::fmt::Debug + prost::Message + 'static>(\n        &self,\n        topic: &'static str,\n        message: T,\n    ) -> BoxFuture<'static, Result<(), SendError<Command>>> {\n        let network = self.sender.clone();\n\n        Box::pin(async move {\n            network\n                .send(Command::Gossip {\n                    topic,\n                    data: message.encode_to_vec(),\n                })\n                .await\n        })\n    }\n\n    async fn send_command_with_receiver<\n        T,\n        E: From<oneshot::error::RecvError> + From<CommandExecutionError>,\n    >(\n        sender: &mpsc::Sender<Command>,\n        command: Command,\n        receiver: oneshot::Receiver<Result<T, E>>,\n    ) -> Result<T, E> {\n        if let Err(SendError(command)) = sender.send(command).await {\n  
          return Err(CommandExecutionError::UnableToSendCommand(command).into());\n        }\n\n        receiver.await.unwrap_or_else(|error| Err(error.into()))\n    }\n\n    pub async fn shutdown(&self) -> Result<(), P2PError> {\n        let (sender, receiver) = oneshot::channel();\n        self.shutdown_channel\n            .send(sender)\n            .await\n            .map_err(P2PError::ShutdownCommunication)?;\n\n        Ok(receiver.await?)\n    }\n\n    /// Creates a new gRPC client for the given peer.\n    pub async fn new_grpc_client<C, S>(&self, peer: PeerId) -> Result<C, P2PError>\n    where\n        C: GrpcClient<Output = C>,\n        S: NamedService,\n    {\n        self.grpc_over_p2p.create::<C, S>(peer).await\n    }\n}\n\npub enum RetryPolicy {\n    NoRetry,\n    N(usize),\n}\n"
  },
  {
    "path": "crates/topos-p2p/src/command.rs",
    "content": "use std::fmt::Display;\n\nuse libp2p::PeerId;\nuse tokio::sync::oneshot;\n\nuse crate::{behaviour::grpc::connection::OutboundConnection, error::P2PError};\n\n#[derive(Debug)]\npub enum Command {\n    /// Command to ask for the current connected peer id list\n    ConnectedPeers {\n        sender: oneshot::Sender<Result<Vec<PeerId>, P2PError>>,\n    },\n\n    Gossip {\n        topic: &'static str,\n        data: Vec<u8>,\n    },\n\n    /// Ask for the creation of a new proxy connection for a gRPC query.\n    /// The response will be sent to the sender of the command once the connection is established.\n    /// The response will be a `OutboundConnection` that can be used to create a gRPC client.\n    /// A connection is established if needed with the peer.\n    NewProxiedQuery {\n        protocol: &'static str,\n        peer: PeerId,\n        id: uuid::Uuid,\n        response: oneshot::Sender<OutboundConnection>,\n    },\n\n    /// Ask for a random known peer\n    RandomKnownPeer {\n        sender: oneshot::Sender<Result<PeerId, P2PError>>,\n    },\n}\n\nimpl Display for Command {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        match self {\n            Command::ConnectedPeers { .. } => write!(f, \"ConnectedPeers\"),\n            Command::RandomKnownPeer { .. } => write!(f, \"RandomKnownPeer\"),\n            Command::Gossip { .. } => write!(f, \"GossipMessage\"),\n            Command::NewProxiedQuery { .. } => write!(f, \"NewProxiedQuery\"),\n        }\n    }\n}\n"
  },
  {
    "path": "crates/topos-p2p/src/config.rs",
    "content": "use std::{num::NonZeroUsize, time::Duration};\n\npub struct NetworkConfig {\n    pub minimum_cluster_size: usize,\n    pub client_retry_ttl: u64,\n    pub discovery: DiscoveryConfig,\n    pub yamux_max_buffer_size: usize,\n    pub yamux_window_size: Option<u32>,\n    pub allow_private_ip: bool,\n}\n\nimpl Default for NetworkConfig {\n    fn default() -> Self {\n        Self {\n            minimum_cluster_size: Self::MINIMUM_CLUSTER_SIZE,\n            client_retry_ttl: Self::CLIENT_RETRY_TTL,\n            discovery: Default::default(),\n            yamux_max_buffer_size: usize::MAX,\n            yamux_window_size: None,\n            allow_private_ip: false,\n        }\n    }\n}\n\nimpl NetworkConfig {\n    pub const MINIMUM_CLUSTER_SIZE: usize = 5;\n    pub const CLIENT_RETRY_TTL: u64 = 200;\n}\n\npub struct DiscoveryConfig {\n    pub replication_factor: NonZeroUsize,\n    pub replication_interval: Option<Duration>,\n    pub publication_interval: Option<Duration>,\n    pub provider_publication_interval: Option<Duration>,\n    /// Interval at which the node will send bootstrap query to the network\n    ///\n    /// Defaults to [DiscoveryConfig::BOOTSTRAP_INTERVAL]\n    pub bootstrap_interval: Duration,\n    /// Interval at which the node will send fast bootstrap query to the network\n    /// Mostly used when the node is bootstrapping and failed to connect to boot peers\n    ///\n    /// Defaults to [DiscoveryConfig::FAST_BOOTSTRAP_INTERVAL]\n    pub fast_bootstrap_interval: Duration,\n}\n\nimpl Default for DiscoveryConfig {\n    fn default() -> Self {\n        Self {\n            replication_factor: NonZeroUsize::new(4).unwrap(),\n            replication_interval: Some(Duration::from_secs(10)),\n            publication_interval: Some(Duration::from_secs(10)),\n            provider_publication_interval: Some(Duration::from_secs(10)),\n            bootstrap_interval: Self::BOOTSTRAP_INTERVAL,\n            fast_bootstrap_interval: 
Self::FAST_BOOTSTRAP_INTERVAL,\n        }\n    }\n}\n\nimpl DiscoveryConfig {\n    /// Default bootstrap interval in seconds\n    pub const BOOTSTRAP_INTERVAL: Duration = Duration::from_secs(60);\n    /// Default fast bootstrap interval in seconds\n    pub const FAST_BOOTSTRAP_INTERVAL: Duration = Duration::from_secs(5);\n\n    pub fn with_replication_factor(mut self, replication_factor: NonZeroUsize) -> Self {\n        self.replication_factor = replication_factor;\n\n        self\n    }\n}\n"
  },
  {
    "path": "crates/topos-p2p/src/constants.rs",
    "content": "use std::{env, time::Duration};\n\nuse lazy_static::lazy_static;\nuse prometheus_client::registry::Registry;\nuse tokio::sync::Mutex;\n\nlazy_static! {\n    /// Metric Registry used to register all the metrics from libp2p::gossipsub\n    // NOTE: During tests, if multiple instances are started, they will all point to the same\n    // registry.\n    pub static ref METRIC_REGISTRY: Mutex<Registry> = Mutex::new(<Registry>::with_prefix(\"topos\"));\n    pub static ref EVENT_STREAM_BUFFER: usize = env::var(\"TCE_EVENT_STREAM_BUFFER\")\n        .ok()\n        .and_then(|v| v.parse::<usize>().ok())\n        .unwrap_or(2048 * 2);\n    pub static ref CAPACITY_EVENT_STREAM_BUFFER: usize = EVENT_STREAM_BUFFER\n        .checked_mul(10)\n        .map(|v| {\n            let r: usize = v.checked_div(100).unwrap_or(*EVENT_STREAM_BUFFER);\n            r\n        })\n        .unwrap_or(*EVENT_STREAM_BUFFER);\n    pub static ref COMMAND_STREAM_BUFFER_SIZE: usize = env::var(\"TCE_COMMAND_STREAM_BUFFER_SIZE\")\n        .ok()\n        .and_then(|v| v.parse::<usize>().ok())\n        .unwrap_or(2048);\n}\n\npub const DISCOVERY_PROTOCOL: &str = \"/tce-disco/1\";\npub const PEER_INFO_PROTOCOL: &str = \"/tce-peer-info/1\";\npub const GRPC_P2P_TOPOS_PROTOCOL: &str = \"/topos-grpc-p2p/1.0\";\n\n// FIXME: Considered as constant until customizable and exposed properly in the genesis file\npub const TCE_BOOTNODE_PORT: u16 = 9090;\n\n/// Swarm idle connection timeout\npub const IDLE_CONNECTION_TIMEOUT: Duration = Duration::from_secs(30);\n"
  },
  {
    "path": "crates/topos-p2p/src/error.rs",
    "content": "use std::io;\n\nuse libp2p::{\n    gossipsub::SubscriptionError, kad::NoKnownPeers, noise::Error as NoiseError,\n    request_response::OutboundFailure, TransportError,\n};\nuse thiserror::Error;\nuse tokio::sync::{mpsc, oneshot};\n\nuse crate::{behaviour::grpc::error::OutboundConnectionError, command::Command};\n\n#[derive(Error, Debug)]\npub enum P2PError {\n    #[error(\"Unable to build a network: peer_key missing\")]\n    MissingPeerKey,\n\n    #[error(\"Unable to reach any bootnode\")]\n    UnableToReachBootnode,\n\n    #[error(\"The handle on the runtime failed\")]\n    JoinHandleFailure,\n\n    #[error(transparent)]\n    CommandError(#[from] CommandExecutionError),\n\n    #[error(\"An error occurred on the Transport layer: {0}\")]\n    TransportError(#[from] TransportError<io::Error>),\n\n    #[error(\"An error occurred trying to subscribe to gossip topic: {0}\")]\n    SubscriptionError(#[from] SubscriptionError),\n\n    #[error(\"Unable to receive expected response of a oneshot channel\")]\n    OneshotReceiveError(#[from] oneshot::error::RecvError),\n\n    #[error(\"An error occurred on the Noise protocol: {0}\")]\n    NoiseProtocolError(#[from] NoiseError),\n\n    #[error(\"Error during bootstrap phase: {0}\")]\n    BootstrapError(&'static str),\n\n    #[error(\"Kademlia bootstrap query error: {0}\")]\n    KademliaBootstrapError(#[from] NoKnownPeers),\n\n    #[error(\"Unable to execute shutdown on the p2p runtime: {0}\")]\n    ShutdownCommunication(mpsc::error::SendError<oneshot::Sender<()>>),\n\n    #[error(\"Unable to create gRPC client\")]\n    UnableToCreateGrpcClient(#[from] OutboundConnectionError),\n\n    #[error(\"Gossip topics subscription failed\")]\n    GossipTopicSubscriptionFailure,\n}\n\n#[derive(Error, Debug)]\npub enum CommandExecutionError {\n    #[error(\"Unable to parse message\")]\n    ParsingError,\n\n    #[error(\"Unable to send command {0}\")]\n    UnableToSendCommand(Command),\n\n    #[error(\"Unable to perform query: 
{0}\")]\n    RequestOutboundFailure(#[from] OutboundFailure),\n\n    #[error(\"Unable to receive expected response of a oneshot channel\")]\n    UnableToReceiveCommandResponse(#[from] oneshot::error::RecvError),\n\n    #[error(\"Unable to send a command: {0}\")]\n    SendError(#[from] mpsc::error::SendError<Command>),\n\n    #[error(\"Failed to fetch Record from DHT\")]\n    DHTGetRecordFailed,\n\n    #[error(\"Connection with a peer has failed\")]\n    ConnectionClosed,\n\n    #[error(\"No known peer in the peer set\")]\n    NoKnownPeer,\n}\n"
  },
  {
    "path": "crates/topos-p2p/src/event.rs",
    "content": "use libp2p::{identify, kad, PeerId};\n\nuse crate::behaviour::{grpc, HealthStatus};\n\n/// Represents the events that the Gossip protocol can emit\n#[derive(Debug)]\npub enum GossipEvent {\n    /// A message has been received from a peer on one of the subscribed topics\n    Message {\n        source: Option<PeerId>,\n        topic: &'static str,\n        message: Vec<u8>,\n    },\n}\n\n#[derive(Debug)]\npub enum ComposedEvent {\n    Kademlia(Box<kad::Event>),\n    PeerInfo(Box<identify::Event>),\n    Gossipsub(GossipEvent),\n    Grpc(grpc::Event),\n    Void,\n}\n\nimpl From<grpc::Event> for ComposedEvent {\n    fn from(event: grpc::Event) -> Self {\n        ComposedEvent::Grpc(event)\n    }\n}\n\nimpl From<kad::Event> for ComposedEvent {\n    fn from(event: kad::Event) -> Self {\n        ComposedEvent::Kademlia(Box::new(event))\n    }\n}\n\nimpl From<identify::Event> for ComposedEvent {\n    fn from(event: identify::Event) -> Self {\n        ComposedEvent::PeerInfo(Box::new(event))\n    }\n}\n\nimpl From<void::Void> for ComposedEvent {\n    fn from(_: void::Void) -> Self {\n        Self::Void\n    }\n}\n\n/// Represents the events that the p2p layer can emit\n#[derive(Debug)]\npub enum Event {\n    /// An event emitted when a gossip message is received\n    Gossip { from: PeerId, data: Vec<u8> },\n    /// An event emitted when the p2p layer becomes healthy\n    Healthy,\n    /// An event emitted when the p2p layer becomes unhealthy\n    Unhealthy,\n    /// An event emitted when the p2p layer is shutting down\n    Killing,\n}\n\nimpl From<&HealthStatus> for Event {\n    fn from(value: &HealthStatus) -> Self {\n        match value {\n            HealthStatus::Healthy => Event::Healthy,\n            HealthStatus::Killing => Event::Killing,\n            _ => Event::Unhealthy,\n        }\n    }\n}\n"
  },
  {
    "path": "crates/topos-p2p/src/lib.rs",
    "content": "#![allow(unused_variables)]\nmod behaviour;\nmod client;\nmod command;\npub mod config;\npub mod constants;\npub mod error;\nmod event;\nmod runtime;\n#[cfg(test)]\nmod tests;\n\nuse std::collections::HashSet;\nuse std::convert::Infallible;\n\npub(crate) use behaviour::Behaviour;\npub use client::NetworkClient;\npub use client::RetryPolicy;\npub use command::Command;\npub use event::Event;\nuse http::Request;\nuse http::Response;\npub use libp2p::Multiaddr;\npub use libp2p::PeerId;\npub use runtime::Runtime;\n\nuse hyper::Body;\nuse tonic::body::BoxBody;\nuse tonic::server::NamedService;\nuse tonic::transport::server::Router;\nuse topos_core::api::grpc::p2p::info_service_server::InfoService;\nuse topos_core::api::grpc::p2p::info_service_server::InfoServiceServer;\nuse tower::Service;\n\npub mod network;\n\npub const TOPOS_GOSSIP: &str = \"topos_gossip\";\npub const TOPOS_ECHO: &str = \"topos_echo\";\npub const TOPOS_READY: &str = \"topos_ready\";\n\n#[macro_export]\nmacro_rules! 
protocol_name {\n    ($i:expr) => {\n        format!(\"/{}\", $i)\n    };\n}\n\n#[derive(Debug)]\npub(crate) struct GrpcP2pInfo {}\n#[async_trait::async_trait]\nimpl InfoService for GrpcP2pInfo {}\n\npub use behaviour::grpc::GrpcContext;\n\npub struct GrpcRouter {\n    server: Router,\n    protocols: HashSet<String>,\n}\n\nimpl GrpcRouter {\n    pub fn new(mut server: tonic::transport::Server) -> Self {\n        let mut protocols = HashSet::new();\n        protocols.insert(protocol_name!(InfoServiceServer::<GrpcP2pInfo>::NAME));\n\n        Self {\n            server: server.add_optional_service::<InfoServiceServer<GrpcP2pInfo>>(None),\n            protocols,\n        }\n    }\n\n    pub fn add_service<S>(mut self, service: S) -> Self\n    where\n        S: Service<Request<Body>, Response = Response<BoxBody>, Error = Infallible>\n            + NamedService\n            + Clone\n            + Send\n            + 'static,\n        S::Future: Send + 'static,\n    {\n        self.protocols.insert(protocol_name!(S::NAME));\n        self.server = self.server.add_service(service);\n\n        self\n    }\n}\n\npub mod utils {\n    use std::future::IntoFuture;\n\n    use libp2p::{identity, PeerId};\n    use tokio::{sync::mpsc, sync::oneshot};\n    use tonic::server::NamedService;\n    use topos_core::api::grpc::GrpcClient;\n\n    use tracing::debug;\n\n    use crate::{command::Command, error::P2PError};\n\n    #[derive(Clone)]\n    pub struct GrpcOverP2P {\n        pub(crate) proxy_sender: mpsc::Sender<Command>,\n    }\n\n    impl GrpcOverP2P {\n        pub fn new(proxy_sender: mpsc::Sender<Command>) -> Self {\n            Self { proxy_sender }\n        }\n\n        pub async fn create<C, S>(&self, peer: PeerId) -> Result<C::Output, P2PError>\n        where\n            C: GrpcClient<Output = C>,\n            S: NamedService,\n        {\n            debug!(\"Creating new instance of GRPC client for P2P\");\n            let (sender, recv) = oneshot::channel();\n            
let id = uuid::Uuid::new_v4();\n\n            let _ = self\n                .proxy_sender\n                .send(Command::NewProxiedQuery {\n                    protocol: S::NAME,\n                    peer,\n                    id,\n                    response: sender,\n                })\n                .await;\n\n            let connection = recv.await?;\n\n            let connected = connection.into_future().await?;\n\n            Ok(C::init(connected.channel))\n        }\n    }\n\n    /// build peer_id keys, generate for now - either from the seed or purely random one\n    pub fn local_key_pair(secret_key_seed: Option<u8>) -> identity::Keypair {\n        // todo: load from protobuf encoded|base64 encoded config.local_key_pair\n        match secret_key_seed {\n            Some(seed) => {\n                let mut bytes = [0u8; 32];\n                bytes[0] = seed;\n                identity::Keypair::ed25519_from_bytes(bytes).expect(\"Invalid keypair\")\n            }\n            None => identity::Keypair::generate_ed25519(),\n        }\n    }\n\n    pub fn local_key_pair_from_slice(slice: &[u8]) -> identity::Keypair {\n        // todo: load from protobuf encoded|base64 encoded config.local_key_pair\n        let mut bytes = [0u8; 32];\n        if slice.len() <= 32 {\n            bytes[..slice.len()].clone_from_slice(slice);\n        } else {\n            bytes.clone_from_slice(&slice[..32]);\n        }\n\n        identity::Keypair::ed25519_from_bytes(bytes).expect(\"Invalid keypair\")\n    }\n\n    pub fn keypair_from_protobuf_encoding(priv_key: &[u8]) -> identity::Keypair {\n        identity::Keypair::from_protobuf_encoding(priv_key).expect(\"Invalid keypair retrieval\")\n    }\n}\n\n#[test]\npub fn generate_from_secp256k1() {\n    // Key living in the AWS SM or FS at libp2p/libp2p.key\n    let edge_dec_privkey =\n        hex::decode(\"08021220eb5ce97bd3e7729ac4ab077b83881426cebf19e58a9d9760d1cedfc53d772d6c\")\n            .expect(\"Failed to hex 
decode\");\n\n    use std::str::FromStr;\n\n    let edge_peerid =\n        PeerId::from_str(\"16Uiu2HAkxA7KW9GC2T3tQg3zHvjrnDPqfQUKTfzU3wbts8AsV6kH\").unwrap();\n\n    let keypair = utils::keypair_from_protobuf_encoding(&edge_dec_privkey);\n\n    // Verify that we end up with the same PeerId\n    assert_eq!(keypair.public().to_peer_id(), edge_peerid);\n}\n"
  },
  {
    "path": "crates/topos-p2p/src/network.rs",
    "content": "use super::{Behaviour, Event, NetworkClient, Runtime};\nuse crate::{\n    behaviour::{\n        discovery::DiscoveryBehaviour, gossip, grpc, peer_info::PeerInfoBehaviour, HealthStatus,\n    },\n    config::{DiscoveryConfig, NetworkConfig},\n    constants::{\n        self, COMMAND_STREAM_BUFFER_SIZE, DISCOVERY_PROTOCOL, EVENT_STREAM_BUFFER,\n        PEER_INFO_PROTOCOL,\n    },\n    error::P2PError,\n    utils::GrpcOverP2P,\n    GrpcContext,\n};\nuse futures::Stream;\nuse libp2p::{\n    core::{transport::MemoryTransport, upgrade},\n    dns,\n    identity::Keypair,\n    kad::store::MemoryStore,\n    noise,\n    swarm::{self, ConnectionId},\n    tcp::Config,\n    Multiaddr, PeerId, Swarm, Transport,\n};\nuse std::{\n    borrow::Cow,\n    collections::{HashMap, HashSet},\n    time::Duration,\n};\nuse tokio::sync::{mpsc, oneshot};\nuse tokio_stream::wrappers::ReceiverStream;\nuse tracing::debug;\n\npub fn builder<'a>() -> NetworkBuilder<'a> {\n    NetworkBuilder::default()\n}\n\nconst TWO_HOURS: Duration = Duration::from_secs(60 * 60 * 2);\n\n#[derive(Default)]\npub struct NetworkBuilder<'a> {\n    discovery_protocol: Option<&'static str>,\n    peer_key: Option<Keypair>,\n    listen_addresses: Option<Vec<Multiaddr>>,\n    public_addresses: Option<Vec<Multiaddr>>,\n    store: Option<MemoryStore>,\n    known_peers: &'a [(PeerId, Multiaddr)],\n    local_port: Option<u8>,\n    config: NetworkConfig,\n    grpc_context: GrpcContext,\n    memory_transport: bool,\n}\n\nimpl<'a> NetworkBuilder<'a> {\n    #[cfg(test)]\n    pub(crate) fn memory(mut self) -> Self {\n        self.memory_transport = true;\n\n        self\n    }\n    pub fn grpc_context(mut self, grpc_context: GrpcContext) -> Self {\n        self.grpc_context = grpc_context;\n\n        self\n    }\n\n    pub fn discovery_config(mut self, config: DiscoveryConfig) -> Self {\n        self.config.discovery = config;\n\n        self\n    }\n\n    pub fn minimum_cluster_size(mut self, size: usize) -> Self {\n 
       self.config.minimum_cluster_size = size;\n\n        self\n    }\n\n    pub fn peer_key(mut self, peer_key: Keypair) -> Self {\n        self.peer_key = Some(peer_key);\n\n        self\n    }\n\n    pub fn public_addresses<M: Into<Vec<Multiaddr>>>(mut self, addresses: M) -> Self {\n        self.public_addresses = Some(addresses.into());\n\n        self\n    }\n\n    pub fn listen_addresses<M: Into<Vec<Multiaddr>>>(mut self, addresses: M) -> Self {\n        self.listen_addresses = Some(addresses.into());\n\n        self\n    }\n\n    #[doc(hidden)]\n    pub fn allow_private_ip(mut self, allow_private_ip: bool) -> Self {\n        self.config.allow_private_ip = allow_private_ip;\n\n        self\n    }\n\n    pub fn store(mut self, store: MemoryStore) -> Self {\n        self.store = Some(store);\n\n        self\n    }\n\n    pub fn known_peers(mut self, known_peers: &'a [(PeerId, Multiaddr)]) -> Self {\n        self.known_peers = known_peers;\n\n        self\n    }\n\n    pub fn local_port(mut self, port: u8) -> Self {\n        self.local_port = Some(port);\n\n        self\n    }\n\n    pub fn discovery_protocol(mut self, protocol: &'static str) -> Self {\n        self.discovery_protocol = Some(protocol);\n\n        self\n    }\n\n    pub async fn build(\n        mut self,\n    ) -> Result<(NetworkClient, impl Stream<Item = Event>, Runtime), P2PError> {\n        let peer_key = self.peer_key.ok_or(P2PError::MissingPeerKey)?;\n        let peer_id = peer_key.public().to_peer_id();\n\n        let (command_sender, command_receiver) = mpsc::channel(*COMMAND_STREAM_BUFFER_SIZE);\n        let (event_sender, event_receiver) = mpsc::channel(*EVENT_STREAM_BUFFER);\n\n        let gossipsub = gossip::Behaviour::new(peer_key.clone()).await;\n\n        let grpc = grpc::Behaviour::new(self.grpc_context);\n\n        debug!(\"Known peers: {:?}\", self.known_peers);\n        let behaviour = Behaviour {\n            gossipsub,\n            peer_info: 
PeerInfoBehaviour::new(PEER_INFO_PROTOCOL, &peer_key),\n            discovery: DiscoveryBehaviour::create(\n                &self.config.discovery,\n                peer_key.clone(),\n                Cow::Borrowed(\n                    self.discovery_protocol\n                        .unwrap_or(DISCOVERY_PROTOCOL)\n                        .as_bytes(),\n                ),\n                self.known_peers,\n                false,\n            ),\n            grpc,\n        };\n\n        let multiplex_config = libp2p::yamux::Config::default();\n\n        let transport = if self.memory_transport {\n            MemoryTransport::new()\n                .upgrade(upgrade::Version::V1)\n                .authenticate(noise::Config::new(&peer_key)?)\n                .multiplex(multiplex_config)\n                .timeout(TWO_HOURS)\n                .boxed()\n        } else {\n            let tcp = libp2p::tcp::tokio::Transport::new(Config::default().nodelay(true));\n            let dns_tcp = dns::tokio::Transport::system(tcp).unwrap();\n\n            let tcp = libp2p::tcp::tokio::Transport::new(Config::default().nodelay(true));\n            dns_tcp\n                .or_transport(tcp)\n                .upgrade(upgrade::Version::V1)\n                .authenticate(noise::Config::new(&peer_key)?)\n                .multiplex(multiplex_config)\n                .timeout(TWO_HOURS)\n                .boxed()\n        };\n\n        let swarm = Swarm::new(\n            transport,\n            behaviour,\n            peer_id,\n            swarm::Config::with_tokio_executor()\n                .with_idle_connection_timeout(constants::IDLE_CONNECTION_TIMEOUT),\n        );\n\n        let (shutdown_channel, shutdown) = mpsc::channel::<oneshot::Sender<()>>(1);\n\n        let grpc_over_p2p = GrpcOverP2P::new(command_sender.clone());\n\n        let listen_addr = self\n            .listen_addresses\n            .take()\n            .expect(\"Node requires at least one address to listen for 
incoming connections\");\n\n        let public_addresses = self\n            .public_addresses\n            .map(|addresses| {\n                if addresses.is_empty() {\n                    listen_addr.clone()\n                } else {\n                    addresses\n                }\n            })\n            .unwrap_or(listen_addr.clone());\n\n        Ok((\n            NetworkClient {\n                retry_ttl: self.config.client_retry_ttl,\n                local_peer_id: peer_id,\n                sender: command_sender,\n                grpc_over_p2p,\n                shutdown_channel,\n            },\n            ReceiverStream::new(event_receiver),\n            Runtime {\n                swarm,\n                config: self.config,\n                peer_set: self.known_peers.iter().map(|(p, _)| *p).collect(),\n                boot_peers: self.known_peers.iter().map(|(p, _)| *p).collect(),\n                command_receiver,\n                event_sender,\n                local_peer_id: peer_id,\n                listening_on: listen_addr,\n                public_addresses,\n                active_listeners: HashSet::new(),\n                pending_record_requests: HashMap::new(),\n                shutdown,\n                health_state: crate::runtime::HealthState {\n                    bootnode_connection_retries: 3,\n                    successfully_connected_to_bootnode: if self.known_peers.is_empty() {\n                        // Node seems to be a boot node\n                        Some(ConnectionId::new_unchecked(0))\n                    } else {\n                        None\n                    },\n                    ..Default::default()\n                },\n                health_status: HealthStatus::Initializing,\n            },\n        ))\n    }\n}\n"
  },
  {
    "path": "crates/topos-p2p/src/runtime/handle_command.rs",
    "content": "use crate::{\n    error::{CommandExecutionError, P2PError},\n    protocol_name, Command, Runtime,\n};\n\nuse rand::{thread_rng, Rng};\nuse topos_metrics::P2P_MESSAGE_SENT_ON_GOSSIPSUB_TOTAL;\nuse tracing::{debug, error, warn};\n\nimpl Runtime {\n    pub(crate) async fn handle_command(&mut self, command: Command) {\n        match command {\n            Command::NewProxiedQuery {\n                peer,\n                id,\n                response,\n                protocol,\n            } => {\n                let connection = self\n                    .swarm\n                    .behaviour_mut()\n                    .grpc\n                    .open_outbound_connection(&peer, protocol_name!(protocol));\n\n                _ = response.send(connection);\n            }\n\n            Command::ConnectedPeers { sender } => {\n                if sender\n                    .send(Ok(self\n                        .swarm\n                        .connected_peers()\n                        .cloned()\n                        .collect::<Vec<_>>()))\n                    .is_err()\n                {\n                    warn!(\"Unable to notify ConnectedPeers response: initiator is dropped\");\n                }\n            }\n            Command::RandomKnownPeer { sender } => {\n                if self.peer_set.is_empty() {\n                    let _ = sender.send(Err(P2PError::CommandError(\n                        CommandExecutionError::NoKnownPeer,\n                    )));\n\n                    return;\n                }\n\n                let selected_peer: usize = thread_rng().gen_range(0..(self.peer_set.len()));\n                if sender\n                    .send(\n                        self.peer_set\n                            .iter()\n                            .nth(selected_peer)\n                            .cloned()\n                            .ok_or(P2PError::CommandError(CommandExecutionError::NoKnownPeer)),\n                    )\n        
            .is_err()\n                {\n                    warn!(\"Unable to notify RandomKnownPeer response: initiator is dropped\");\n                }\n            }\n\n            Command::Gossip {\n                topic,\n                data: message,\n            } => match self.swarm.behaviour_mut().gossipsub.publish(topic, message) {\n                Ok(message_id) => {\n                    debug!(\"Published message to {topic}\");\n                    P2P_MESSAGE_SENT_ON_GOSSIPSUB_TOTAL.inc();\n                }\n                Err(err) => error!(\"Failed to publish message to {topic}: {err}\"),\n            },\n        }\n    }\n}\n"
  },
  {
    "path": "crates/topos-p2p/src/runtime/handle_event/discovery.rs",
    "content": "use libp2p::kad::{BootstrapOk, BootstrapResult, Event, QueryResult};\nuse tracing::{debug, error, info, warn};\n\nuse crate::{behaviour::HealthStatus, error::P2PError, Runtime};\n\nuse super::{EventHandler, EventResult};\n\n#[async_trait::async_trait]\nimpl EventHandler<Box<Event>> for Runtime {\n    async fn handle(&mut self, event: Box<Event>) -> EventResult {\n        match *event {\n            Event::InboundRequest { request } => {\n                // warn!(\"InboundRequest {:?}\", request);\n            }\n\n            Event::RoutingUpdated {\n                peer, addresses, ..\n            } => {\n                debug!(\"DHT -> RoutingUpdated {:?} {:?}\", peer, addresses);\n            }\n\n            Event::RoutablePeer { peer, address } => {\n                debug!(\"DHT -> RoutablePeer {:?}, {:?}\", peer, address);\n            }\n\n            Event::PendingRoutablePeer { peer, address } => {\n                debug!(\"DHT -> PendingRoutablePeer {:?}, {:?}\", peer, address);\n            }\n\n            Event::UnroutablePeer { peer } => {\n                // Ignored\n            }\n\n            Event::OutboundQueryProgressed {\n                id,\n                result:\n                    QueryResult::Bootstrap(BootstrapResult::Ok(BootstrapOk {\n                        peer,\n                        num_remaining,\n                    })),\n                stats,\n                step,\n            } if num_remaining == 0\n                && self.swarm.behaviour().discovery.health_status == HealthStatus::Initializing =>\n            {\n                if self\n                    .health_state\n                    .successfully_connected_to_bootnode\n                    .is_none()\n                {\n                    warn!(\n                        \"Bootstrap query finished but unable to connect to bootnode during \\\n                         initialization, switching from discovery(initializing) -> \\\n                       
  discover(unhealthy) and fast bootstrap mode\",\n                    );\n\n                    let behaviour = self.swarm.behaviour_mut();\n\n                    behaviour.discovery.health_status = HealthStatus::Unhealthy;\n                    _ = behaviour\n                        .discovery\n                        .change_interval(self.config.discovery.fast_bootstrap_interval)\n                        .await;\n                } else {\n                    warn!(\n                        \"Bootstrap query finished with bootnode, switching from \\\n                         discovery(initializing) -> discovery(healthy)\",\n                    );\n\n                    let behaviour = self.swarm.behaviour_mut();\n\n                    behaviour.discovery.health_status = HealthStatus::Healthy;\n                }\n            }\n\n            Event::OutboundQueryProgressed {\n                id,\n                result:\n                    QueryResult::Bootstrap(BootstrapResult::Ok(BootstrapOk {\n                        peer,\n                        num_remaining,\n                    })),\n                stats,\n                step,\n            } if num_remaining == 0\n                && self\n                    .health_state\n                    .successfully_connected_to_bootnode\n                    .is_none()\n                && self.swarm.behaviour().discovery.health_status == HealthStatus::Unhealthy =>\n            {\n                match self.health_state.bootnode_connection_retries.checked_sub(1) {\n                    None => {\n                        error!(\n                            \"Bootstrap query finished but unable to connect to bootnode, stopping\"\n                        );\n\n                        return Err(P2PError::UnableToReachBootnode);\n                    }\n                    Some(new) => {\n                        warn!(\n                            \"Bootstrap query finished but unable to connect to bootnode, retrying \\\n 
                            {} more times\",\n                            new\n                        );\n                        self.health_state.bootnode_connection_retries = new;\n                    }\n                }\n            }\n            Event::OutboundQueryProgressed {\n                id,\n                result:\n                    QueryResult::Bootstrap(BootstrapResult::Ok(BootstrapOk {\n                        peer,\n                        num_remaining,\n                    })),\n                stats,\n                step,\n            } if num_remaining == 0\n                && self\n                    .health_state\n                    .successfully_connected_to_bootnode\n                    .is_some()\n                && self.swarm.behaviour().discovery.health_status == HealthStatus::Unhealthy =>\n            {\n                info!(\n                    \"Bootstrap query finished with bootnode, switching discover(unhealthy) -> \\\n                     discover(healthy) and normal bootstrap mode\",\n                );\n\n                let behaviour = self.swarm.behaviour_mut();\n                behaviour.discovery.health_status = HealthStatus::Healthy;\n                _ = behaviour\n                    .discovery\n                    .change_interval(self.config.discovery.bootstrap_interval)\n                    .await;\n            }\n\n            Event::OutboundQueryProgressed {\n                result: QueryResult::Bootstrap(res),\n                id,\n                ..\n            } => {\n                debug!(\"BootstrapResult query: {id:?},  {res:?}\");\n            }\n\n            Event::OutboundQueryProgressed {\n                id, result, stats, ..\n            } => {}\n            Event::ModeChanged { new_mode } => {}\n        }\n\n        Ok(())\n    }\n}\n"
  },
  {
    "path": "crates/topos-p2p/src/runtime/handle_event/gossipsub.rs",
    "content": "use topos_metrics::{\n    P2P_EVENT_STREAM_CAPACITY_TOTAL, P2P_MESSAGE_DESERIALIZE_FAILURE_TOTAL,\n    P2P_MESSAGE_RECEIVED_ON_ECHO_TOTAL, P2P_MESSAGE_RECEIVED_ON_GOSSIP_TOTAL,\n    P2P_MESSAGE_RECEIVED_ON_READY_TOTAL,\n};\nuse tracing::{debug, error};\n\nuse crate::{constants, event::GossipEvent, Event, Runtime, TOPOS_ECHO, TOPOS_GOSSIP, TOPOS_READY};\nuse prost::Message;\nuse topos_core::api::grpc::tce::v1::Batch;\n\nuse super::{EventHandler, EventResult};\n\n#[async_trait::async_trait]\nimpl EventHandler<GossipEvent> for Runtime {\n    async fn handle(&mut self, event: GossipEvent) -> EventResult {\n        if let GossipEvent::Message {\n            source: Some(source),\n            message,\n            topic,\n        } = event\n        {\n            if self.event_sender.capacity() < *constants::CAPACITY_EVENT_STREAM_BUFFER {\n                P2P_EVENT_STREAM_CAPACITY_TOTAL.inc();\n            }\n\n            debug!(\"Received message from {:?} on topic {:?}\", source, topic);\n            match topic {\n                TOPOS_GOSSIP => {\n                    P2P_MESSAGE_RECEIVED_ON_GOSSIP_TOTAL.inc();\n\n                    if let Err(e) = self\n                        .event_sender\n                        .send(Event::Gossip {\n                            from: source,\n                            data: message,\n                        })\n                        .await\n                    {\n                        error!(\"Failed to send gossip event to runtime: {:?}\", e);\n                    }\n                }\n                TOPOS_ECHO | TOPOS_READY => {\n                    if topic == TOPOS_ECHO {\n                        P2P_MESSAGE_RECEIVED_ON_ECHO_TOTAL.inc();\n                    } else {\n                        P2P_MESSAGE_RECEIVED_ON_READY_TOTAL.inc();\n                    }\n                    if let Ok(Batch { messages }) = Batch::decode(&message[..]) {\n                        for message in messages {\n             
               if let Err(e) = self\n                                .event_sender\n                                .send(Event::Gossip {\n                                    from: source,\n                                    data: message,\n                                })\n                                .await\n                            {\n                                error!(\"Failed to send gossip {} event to runtime: {:?}\", topic, e);\n                            }\n                        }\n                    } else {\n                        P2P_MESSAGE_DESERIALIZE_FAILURE_TOTAL\n                            .with_label_values(&[topic])\n                            .inc();\n                    }\n                }\n                _ => {\n                    error!(\"Received message on unknown topic {:?}\", topic);\n                }\n            }\n        }\n\n        Ok(())\n    }\n}\n"
  },
  {
    "path": "crates/topos-p2p/src/runtime/handle_event/grpc.rs",
    "content": "use tracing::debug;\n\nuse crate::{behaviour::grpc, Runtime};\n\nuse super::{EventHandler, EventResult};\n\n#[async_trait::async_trait]\nimpl EventHandler<grpc::Event> for Runtime {\n    async fn handle(&mut self, event: grpc::Event) -> EventResult {\n        match event {\n            grpc::Event::OutboundFailure {\n                peer_id,\n                request_id,\n                error,\n            } => {\n                debug!(\n                    \"Outbound connection failure to peer {} for request {}: {}\",\n                    peer_id, request_id, error\n                );\n            }\n            grpc::Event::OutboundSuccess {\n                peer_id,\n                request_id,\n                ..\n            } => {\n                debug!(\n                    \"Outbound connection success to peer {} for request {}\",\n                    peer_id, request_id\n                );\n            }\n            grpc::Event::InboundNegotiatedConnection {\n                request_id,\n                connection_id,\n            } => {\n                debug!(\n                    \"Inbound connection negotiated for request {} with connection {}\",\n                    request_id, connection_id\n                );\n            }\n            grpc::Event::OutboundNegotiatedConnection {\n                peer_id,\n                request_id,\n            } => {\n                debug!(\n                    \"Outbound connection negotiated to peer {} for request {}\",\n                    peer_id, request_id\n                );\n            }\n        }\n        Ok(())\n    }\n}\n"
  },
  {
    "path": "crates/topos-p2p/src/runtime/handle_event/peer_info.rs",
    "content": "use ip_network::IpNetwork;\nuse libp2p::{\n    identify::{Event as IdentifyEvent, Info as IdentifyInfo},\n    multiaddr::Protocol,\n    Multiaddr,\n};\nuse tracing::info;\n\nuse crate::{constants::PEER_INFO_PROTOCOL, Runtime};\n\nuse super::{EventHandler, EventResult};\n\n#[async_trait::async_trait]\nimpl EventHandler<Box<IdentifyEvent>> for Runtime {\n    async fn handle(&mut self, event: Box<IdentifyEvent>) -> EventResult {\n        if let IdentifyEvent::Received { peer_id, info, .. } = *event {\n            let IdentifyInfo {\n                protocol_version,\n                listen_addrs,\n                protocols,\n                observed_addr,\n                ..\n            } = info;\n\n            if !self.peer_set.contains(&peer_id)\n                && protocol_version.as_bytes() == PEER_INFO_PROTOCOL.as_bytes()\n            {\n                self.peer_set.insert(peer_id);\n                for addr in listen_addrs {\n                    if self.config.allow_private_ip || is_global_addr(&addr) {\n                        info!(\n                            \"Adding self-reported address {} from {} to Kademlia DHT.\",\n                            addr, peer_id\n                        );\n                        self.swarm\n                            .behaviour_mut()\n                            .discovery\n                            .inner\n                            .add_address(&peer_id, addr);\n                    }\n                }\n            }\n        }\n\n        Ok(())\n    }\n}\n\npub fn is_global_addr(addr: &Multiaddr) -> bool {\n    match addr.iter().next() {\n        Some(Protocol::Dns(_)) | Some(Protocol::Dns4(_)) | Some(Protocol::Dns6(_)) => true,\n        Some(Protocol::Ip4(ip)) => IpNetwork::from(ip).is_global(),\n        Some(Protocol::Ip6(ip)) => IpNetwork::from(ip).is_global(),\n        _ => false,\n    }\n}\n"
  },
  {
    "path": "crates/topos-p2p/src/runtime/handle_event.rs",
    "content": "use libp2p::{core::Endpoint, multiaddr::Protocol, swarm::SwarmEvent};\nuse tracing::{debug, error, info, warn};\n\nuse crate::{error::P2PError, event::ComposedEvent, Event, Runtime};\n\nmod discovery;\nmod gossipsub;\nmod grpc;\nmod peer_info;\n\npub type EventResult = Result<(), P2PError>;\n\n#[async_trait::async_trait]\npub(crate) trait EventHandler<T> {\n    async fn handle(&mut self, event: T) -> EventResult;\n}\n\n#[async_trait::async_trait]\nimpl EventHandler<Event> for Runtime {\n    async fn handle(&mut self, event: Event) -> EventResult {\n        if let Err(error) = self.event_sender.try_send(event) {\n            warn!(reason = %error, \"Unable to send NetworkEvent event to outer stream\");\n        }\n\n        Ok(())\n    }\n}\n\n#[async_trait::async_trait]\nimpl EventHandler<ComposedEvent> for Runtime {\n    async fn handle(&mut self, event: ComposedEvent) -> EventResult {\n        match event {\n            ComposedEvent::Kademlia(event) => self.handle(event).await,\n            ComposedEvent::PeerInfo(event) => self.handle(event).await,\n            ComposedEvent::Gossipsub(event) => self.handle(event).await,\n            ComposedEvent::Grpc(event) => self.handle(event).await,\n            ComposedEvent::Void => Ok(()),\n        }\n    }\n}\n\n#[async_trait::async_trait]\nimpl EventHandler<SwarmEvent<ComposedEvent>> for Runtime {\n    async fn handle(&mut self, event: SwarmEvent<ComposedEvent>) -> EventResult {\n        match event {\n            SwarmEvent::NewListenAddr {\n                listener_id,\n                address,\n                ..\n            } => {\n                info!(\n                    \"Local node is listening on {:?}\",\n                    address.with(Protocol::P2p(self.local_peer_id)),\n                );\n\n                self.active_listeners.insert(listener_id);\n            }\n\n            SwarmEvent::OutgoingConnectionError {\n                connection_id,\n                peer_id: 
Some(peer_id),\n                error,\n            } if self\n                .health_state\n                .successfully_connected_to_bootnode\n                .is_none()\n                && self.health_state.dialed_bootnode.contains(&connection_id) =>\n            {\n                warn!(\"Unable to connect to bootnode {peer_id}: {error:?}\");\n                self.health_state.dialed_bootnode.remove(&connection_id);\n                if self.health_state.dialed_bootnode.is_empty() {\n                    // We tried to connect to all bootnode without success\n                    error!(\"Unable to connect to any bootnode\");\n                }\n            }\n\n            SwarmEvent::OutgoingConnectionError {\n                peer_id,\n                error,\n                connection_id,\n            } => {\n                if let Some(peer_id) = peer_id {\n                    error!(\n                        \"OutgoingConnectionError peer_id: {peer_id} | error: {error:?} | \\\n                         connection_id: {connection_id}\"\n                    );\n                } else {\n                    error!(\n                        \"OutgoingConnectionError for unknown peer | error: {error:?} | \\\n                         connection_id: {connection_id}\"\n                    );\n                    error!(\"OutgoingConnectionError {error:?}\");\n                }\n            }\n\n            SwarmEvent::ConnectionEstablished {\n                peer_id,\n                connection_id,\n                endpoint,\n                num_established,\n                concurrent_dial_errors,\n                established_in,\n            } if self.health_state.dialed_bootnode.contains(&connection_id) => {\n                info!(\"Successfully connected to bootnode {peer_id}\");\n                if self\n                    .health_state\n                    .successfully_connected_to_bootnode\n                    .is_none()\n                {\n                 
   self.health_state.successfully_connected_to_bootnode = Some(connection_id);\n                    _ = self.health_state.dialed_bootnode.remove(&connection_id);\n                }\n            }\n\n            SwarmEvent::ConnectionEstablished {\n                peer_id,\n                endpoint,\n                connection_id,\n                ..\n            } => {\n                if self\n                    .health_state\n                    .successfully_connected_to_bootnode\n                    .is_none()\n                    && self.boot_peers.contains(&peer_id)\n                {\n                    info!(\n                        \"Connection established with bootnode {peer_id} as {:?}\",\n                        endpoint.to_endpoint()\n                    );\n\n                    if endpoint.to_endpoint() == Endpoint::Listener {\n                        if let Err(error) = self.swarm.dial(peer_id) {\n                            error!(\n                                \"Unable to dial bootnode {peer_id} after incoming connection: \\\n                                 {error}\"\n                            );\n                        }\n                    }\n                } else {\n                    info!(\n                        \"Connection established with peer {peer_id} as {:?}\",\n                        endpoint.to_endpoint()\n                    );\n                }\n\n                if self.swarm.connected_peers().count() >= self.config.minimum_cluster_size {\n                    if let Err(error) = self.swarm.behaviour_mut().gossipsub.subscribe() {\n                        error!(\"Unable to subscribe to gossipsub topic: {}\", error);\n\n                        return Err(P2PError::GossipTopicSubscriptionFailure);\n                    }\n                }\n            }\n\n            incoming_connection_error @ SwarmEvent::IncomingConnectionError { .. 
} => {\n                error!(\"{:?}\", incoming_connection_error);\n            }\n\n            SwarmEvent::IncomingConnection {\n                local_addr,\n                connection_id,\n                send_back_addr,\n            } => {\n                debug!(\n                    \"IncomingConnection | local_addr: {local_addr} | connection_id: \\\n                     {connection_id} | send_back_addr: {send_back_addr}\"\n                )\n            }\n\n            SwarmEvent::ListenerClosed {\n                listener_id,\n                addresses,\n                reason,\n            } => {\n                debug!(\n                    \"ListenerClosed {:?}: listener_id{listener_id:?} | addresses: {addresses:?} | \\\n                     reason: {reason:?}\",\n                    *self.swarm.local_peer_id()\n                );\n            }\n\n            SwarmEvent::ConnectionClosed { peer_id, cause, .. } => {\n                debug!(\"ConnectionClosed {peer_id} because of {cause:?}\");\n            }\n\n            SwarmEvent::Dialing {\n                peer_id: Some(ref peer_id),\n                connection_id,\n            } if self.boot_peers.contains(peer_id) => {\n                info!(\"Dialing bootnode {peer_id} on connection: {connection_id}\");\n                self.health_state.dialed_bootnode.insert(connection_id);\n            }\n\n            SwarmEvent::Dialing {\n                peer_id,\n                connection_id,\n            } => {\n                debug!(\"Dialing peer_id: {peer_id:?} | connection_id: {connection_id}\");\n            }\n\n            SwarmEvent::Behaviour(event) => self.handle(event).await?,\n\n            SwarmEvent::ExpiredListenAddr {\n                listener_id,\n                address,\n            } => error!(\"Unhandled ExpiredListenAddr {listener_id:?} | {address}\"),\n\n            SwarmEvent::ListenerError { listener_id, error } => {\n                error!(\"Unhandled ListenerError 
{listener_id:?} | {error}\")\n            }\n\n            event => {\n                warn!(\"Unhandled SwarmEvent: {:?}\", event);\n            }\n        }\n\n        let behaviour = self.swarm.behaviour();\n\n        if let Some(event) = self.healthy_status_changed() {\n            debug!(\"Healthy status changed: {:?}\", event);\n            _ = self.event_sender.send(event).await;\n        }\n\n        info!(\"Healthy status: {:?}\", self.health_status);\n        Ok(())\n    }\n}\n"
  },
  {
    "path": "crates/topos-p2p/src/runtime/mod.rs",
    "content": "use std::collections::{HashMap, HashSet};\n\nuse crate::{\n    behaviour::{discovery::PendingRecordRequest, HealthStatus},\n    config::NetworkConfig,\n    error::P2PError,\n    runtime::handle_event::EventHandler,\n    Behaviour, Command, Event,\n};\nuse libp2p::{\n    core::transport::ListenerId, kad::QueryId, swarm::ConnectionId, Multiaddr, PeerId, Swarm,\n};\nuse tokio::{\n    spawn,\n    sync::{mpsc, oneshot},\n    task::JoinHandle,\n};\nuse tokio_stream::{Stream, StreamExt};\nuse tracing::{debug, error, info, Instrument};\n\npub struct Runtime {\n    pub(crate) config: NetworkConfig,\n    // TODO: check if needed\n    pub(crate) peer_set: HashSet<PeerId>,\n    pub(crate) swarm: Swarm<Behaviour>,\n    pub(crate) command_receiver: mpsc::Receiver<Command>,\n    pub(crate) event_sender: mpsc::Sender<Event>,\n    pub(crate) local_peer_id: PeerId,\n    pub(crate) listening_on: Vec<Multiaddr>,\n    pub(crate) public_addresses: Vec<Multiaddr>,\n\n    /// Well-known or pre-configured bootnodes to connect to in order to bootstrap the p2p layer\n    pub(crate) boot_peers: Vec<PeerId>,\n\n    /// Contains current listenerId of the swarm\n    pub active_listeners: HashSet<ListenerId>,\n\n    /// Pending DHT queries\n    pub pending_record_requests: HashMap<QueryId, PendingRecordRequest>,\n\n    /// Shutdown signal receiver from the client\n    pub(crate) shutdown: mpsc::Receiver<oneshot::Sender<()>>,\n\n    /// Internal health state of the p2p layer\n    pub(crate) health_state: HealthState,\n\n    /// Health status of the p2p layer\n    pub(crate) health_status: HealthStatus,\n}\n\nmod handle_command;\nmod handle_event;\n\n/// Internal health state of the p2p layer\n///\n/// This struct may change in the future to be more flexible and to handle more\n/// complex state transitions/representation.\n#[derive(Default)]\npub(crate) struct HealthState {\n    /// Indicates if the node has external addresses configured\n    pub(crate) has_external_addresses: 
bool,\n    /// Indicates if the node is listening on any address\n    pub(crate) is_listening: bool,\n    /// List the bootnodes that the node has tried to connect to\n    pub(crate) dialed_bootnode: HashSet<ConnectionId>,\n    /// Indicates if the node has successfully connected to a bootnode\n    pub(crate) successfully_connected_to_bootnode: Option<ConnectionId>,\n    /// Track the number of remaining retries to connect to any bootnode\n    pub(crate) bootnode_connection_retries: usize,\n}\n\nimpl Runtime {\n    /// Bootstrap the p2p layer runtime with the given configuration.\n    /// This method will configure, launch and start queries.\n    /// The result of this call is a p2p layer bootstrap but it doesn't mean it is\n    /// ready.\n    pub async fn bootstrap<S: Stream<Item = Event> + Unpin + Send>(\n        mut self,\n        event_stream: &mut S,\n    ) -> Result<JoinHandle<Result<(), P2PError>>, P2PError> {\n        debug!(\"Added public addresses: {:?}\", self.public_addresses);\n        for address in &self.public_addresses {\n            self.swarm.add_external_address(address.clone());\n            self.health_state.has_external_addresses = true;\n        }\n        debug!(\"Starting to listen on {:?}\", self.listening_on);\n\n        for addr in &self.listening_on {\n            if let Err(error) = self.swarm.listen_on(addr.clone()) {\n                error!(\"Couldn't start listening on {} because of {error:?}\", addr);\n\n                return Err(P2PError::TransportError(error));\n            }\n\n            self.health_state.is_listening = true;\n        }\n\n        let mut handle = spawn(self.run().in_current_span());\n\n        // Await the Event::Healthy coming from freshly started p2p layer\n        loop {\n            tokio::select! 
{\n                result = &mut handle => {\n                    match result {\n                        Ok(Ok(_)) => info!(\"P2P layer has been shutdown\"),\n                        Ok(Err(error)) => {\n                            error!(\"P2P layer has failed with error: {:?}\", error);\n\n                            return Err(error);\n                        }\n                        Err(_) => {\n                            error!(\"P2P layer has failed in an unexpected way.\");\n                            return Err(P2PError::JoinHandleFailure);\n                        }\n                    }\n                }\n                Some(event) = event_stream.next() => {\n                    if let Event::Healthy = event {\n                        info!(\"P2P layer is healthy\");\n                        break;\n                    }\n                }\n            }\n        }\n\n        Ok(handle)\n    }\n\n    /// Run p2p runtime\n    pub async fn run(mut self) -> Result<(), P2PError> {\n        let shutdowned: Option<oneshot::Sender<()>> = loop {\n            tokio::select! 
{\n                Some(event) = self.swarm.next() => {\n                    self.handle(event).in_current_span().await?\n                },\n                Some(command) = self.command_receiver.recv() => self.handle_command(command).in_current_span().await,\n                shutdown = self.shutdown.recv() => {\n                    break shutdown;\n                }\n            }\n        };\n\n        if let Some(sender) = shutdowned {\n            info!(\"Shutting down p2p runtime...\");\n            _ = sender.send(());\n        }\n\n        Ok(())\n    }\n\n    pub(crate) fn healthy_status_changed(&mut self) -> Option<Event> {\n        let behaviours = self.swarm.behaviour();\n        let gossipsub = &behaviours.gossipsub.health_status;\n        let discovery = &behaviours.discovery.health_status;\n\n        let new_status = match (discovery, gossipsub) {\n            (HealthStatus::Killing, _) | (_, HealthStatus::Killing) => HealthStatus::Killing,\n            (HealthStatus::Initializing, _) | (_, HealthStatus::Initializing) => {\n                HealthStatus::Initializing\n            }\n            (HealthStatus::Unhealthy, _) | (_, HealthStatus::Unhealthy) => HealthStatus::Unhealthy,\n            (HealthStatus::Recovering, _) | (_, HealthStatus::Recovering) => {\n                HealthStatus::Recovering\n            }\n            (HealthStatus::Healthy, HealthStatus::Healthy) => HealthStatus::Healthy,\n        };\n\n        if self.health_status != new_status {\n            self.health_status = new_status;\n\n            Some((&self.health_status).into())\n        } else {\n            None\n        }\n    }\n}\n"
  },
  {
    "path": "crates/topos-p2p/src/tests/behaviour/grpc.rs",
    "content": "use std::{collections::HashSet, future::IntoFuture, time::Duration};\n\nuse libp2p::Swarm;\nuse libp2p_swarm_test::SwarmExt;\nuse rstest::rstest;\nuse test_log::test;\nuse tokio::spawn;\nuse tokio_util::sync::CancellationToken;\nuse tonic::server::NamedService;\nuse tonic::transport::Server;\nuse topos_test_sdk::grpc::{\n    behaviour::{\n        helloworld::{\n            greeter_client::GreeterClient, greeter_server::GreeterServer, HelloRequest,\n            HelloWithDelayRequest,\n        },\n        noop::noop_server::NoopServer,\n    },\n    implementations::{self, DummyServer},\n};\n\nuse crate::{\n    behaviour::grpc::{\n        self,\n        error::{OutboundConnectionError, OutboundError},\n    },\n    protocol_name, GrpcContext, GrpcRouter,\n};\n\n#[rstest]\n#[test(tokio::test)]\n#[timeout(Duration::from_secs(5))]\nasync fn instantiate_grpc() {\n    let dummy = DummyServer {};\n    let router = GrpcContext::default()\n        .with_router(GrpcRouter::new(Server::builder()).add_service(GreeterServer::new(dummy)));\n\n    let client_protocols = {\n        let mut protocols = HashSet::new();\n\n        protocols.insert(protocol_name!(GreeterServer::<DummyServer>::NAME));\n\n        protocols\n    };\n    let mut client_swarm = Swarm::new_ephemeral(|_| grpc::Behaviour::new(GrpcContext::default()));\n    let mut server_swarm = Swarm::new_ephemeral(|_| grpc::Behaviour::new(router));\n\n    let server_peer_id = *server_swarm.local_peer_id();\n\n    server_swarm.listen().await;\n\n    let server_address = server_swarm.listeners().next().unwrap();\n\n    client_swarm\n        .behaviour_mut()\n        .add_address(&server_peer_id, server_address.clone());\n\n    let outbound_connection = client_swarm.behaviour_mut().open_outbound_connection(\n        &server_peer_id,\n        protocol_name!(GreeterServer::<DummyServer>::NAME),\n    );\n\n    let shutdown = CancellationToken::new();\n    let client_shutdown = shutdown.child_token();\n    let 
server_shutdown = shutdown.child_token();\n    let client_swarm = async move {\n        loop {\n            tokio::select! {\n                event = client_swarm.next_swarm_event() => {}\n                _ = client_shutdown.cancelled() => { return client_swarm; }\n            }\n        }\n    };\n\n    let server_swarm = async move {\n        loop {\n            tokio::select! {\n                _ = server_swarm.next_swarm_event() => {}\n                _ = server_shutdown.cancelled() => { return server_swarm; }\n            }\n        }\n    };\n\n    let server_swarm = spawn(server_swarm);\n    let client_swarm = spawn(client_swarm);\n    println!(\"Starting\");\n    let connection = outbound_connection.into_future().await.unwrap();\n    println!(\"Stopping\");\n\n    shutdown.cancel();\n\n    let server_swarm = server_swarm.await.unwrap();\n    let client_swarm = client_swarm.await.unwrap();\n\n    assert_eq!(\n        server_swarm.connected_peers().collect::<Vec<_>>(),\n        vec![client_swarm.local_peer_id()]\n    );\n}\n\n#[test(tokio::test)]\nasync fn opening_outbound_stream() {}\n\n#[test(tokio::test)]\nasync fn opening_outbound_stream_half_close() {}\n\n#[test(tokio::test)]\n#[ignore = \"TP-757: Need to find a way to properly close the connection after sending the query\"]\nasync fn closing_stream() {\n    let dummy = DummyServer {};\n\n    let router = GrpcContext::default()\n        .with_router(GrpcRouter::new(Server::builder()).add_service(GreeterServer::new(dummy)));\n    let client_protocols = {\n        let mut protocols = HashSet::new();\n\n        protocols.insert(protocol_name!(GreeterServer::<DummyServer>::NAME));\n\n        protocols\n    };\n    let mut client_swarm = Swarm::new_ephemeral(|_| grpc::Behaviour::new(GrpcContext::default()));\n    let mut server_swarm = Swarm::new_ephemeral(|_| grpc::Behaviour::new(router));\n\n    let server_peer_id = *server_swarm.local_peer_id();\n\n    server_swarm.listen().await;\n\n    let server_address 
= server_swarm.listeners().next().unwrap();\n    client_swarm\n        .behaviour_mut()\n        .add_address(&server_peer_id, server_address.clone());\n\n    let outbound_connection = client_swarm.behaviour_mut().open_outbound_connection(\n        &server_peer_id,\n        protocol_name!(GreeterServer::<DummyServer>::NAME),\n    );\n\n    let client_swarm = async move {\n        loop {\n            client_swarm.next_swarm_event().await;\n        }\n    };\n    let server_swarm = async move {\n        loop {\n            server_swarm.next_swarm_event().await;\n        }\n    };\n\n    spawn(server_swarm);\n    spawn(client_swarm);\n    let connection = outbound_connection.into_future().await.unwrap();\n\n    let mut client = GreeterClient::new(connection.channel);\n\n    let result = client\n        .say_hello_with_delay(HelloWithDelayRequest {\n            name: \"Simon\".into(),\n            delay_in_seconds: 10,\n        })\n        .await\n        .unwrap();\n\n    assert_eq!(result.into_inner().message, \"Hello Simon\");\n}\n\n#[test(tokio::test)]\nasync fn execute_query() {\n    let dummy = DummyServer {};\n\n    let router = GrpcContext::default()\n        .with_router(GrpcRouter::new(Server::builder()).add_service(GreeterServer::new(dummy)));\n    let client_protocols = {\n        let mut protocols = HashSet::new();\n\n        protocols.insert(protocol_name!(GreeterServer::<DummyServer>::NAME));\n\n        protocols\n    };\n    let mut client_swarm = Swarm::new_ephemeral(|_| grpc::Behaviour::new(GrpcContext::default()));\n    let mut server_swarm = Swarm::new_ephemeral(|_| grpc::Behaviour::new(router));\n\n    let (multiaddr, _) = server_swarm.listen().await;\n    let server_peer_id = *server_swarm.local_peer_id();\n\n    client_swarm\n        .behaviour_mut()\n        .add_address(&server_peer_id, multiaddr);\n\n    let outbound_connection = client_swarm.behaviour_mut().open_outbound_connection(\n        &server_peer_id,\n        
protocol_name!(GreeterServer::<DummyServer>::NAME),\n    );\n\n    let client_swarm = async move {\n        loop {\n            client_swarm.next_swarm_event().await;\n        }\n    };\n    let server_swarm = async move {\n        loop {\n            server_swarm.next_swarm_event().await;\n        }\n    };\n\n    spawn(server_swarm);\n    spawn(client_swarm);\n    let connection = outbound_connection.into_future().await.unwrap();\n\n    let mut client = GreeterClient::new(connection.channel);\n\n    let result = client\n        .say_hello(HelloRequest {\n            name: \"Simon\".into(),\n        })\n        .await\n        .unwrap();\n\n    assert_eq!(result.into_inner().message, \"Hello Simon\");\n}\n\n#[rstest]\nfn create_context_with_only_router() {\n    let context = GrpcContext::default().with_router(GrpcRouter::new(Server::builder()));\n\n    let (router, (inbound, outbound)) = context.into_parts();\n\n    assert!(router.is_some());\n    assert_eq!(inbound, outbound);\n}\n\n#[rstest]\nfn create_context_with_only_client() {\n    let context = GrpcContext::default();\n\n    let (router, (inbound, outbound)) = context.into_parts();\n\n    assert!(router.is_none());\n    assert_eq!(inbound, outbound);\n}\n\n#[rstest]\nfn create_context_with_only_client_custom_protocol() {\n    let context = GrpcContext::default().add_client_protocol(\"/custom\");\n\n    let (router, (inbound, outbound)) = context.into_parts();\n\n    assert!(router.is_none());\n    assert_ne!(inbound, outbound);\n    assert_eq!(outbound.into_iter().collect::<Vec<_>>(), vec![\"/custom\"]);\n    assert_eq!(\n        inbound.into_iter().collect::<Vec<_>>(),\n        Vec::<String>::new()\n    );\n}\n\n#[test(tokio::test)]\nasync fn incompatible_protocol() {\n    let dummy = DummyServer {};\n\n    let router = GrpcContext::default()\n        .with_router(GrpcRouter::new(Server::builder()).add_service(GreeterServer::new(dummy)));\n\n    let client_protocols = {\n        let mut protocols = 
HashSet::new();\n\n        protocols.insert(protocol_name!(\n            NoopServer::<implementations::NoopServer>::NAME\n        ));\n\n        protocols\n    };\n\n    let mut client_swarm = Swarm::new_ephemeral(|_| {\n        grpc::Behaviour::new(GrpcContext::default().with_client_protocols(client_protocols))\n    });\n    let mut server_swarm = Swarm::new_ephemeral(|_| grpc::Behaviour::new(router));\n\n    let server_peer_id = *server_swarm.local_peer_id();\n\n    let (multiaddr, _) = server_swarm.listen().await;\n    client_swarm\n        .behaviour_mut()\n        .add_address(&server_peer_id, multiaddr);\n\n    let outbound_connection = client_swarm.behaviour_mut().open_outbound_connection(\n        &server_peer_id,\n        protocol_name!(NoopServer::<implementations::NoopServer>::NAME),\n    );\n\n    let client_swarm = async move {\n        loop {\n            client_swarm.next_swarm_event().await;\n        }\n    };\n    let server_swarm = async move {\n        loop {\n            server_swarm.next_swarm_event().await;\n        }\n    };\n\n    spawn(server_swarm);\n    spawn(client_swarm);\n\n    let result = outbound_connection.into_future().await;\n\n    assert!(result.is_err());\n\n    assert!(matches!(\n        result,\n        Err(OutboundConnectionError::Outbound(\n            OutboundError::UnsupportedProtocol(_)\n        ))\n    ));\n}\n"
  },
  {
    "path": "crates/topos-p2p/src/tests/behaviour/mod.rs",
    "content": "mod grpc;\n"
  },
  {
    "path": "crates/topos-p2p/src/tests/bootstrap.rs",
    "content": "use std::time::Duration;\n\nuse futures::{future::join_all, FutureExt};\nuse rstest::rstest;\nuse test_log::test;\nuse topos_test_sdk::tce::NodeConfig;\nuse tracing::Instrument;\n\n#[rstest]\n#[test(tokio::test)]\n#[timeout(Duration::from_secs(5))]\nasync fn two_bootnode_communicating() {\n    let bootnode = NodeConfig::memory(2);\n    let local = NodeConfig::memory(1);\n    let bootnode_known_peers = vec![(local.peer_id(), local.addr.clone())];\n    let local_known_peers = vec![(bootnode.peer_id(), bootnode.addr.clone())];\n\n    let mut handlers = Vec::new();\n\n    let context_local = tracing::info_span!(\"start_node\", \"peer_id\" = local.peer_id().to_string());\n\n    let context_bootnode =\n        tracing::info_span!(\"start_node\", \"peer_id\" = bootnode.peer_id().to_string());\n    handlers.push(\n        async move {\n            let (client, mut stream, runtime) = crate::network::builder()\n                .minimum_cluster_size(1)\n                .peer_key(local.keypair.clone())\n                .listen_addresses(&[local.addr.clone()])\n                .known_peers(&local_known_peers)\n                .memory()\n                .build()\n                .await\n                .expect(\"Unable to create p2p network\");\n\n            runtime.bootstrap(&mut stream).await\n        }\n        .instrument(context_local)\n        .boxed(),\n    );\n\n    handlers.push(\n        async move {\n            let (client, mut stream, runtime) = crate::network::builder()\n                .minimum_cluster_size(1)\n                .peer_key(bootnode.keypair.clone())\n                .listen_addresses(&[bootnode.addr.clone()])\n                .known_peers(&bootnode_known_peers)\n                .memory()\n                .build()\n                .await\n                .expect(\"Unable to create p2p network\");\n\n            runtime.bootstrap(&mut stream).await\n        }\n        .instrument(context_bootnode)\n        .boxed(),\n    );\n    
assert!(join_all(handlers).await.iter().all(Result::is_ok));\n}\n"
  },
  {
    "path": "crates/topos-p2p/src/tests/command/mod.rs",
    "content": "mod random_peer;\n"
  },
  {
    "path": "crates/topos-p2p/src/tests/command/random_peer.rs",
    "content": "use std::time::Duration;\n\nuse rstest::rstest;\nuse test_log::test;\nuse tokio::spawn;\nuse topos_test_sdk::tce::NodeConfig;\n\nuse crate::error::P2PError;\n\n#[rstest]\n#[test(tokio::test)]\n#[timeout(Duration::from_secs(5))]\nasync fn no_random_peer() {\n    let local = NodeConfig::from_seed(1);\n\n    let (client, stream, runtime) = crate::network::builder()\n        .minimum_cluster_size(0)\n        .peer_key(local.keypair.clone())\n        .public_addresses(&[local.addr.clone()])\n        .listen_addresses(&[local.addr.clone()])\n        .public_addresses(vec![local.addr.clone()])\n        .listen_addresses(vec![local.addr.clone()])\n        .build()\n        .await\n        .expect(\"Unable to create p2p network\");\n\n    tokio::spawn(runtime.run());\n\n    let result = client.random_known_peer().await;\n\n    assert!(result.is_err());\n    assert!(matches!(\n        result,\n        Err(P2PError::CommandError(\n            crate::error::CommandExecutionError::NoKnownPeer\n        ))\n    ));\n}\n\n#[rstest]\n#[test(tokio::test)]\n#[timeout(Duration::from_secs(5))]\nasync fn return_a_peer() {\n    let local = NodeConfig::from_seed(1);\n    let expected = NodeConfig::from_seed(2);\n    let expected_peer_id = expected.keypair.public().to_peer_id();\n\n    let (client, stream, mut runtime) = crate::network::builder()\n        .minimum_cluster_size(0)\n        .peer_key(local.keypair.clone())\n        .public_addresses(vec![local.addr.clone()])\n        .listen_addresses(vec![local.addr.clone()])\n        .build()\n        .await\n        .expect(\"Unable to create p2p network\");\n\n    runtime.peer_set.insert(expected_peer_id);\n    spawn(runtime.run());\n\n    let result = client.random_known_peer().await;\n\n    assert!(result.is_ok());\n    assert!(matches!(\n        result,\n        Ok(peer) if peer == expected_peer_id\n    ));\n}\n\n#[rstest]\n#[test(tokio::test)]\n#[timeout(Duration::from_secs(5))]\nasync fn 
return_a_random_peer_among_100() {\n    let local = NodeConfig::from_seed(1);\n\n    let (client, stream, mut runtime) = crate::network::builder()\n        .minimum_cluster_size(0)\n        .peer_key(local.keypair.clone())\n        .public_addresses(vec![local.addr.clone()])\n        .listen_addresses(vec![local.addr.clone()])\n        .build()\n        .await\n        .expect(\"Unable to create p2p network\");\n\n    for i in 2..=100 {\n        let peer = NodeConfig::from_seed(i);\n        runtime.peer_set.insert(peer.keypair.public().to_peer_id());\n    }\n\n    spawn(runtime.run());\n\n    let first_try = client.random_known_peer().await.unwrap();\n    let second_try = client.random_known_peer().await.unwrap();\n    let third_try = client.random_known_peer().await.unwrap();\n\n    assert!(first_try != second_try);\n    assert!(first_try != third_try);\n}\n"
  },
  {
    "path": "crates/topos-p2p/src/tests/mod.rs",
    "content": "mod behaviour;\nmod bootstrap;\nmod command;\nmod support;\n"
  },
  {
    "path": "crates/topos-p2p/src/tests/support/macros.rs",
    "content": "#[macro_export]\nmacro_rules! wait_for_event {\n    ($node:ident, matches: $(|)? $( $pattern:pat_param )|+ $( if $guard: expr )? $(,)?) => {\n        let assertion = async {\n            while let Some(event) = $node.next().await {\n                if matches!(event, $( $pattern )|+ $( if $guard )?) {\n                    break;\n                }\n            }\n        };\n\n        if let Err(_) = tokio::time::timeout(std::time::Duration::from_millis(100), assertion).await\n        {\n            panic!(\"Timeout waiting for event\");\n        }\n    };\n}\n"
  },
  {
    "path": "crates/topos-p2p/src/tests/support/mod.rs",
    "content": "use libp2p::{\n    identity::{self, Keypair},\n    Multiaddr, PeerId,\n};\nuse rstest::fixture;\nuse tokio::spawn;\nuse topos_test_sdk::networking::get_available_port;\n\nuse crate::{network::NetworkBuilder, NetworkClient, Runtime};\n\npub mod macros;\n\npub type PeerAddr = (PeerId, Multiaddr);\n\n#[fixture]\npub async fn dummy_peer() -> (NetworkClient, PeerAddr) {\n    let (key, addr_dummy) = local_peer(1);\n    let dummy_peer = (key.public().to_peer_id(), addr_dummy.clone());\n\n    let (client, _stream, runtime): (_, _, Runtime) = NetworkBuilder::default()\n        .peer_key(key)\n        .listen_addresses(vec![addr_dummy.clone()])\n        .public_addresses(vec![addr_dummy])\n        .build()\n        .await\n        .unwrap();\n\n    spawn(runtime.run());\n    (client, dummy_peer)\n}\n\npub fn keypair_from_byte(seed: u8) -> Keypair {\n    let mut bytes = [0u8; 32];\n    bytes[0] = seed;\n\n    identity::Keypair::ed25519_from_bytes(bytes).expect(\"Invalid keypair\")\n}\n\npub fn local_peer(peer_index: u8) -> (Keypair, Multiaddr) {\n    let peer_id: Keypair = keypair_from_byte(peer_index);\n    let port = get_available_port();\n    let local_listen_addr: Multiaddr = format!(\"/ip4/127.0.0.1/tcp/{port}\").parse().unwrap();\n    (peer_id, local_listen_addr)\n}\n"
  },
  {
    "path": "crates/topos-p2p/tests/support/network.rs",
    "content": "use futures::{Stream, StreamExt};\nuse libp2p::{identity::Keypair, Multiaddr, PeerId};\nuse tokio::spawn;\nuse topos_p2p::{network, Client};\n\npub use topos_test_sdk::p2p::local_peer;\n\npub async fn start_node(\n    (peer_key, _, peer_addr): (Keypair, u16, Multiaddr),\n    known_peers: Vec<(PeerId, Multiaddr)>,\n) -> TestNodeContext {\n    let peer_id = peer_key.public().to_peer_id();\n\n    let (client, event_stream, event_loop) = network::builder()\n        .peer_key(peer_key)\n        .known_peers(&known_peers)\n        .listen_addr(peer_addr.clone())\n        .exposed_addresses(peer_addr.clone())\n        .build()\n        .await\n        .unwrap();\n\n    spawn(event_loop.run());\n\n    let _ = client.start_listening(peer_addr.clone()).await;\n\n    TestNodeContext {\n        peer_id,\n        client,\n        stream: Box::new(event_stream),\n    }\n}\n\npub struct TestNodeContext {\n    pub(crate) peer_id: PeerId,\n    pub(crate) client: Client,\n    stream: Box<dyn Stream<Item = topos_p2p::Event> + Unpin + Send>,\n}\n\nimpl TestNodeContext {\n    pub(crate) async fn next_event(&mut self) -> Option<topos_p2p::Event> {\n        self.stream.next().await\n    }\n}\n"
  },
  {
    "path": "crates/topos-sequencer/Cargo.toml",
    "content": "[package]\nname = \"topos-sequencer\"\ndescription = \"Implementation of the Topos protocol\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n[lints]\nworkspace = true\n\n[dependencies]\nhex.workspace = true\nserde = { workspace = true, features = [\"derive\"] }\ntokio = { workspace = true, features = [\"full\"] }\ntokio-util.workspace = true\ntracing-subscriber = {workspace = true, features = [\"fmt\", \"std\", \"env-filter\",]}\ntracing.workspace = true\ntracing-opentelemetry.workspace = true\nopentelemetry.workspace = true\n\ntopos-crypto.workspace = true\ntopos-wallet = { path = \"../topos-wallet\" }\ntopos-core = { workspace = true, features = [\"uci\"] }\ntopos-sequencer-subnet-runtime = { package = \"topos-sequencer-subnet-runtime\", path = \"../topos-sequencer-subnet-runtime\" }\ntopos-tce-proxy = { package = \"topos-tce-proxy\", path = \"../topos-tce-proxy\" }\n\n"
  },
  {
    "path": "crates/topos-sequencer/src/app_context.rs",
    "content": "//!\n//! Application logic glue\n//!\nuse crate::SequencerConfiguration;\nuse opentelemetry::trace::FutureExt;\nuse tokio::sync::mpsc;\nuse tokio_util::sync::CancellationToken;\nuse topos_sequencer_subnet_runtime::proxy::{SubnetRuntimeProxyCommand, SubnetRuntimeProxyEvent};\nuse topos_sequencer_subnet_runtime::SubnetRuntimeProxyWorker;\nuse topos_tce_proxy::{worker::TceProxyWorker, TceProxyCommand, TceProxyEvent};\nuse tracing::{debug, error, info, info_span, warn, Instrument, Span};\nuse tracing_opentelemetry::OpenTelemetrySpanExt;\n\n/// Top-level transducer sequencer app context & driver (alike)\n///\n/// Implements <...Host> traits for network and Api, listens for protocol events in events\n/// (store is not active component).\n///\n/// In the end we shall come to design where this struct receives\n/// config+data as input and runs app returning data as output\n///\npub struct AppContext {\n    pub config: SequencerConfiguration,\n    pub subnet_runtime_proxy_worker: SubnetRuntimeProxyWorker,\n    pub tce_proxy_worker: TceProxyWorker,\n}\n\npub enum AppContextStatus {\n    Finished,\n    Restarting,\n}\n\nimpl AppContext {\n    /// Factory\n    pub fn new(\n        config: SequencerConfiguration,\n        runtime_proxy_worker: SubnetRuntimeProxyWorker,\n        tce_proxy_worker: TceProxyWorker,\n    ) -> Self {\n        Self {\n            config,\n            subnet_runtime_proxy_worker: runtime_proxy_worker,\n            tce_proxy_worker,\n        }\n    }\n\n    /// Main processing loop\n    pub(crate) async fn run(\n        &mut self,\n        shutdown: (CancellationToken, mpsc::Sender<()>),\n    ) -> AppContextStatus {\n        loop {\n            tokio::select! 
{\n\n                // Subnet event handling\n                Ok(evt) = self.subnet_runtime_proxy_worker.next_event() => {\n                    debug!(\"runtime_proxy_worker.next_event(): {:?}\", &evt);\n                    self.on_subnet_runtime_proxy_event(evt).await;\n                },\n\n                // TCE event handling\n                Ok(tce_evt) = self.tce_proxy_worker.next_event() => {\n                    debug!(\"tce_proxy_worker.next_event(): {:?}\", &tce_evt);\n                    match tce_evt {\n                        TceProxyEvent::TceServiceFailure | TceProxyEvent::WatchCertificatesChannelFailed => {\n                            // Unrecoverable failure in interaction with the TCE. Sequencer needs to be restarted\n                            error!(\n                                \"Unrecoverable failure in sequencer <-> tce interaction. Shutting down the \\\n                                 sequencer...\"\n                            );\n                            if let Err(e) = self.shutdown().await {\n                                warn!(\"Failed to shutdown: {e:?}\");\n                            }\n                            info!(\"Shutdown finished, restarting sequencer...\");\n                            return AppContextStatus::Restarting;\n                        },\n                        _ => self.on_tce_proxy_event(tce_evt).await,\n                    }\n                },\n\n                // Shutdown signal\n                _ = shutdown.0.cancelled() => {\n                    info!(\"Shutting down Sequencer app context...\");\n                    if let Err(e) = self.shutdown().await {\n                        error!(\"Failed to shutdown the Sequencer app context: {e}\");\n                    }\n                    // Drop the sender to notify the Sequencer termination\n                    drop(shutdown.1);\n                    return AppContextStatus::Finished;\n                }\n            }\n        }\n    
}\n\n    async fn on_subnet_runtime_proxy_event(&mut self, evt: SubnetRuntimeProxyEvent) {\n        debug!(\"on_subnet_runtime_proxy_event : {:?}\", &evt);\n        match evt {\n            SubnetRuntimeProxyEvent::NewCertificate {\n                cert,\n                block_number: _,\n                ctx,\n            } => {\n                let span = info_span!(\"Sequencer app context\");\n                span.set_parent(ctx);\n                if let Err(e) = self\n                    .tce_proxy_worker\n                    .send_command(TceProxyCommand::SubmitCertificate {\n                        cert,\n                        ctx: span.context(),\n                    })\n                    .with_context(span.context())\n                    .instrument(span)\n                    .await\n                {\n                    error!(\"Unable to send tce proxy command {e}\");\n                }\n            }\n            SubnetRuntimeProxyEvent::NewEra(_authorities) => {\n                todo!()\n            }\n        }\n    }\n\n    async fn on_tce_proxy_event(&mut self, evt: TceProxyEvent) {\n        if let TceProxyEvent::NewDeliveredCerts { certificates, ctx } = evt {\n            let span = info_span!(\"Sequencer app context\");\n            span.set_parent(ctx);\n            async {\n                // New certificates acquired from TCE\n                for (cert, cert_position) in certificates {\n                    self.subnet_runtime_proxy_worker\n                        .eval(SubnetRuntimeProxyCommand::OnNewDeliveredCertificate {\n                            certificate: cert,\n                            position: cert_position,\n                            ctx: Span::current().context(),\n                        })\n                        .await\n                        .expect(\"Propagate new delivered Certificate to the runtime\");\n                }\n            }\n            .with_context(span.context())\n            .instrument(span)\n     
       .await\n        }\n    }\n\n    // Shutdown app\n    async fn shutdown(&mut self) -> Result<(), Box<dyn std::error::Error>> {\n        self.tce_proxy_worker.shutdown().await?;\n        self.subnet_runtime_proxy_worker.shutdown().await?;\n\n        Ok(())\n    }\n}\n"
  },
  {
    "path": "crates/topos-sequencer/src/lib.rs",
    "content": "use crate::app_context::{AppContext, AppContextStatus};\nuse std::io::ErrorKind::InvalidInput;\nuse std::process::ExitStatus;\nuse tokio::{\n    spawn,\n    sync::{\n        mpsc,\n        oneshot::{self, Sender},\n    },\n};\nuse tokio_util::sync::CancellationToken;\nuse topos_core::uci::{CertificateId, SubnetId};\nuse topos_sequencer_subnet_runtime::{SubnetRuntimeProxyConfig, SubnetRuntimeProxyWorker};\nuse topos_tce_proxy::{worker::TceProxyWorker, TceProxyConfig};\nuse topos_wallet::SecretKey;\nuse tracing::{debug, info, warn};\n\nmod app_context;\n\n#[derive(Debug, Clone)]\npub struct SequencerConfiguration {\n    pub subnet_id: Option<String>,\n    pub public_key: Option<Vec<u8>>,\n    pub subnet_jsonrpc_http: String,\n    pub subnet_jsonrpc_ws: Option<String>,\n    pub subnet_contract_address: String,\n    pub tce_grpc_endpoint: String,\n    pub signing_key: SecretKey,\n    pub verifier: u32,\n    pub start_block: Option<u64>,\n}\n\nasync fn launch_workers(\n    config: SequencerConfiguration,\n    ctx_send: Sender<AppContext>,\n    subnet_id: SubnetId,\n) -> Result<(), Box<dyn std::error::Error>> {\n    let (http_endpoint, mut ws_endpoint) =\n        topos_sequencer_subnet_runtime::derive_endpoints(&config.subnet_jsonrpc_http)?;\n\n    if let Some(config_ws_endpoint) = config.subnet_jsonrpc_ws.as_ref() {\n        // Use explicitly provided websocket subnet endpoint\n        ws_endpoint = config_ws_endpoint.clone();\n    }\n    // Instantiate subnet runtime proxy, handling interaction with subnet node\n    let subnet_runtime_proxy_worker = match SubnetRuntimeProxyWorker::new(\n        SubnetRuntimeProxyConfig {\n            subnet_id,\n            http_endpoint,\n            ws_endpoint,\n            subnet_contract_address: config.subnet_contract_address.clone(),\n            source_head_certificate_id: None, // Must be acquired later after TCE proxy is connected\n            verifier: config.verifier,\n            start_block: 
config.start_block,\n        },\n        config.signing_key.clone(),\n    )\n    .await\n    {\n        Ok(subnet_runtime_proxy) => subnet_runtime_proxy,\n        Err(e) => {\n            return Err(Box::new(e));\n        }\n    };\n\n    // Get subnet checkpoints from subnet to pass them to the TCE node\n    // It will retry using backoff algorithm, but if it fails (default max backoff elapsed time is 15 min) we can not proceed\n    let target_subnet_stream_positions = match subnet_runtime_proxy_worker.get_checkpoints().await {\n        Ok(checkpoints) => checkpoints,\n        Err(e) => {\n            return Err(Box::new(e));\n        }\n    };\n\n    // Launch Tce proxy worker for handling interaction with TCE node\n    // For initialization it will retry using backoff algorithm, but if it fails we can not proceed and we restart sequencer\n    // Once it is initialized, TCE proxy will try reconnecting in the loop (with backoff) if TCE becomes unavailable\n    let (tce_proxy_worker, source_head_certificate_id) = match TceProxyWorker::new(TceProxyConfig {\n        subnet_id,\n        tce_endpoint: config.tce_grpc_endpoint.clone(),\n        positions: target_subnet_stream_positions,\n    })\n    .await\n    {\n        Ok((tce_proxy_worker, mut source_head_certificate)) => {\n            // FIXME: If TCE returns all zeros for the source head certificate, it means that it does not have\n            // any information about the subnet. 
Until registration of the subnets with the topos subnet is implemented,\n            // we get genesis block (and create genesis certificate) directly from the subnet block 0\n            if let Some((cert, _position)) = &mut source_head_certificate {\n                if cert.id == CertificateId::default() {\n                    warn!(\n                        \"Tce has not provided source head certificate, starting from subnet \\\n                         genesis block...\"\n                    );\n                    source_head_certificate = None;\n                }\n            }\n\n            info!(\n                \"TCE proxy client is starting for the source subnet {:?} from the head {:?}\",\n                subnet_id, source_head_certificate\n            );\n            let source_head_certificate_id =\n                source_head_certificate.map(|(cert, position)| (cert.id, position));\n            (tce_proxy_worker, source_head_certificate_id)\n        }\n        Err(e) => {\n            panic!(\"Unable to create TCE Proxy: {e}\");\n        }\n    };\n\n    // Set source head certificate to know from where to\n    // start producing certificates\n    if let Err(e) = subnet_runtime_proxy_worker\n        .set_source_head_certificate_id(source_head_certificate_id)\n        .await\n    {\n        panic!(\"Unable to set source head certificate id: {e}\");\n    }\n\n    let _ = ctx_send.send(AppContext::new(\n        config,\n        subnet_runtime_proxy_worker,\n        tce_proxy_worker,\n    ));\n    Ok(())\n}\n\npub async fn launch(\n    config: SequencerConfiguration,\n    ctx_send: Sender<AppContext>,\n) -> Result<(), Box<dyn std::error::Error>> {\n    debug!(\"Starting topos-sequencer application\");\n\n    // If subnetID is specified as command line argument, use it\n    let subnet_id: SubnetId = if let Some(pk) = &config.public_key {\n        SubnetId::try_from(&pk[1..]).expect(\"Can parse public key into a SubnetID\")\n    } else if let 
Some(subnet_id) = &config.subnet_id {\n        if &subnet_id[0..2] != \"0x\" {\n            return Err(Box::new(std::io::Error::new(\n                InvalidInput,\n                \"Subnet id must start with `0x`\",\n            )));\n        }\n        hex::decode(&subnet_id[2..])?.as_slice().try_into()?\n    }\n    // Get subnet id from the subnet node if not provided via the command line argument\n    // It will retry using backoff algorithm, but if it fails (default max backoff elapsed time is 15 min) we can not proceed\n    else {\n        let http_endpoint =\n            topos_sequencer_subnet_runtime::derive_endpoints(&config.subnet_jsonrpc_http)\n                .map_err(|e| {\n                    Box::new(std::io::Error::new(\n                        InvalidInput,\n                        format!(\"Invalid subnet endpoint: {e}\"),\n                    ))\n                })?\n                .0;\n        match SubnetRuntimeProxyWorker::get_subnet_id(\n            &http_endpoint,\n            config.subnet_contract_address.as_str(),\n        )\n        .await\n        {\n            Ok(subnet_id) => {\n                info!(\"Retrieved subnet id from the subnet node {subnet_id}\");\n                subnet_id\n            }\n            Err(e) => {\n                panic!(\"Unable to get subnet id from the subnet {e}\");\n            }\n        }\n    };\n\n    launch_workers(config, ctx_send, subnet_id).await\n}\n\npub async fn run(\n    config: SequencerConfiguration,\n    shutdown: (CancellationToken, mpsc::Sender<()>),\n) -> Result<ExitStatus, Box<dyn std::error::Error>> {\n    loop {\n        let shutdown_appcontext = shutdown.clone();\n\n        let (ctx_send, mut ctx_recv) = oneshot::channel::<AppContext>();\n\n        let config = config.clone();\n        let launching = spawn(async move {\n            let _ = launch(config, ctx_send).await;\n        });\n\n        let app_context: Option<AppContext> = tokio::select! 
{\n            app = &mut ctx_recv => {\n                match app {\n                    Ok(context) => Some(context),\n                    Err(e) => {\n                        info!(\"Application initialized with error: {e}\");\n                        None\n                    }\n                }\n            },\n\n            // Shutdown signal\n            _ = shutdown.0.cancelled() => {\n                info!(\"Stopping Sequencer launch...\");\n                drop(shutdown.1);\n                launching.abort();\n                return Ok(ExitStatus::default());\n            }\n        };\n\n        if let Some(mut app) = app_context {\n            match app.run(shutdown_appcontext).await {\n                AppContextStatus::Restarting => {\n                    // We finish the loop, restarting sequencer here\n                    warn!(\"Restarting sequencer...\");\n                    tokio::time::sleep(tokio::time::Duration::from_secs(10)).await;\n                }\n                AppContextStatus::Finished => {\n                    info!(\"Sequencer app finished, exiting...\");\n                    return Ok(ExitStatus::default());\n                }\n            }\n        } else {\n            warn!(\"Sequencer startup failed, restarting sequencer...\");\n            tokio::time::sleep(tokio::time::Duration::from_secs(10)).await;\n        }\n    }\n}\n"
  },
  {
    "path": "crates/topos-sequencer-subnet-client/Cargo.toml",
    "content": "[package]\nname = \"topos-sequencer-subnet-client\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n[lints]\nworkspace = true\n\n[dependencies]\nhex.workspace = true\nserde_json.workspace = true\nthiserror.workspace = true\ntracing.workspace = true\ntokio.workspace = true\nbackoff.workspace = true\nserde = { workspace = true, features = [\"derive\"] }\ntiny-keccak.workspace = true\nethers.workspace = true\nethers-providers = { version = \"2.0.8\", features = [\"ws\"] }\nrustc-hex = \"2.1.0\"\n\ntopos-core = { workspace = true, features = [\"uci\", \"api\"] }\n\n[build-dependencies]\nethers.workspace = true\n"
  },
  {
    "path": "crates/topos-sequencer-subnet-client/src/lib.rs",
    "content": "pub mod subnet_contract;\n\nuse crate::subnet_contract::{create_topos_core_contract_from_json, get_block_events};\nuse ethers::abi::ethabi::ethereum_types::{H160, U256};\nuse ethers::core::k256::ecdsa::SigningKey;\nuse ethers::signers::Wallet;\nuse ethers::types::TransactionReceipt;\nuse ethers::{\n    abi::Token,\n    core::rand::thread_rng,\n    middleware::SignerMiddleware,\n    providers::{Http, Provider, ProviderError, StreamExt, Ws, WsClientError},\n    signers::{LocalWallet, Signer, WalletError},\n};\nuse ethers_providers::{Middleware, SubscriptionStream};\nuse serde::{Deserialize, Serialize};\nuse std::sync::Arc;\nuse std::time::Duration;\nuse topos_core::api::grpc::checkpoints::TargetStreamPosition;\npub use topos_core::uci::{\n    Address, Certificate, CertificateId, ReceiptsRootHash, StateRoot, SubnetId, TxRootHash,\n    CERTIFICATE_ID_LENGTH, SUBNET_ID_LENGTH,\n};\nuse tracing::{error, info, warn};\n\nconst PUSH_CERTIFICATE_GAS_LIMIT: u64 = 1000000;\n// Maximum backoff retry timeout in seconds (12 hours)\nconst SUBNET_CONNECT_BACKOFF_TIMEOUT: Duration = Duration::from_secs(12 * 3600);\nconst SUBNET_GET_CHECKPOINTS_BACKOFF_TIMEOUT: Duration = Duration::from_secs(3600);\nconst SUBNET_GET_SUBNET_ID_BACKOFF_TIMEOUT: Duration = Duration::from_secs(3600);\n\npub type BlockData = Vec<u8>;\npub type BlockNumber = u64;\npub type Hash = String;\n\n/// Event collected from the sending subnet\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub enum SubnetEvent {\n    CrossSubnetMessageSent {\n        target_subnet_id: SubnetId,\n        source_subnet_id: SubnetId,\n        nonce: u64,\n    },\n}\n\n#[derive(Default, Debug, Clone, Serialize, Deserialize)]\npub struct BlockInfo {\n    /// hash of the block.\n    pub hash: Hash,\n    /// hash of the parent block.\n    pub parent_hash: Hash,\n    /// block's number.\n    pub number: u64,\n    /// state root\n    pub state_root: StateRoot,\n    /// tx root hash\n    pub tx_root_hash: TxRootHash,\n    
/// receipts root hash\n    pub receipts_root_hash: ReceiptsRootHash,\n    /// Subnet events collected in this block\n    pub events: Vec<SubnetEvent>,\n}\n\n#[derive(Debug, thiserror::Error)]\npub enum Error {\n    #[error(\"new finalized block not available\")]\n    BlockNotAvailable(u64),\n    #[error(\"next stream block is not available\")]\n    StreamBlockNotAvailable,\n    #[error(\"invalid block number: {0}\")]\n    InvalidBlockNumber(u64),\n    #[error(\"block number not available\")]\n    BlockNumberNotAvailable,\n    #[error(\"failed mutable cast\")]\n    MutableCastFailed,\n    #[error(\"json error: {source}\")]\n    JsonError {\n        #[from]\n        source: serde_json::Error,\n    },\n    #[error(\"json parse error\")]\n    JsonParseError,\n    #[error(\"invalid url: {0}\")]\n    InvalidUrl(String),\n    #[error(\"hex data decoding error: {0}\")]\n    HexDecodingError(rustc_hex::FromHexError),\n    #[error(\"ethers provider error: {0}\")]\n    EthersProviderError(ProviderError),\n    #[error(\"ethereum contract error: {0}\")]\n    ContractError(String),\n    #[error(\"event decoding error: {0}\")]\n    EventDecodingError(String),\n    #[error(\"ethereum event error: {0}\")]\n    EventError(String),\n    #[error(\"invalid argument: {message}\")]\n    InvalidArgument { message: String },\n    #[error(\"wallet error: {0}\")]\n    WalletError(WalletError),\n    #[error(\"invalid secret key: {0}\")]\n    InvalidKey(String),\n    #[error(\"error with signing ethereum transaction\")]\n    EthereumTxSignError,\n    #[error(\"web socket client error: {0}\")]\n    WsClientError(WsClientError),\n    #[error(\"input output error: {source}\")]\n    InputOutputError {\n        #[from]\n        source: std::io::Error,\n    },\n    #[error(\"invalid certificate id\")]\n    InvalidCertificateId,\n    #[error(\"invalid checkpoints data\")]\n    InvalidCheckpointsData,\n}\n\n// Subnet client for listening events from subnet node\npub struct SubnetClientListener {\n    
contract: subnet_contract::IToposCore<Provider<Ws>>,\n    provider: Arc<Provider<Ws>>,\n}\n\nimpl SubnetClientListener {\n    /// Initialize a new Subnet client\n    pub async fn new(ws_subnet_endpoint: &str, contract_address: &str) -> Result<Self, Error> {\n        info!(\n            \"Connecting to subnet node at endpoint: {}\",\n            ws_subnet_endpoint\n        );\n        let ws = Provider::<Ws>::connect(ws_subnet_endpoint)\n            .await\n            .map_err(Error::EthersProviderError)?;\n        let provider = Arc::new(ws);\n\n        // Initialize Topos Core Contract from json abi\n        let contract = create_topos_core_contract_from_json(contract_address, provider.clone())?;\n\n        Ok(SubnetClientListener { contract, provider })\n    }\n\n    pub async fn new_block_subscription_stream(\n        &self,\n    ) -> Result<SubscriptionStream<Ws, ethers::types::Block<ethers::types::H256>>, Error> {\n        self.provider\n            .subscribe_blocks()\n            .await\n            .map_err(Error::EthersProviderError)\n    }\n\n    /// Subscribe and listen to runtime finalized blocks\n    pub async fn get_finalized_block(\n        &mut self,\n        next_block_number: u64,\n    ) -> Result<BlockInfo, Error> {\n        let latest_subnet_block_number = self\n            .provider\n            .get_block_number()\n            .await\n            .map_err(Error::EthersProviderError)?;\n\n        info!(\n            \"Finalized block number: next={} and latest={}\",\n            next_block_number, latest_subnet_block_number\n        );\n\n        if latest_subnet_block_number.as_u64() < next_block_number {\n            return Err(Error::BlockNotAvailable(next_block_number));\n        }\n\n        let block = self\n            .provider\n            .get_block(next_block_number)\n            .await\n            .map_err(Error::EthersProviderError)?\n            .ok_or(Error::InvalidBlockNumber(next_block_number))?;\n        let block_number = 
block\n            .number\n            .ok_or(Error::InvalidBlockNumber(next_block_number))?;\n        let events = match get_block_events(&self.contract, block_number).await {\n            Ok(events) => events,\n            Err(Error::EventDecodingError(e)) => {\n                // FIXME: Happens in block before subnet contract is deployed, seems like bug in ethers\n                warn!(\n                    \"Error decoding events from block {}: {e}. Topos smart contracts may not be \\\n                     deployed before the parsed block?\",\n                    block_number\n                );\n                Vec::new()\n            }\n            Err(e) => {\n                error!(\"Unable to parse events from block {}: {e}\", block_number);\n                return Err(e);\n            }\n        };\n\n        // Make block info result from all collected info\n        let block_info = BlockInfo {\n            hash: block.hash.unwrap_or_default().to_string(),\n            parent_hash: block.parent_hash.to_string(),\n            number: block_number.to_owned().as_u64(),\n            state_root: block.state_root.0,\n            tx_root_hash: block.transactions_root.0,\n            receipts_root_hash: block.receipts_root.0,\n            events,\n        };\n        info!(\n            \"Fetched new finalized block from subnet: {:?}\",\n            block_info.number\n        );\n        Ok(block_info)\n    }\n\n    /// Subscribe and listen to runtime finalized blocks\n    pub async fn get_subnet_block_number(&mut self) -> Result<u64, Error> {\n        self.provider\n            .get_block_number()\n            .await\n            .map(|block_number| block_number.as_u64())\n            .map_err(Error::EthersProviderError)\n    }\n\n    pub async fn wait_for_new_block(\n        &self,\n        stream: &mut SubscriptionStream<'_, Ws, ethers::types::Block<ethers::types::H256>>,\n    ) -> Result<BlockInfo, Error> {\n        if let Some(block) = stream.next().await 
{\n            let block_number = block.number.ok_or(Error::BlockNumberNotAvailable)?;\n            let events = match get_block_events(&self.contract, block_number).await {\n                Ok(events) => events,\n                Err(Error::EventDecodingError(e)) => {\n                    // FIXME: Happens in block before subnet contract is deployed, seems like bug in ethers\n                    warn!(\n                        \"Error decoding events from block {}: {e}. Topos smart contracts may not \\\n                         be deployed before the parsed block?\",\n                        block_number\n                    );\n                    Vec::new()\n                }\n                Err(e) => {\n                    error!(\"Unable to parse events from block {}: {e}\", block_number);\n                    return Err(e);\n                }\n            };\n\n            // Make block info result from all collected info\n            let block_info = BlockInfo {\n                hash: block.hash.unwrap_or_default().to_string(),\n                parent_hash: block.parent_hash.to_string(),\n                number: block_number.to_owned().as_u64(),\n                state_root: block.state_root.0,\n                tx_root_hash: block.transactions_root.0,\n                receipts_root_hash: block.receipts_root.0,\n                events,\n            };\n            Ok(block_info)\n        } else {\n            Err(Error::StreamBlockNotAvailable)\n        }\n    }\n}\n\n/// Create subnet client listener and open connection to the subnet\n/// Retry until connection is valid\npub async fn connect_to_subnet_listener_with_retry(\n    ws_runtime_endpoint: &str,\n    subnet_contract_address: &str,\n) -> Result<SubnetClientListener, crate::Error> {\n    info!(\n        \"Connecting to subnet endpoint to listen events from {} using backoff strategy...\",\n        ws_runtime_endpoint\n    );\n\n    let op = || async {\n        // Create subnet listener\n        match 
SubnetClientListener::new(ws_runtime_endpoint, subnet_contract_address).await {\n            Ok(subnet_listener) => Ok(subnet_listener),\n            Err(e) => {\n                error!(\"Unable to instantiate the subnet client listener: {e}\");\n                Err(new_subnet_client_proxy_backoff_err(e))\n            }\n        }\n    };\n\n    let backoff_configuration = backoff::ExponentialBackoff {\n        max_elapsed_time: Some(SUBNET_CONNECT_BACKOFF_TIMEOUT),\n        ..Default::default()\n    };\n    backoff::future::retry(backoff_configuration, op).await\n}\n\n// Subnet client for calling target network smart contract\npub struct SubnetClient {\n    pub eth_admin_address: H160,\n    contract: subnet_contract::IToposCore<SignerMiddleware<Provider<Http>, Wallet<SigningKey>>>,\n}\n\nimpl SubnetClient {\n    /// Polling interval for event filters and pending transactions\n    pub const NODE_POLLING_INTERVAL: Duration = Duration::from_millis(2000u64);\n\n    /// Initialize a new Subnet client\n    pub async fn new(\n        http_subnet_endpoint: &str,\n        eth_admin_secret_key: Option<Vec<u8>>,\n        contract_address: &str,\n    ) -> Result<Self, Error> {\n        info!(\n            \"Connecting to subnet node at endpoint: {}\",\n            http_subnet_endpoint\n        );\n\n        let http = Provider::<Http>::try_from(http_subnet_endpoint)\n            .map_err(|e| Error::InvalidUrl(e.to_string()))?\n            .interval(SubnetClient::NODE_POLLING_INTERVAL);\n\n        let wallet: LocalWallet = if let Some(eth_admin_secret_key) = &eth_admin_secret_key {\n            hex::encode(eth_admin_secret_key)\n                .parse()\n                .map_err(Error::WalletError)?\n        } else {\n            // Dummy random key, will not be used to sign transactions\n            LocalWallet::new(&mut thread_rng())\n        };\n        let chain_id = http\n            .get_chainid()\n            .await\n            .map_err(Error::EthersProviderError)?;\n  
      let client = Arc::new(SignerMiddleware::new(\n            http,\n            wallet.clone().with_chain_id(chain_id.as_u64()),\n        ));\n        // Initialize Topos Core Contract from json abi\n        let contract = create_topos_core_contract_from_json(contract_address, client)?;\n\n        let eth_admin_address = if let Some(eth_admin_secret_key) = eth_admin_secret_key {\n            match subnet_contract::derive_eth_address(&eth_admin_secret_key) {\n                Ok(address) => address,\n                Err(e) => {\n                    error!(\n                        \"Unable to derive admin address from secret key, error instantiating \\\n                         subnet client: {}\",\n                        e\n                    );\n                    return Err(e);\n                }\n            }\n        } else {\n            Default::default()\n        };\n\n        Ok(SubnetClient {\n            eth_admin_address,\n            contract,\n        })\n    }\n\n    pub async fn push_certificate(\n        &self,\n        cert: &Certificate,\n        cert_position: u64,\n    ) -> Result<Option<TransactionReceipt>, Error> {\n        let prev_cert_id: Token = Token::FixedBytes(cert.prev_id.as_array().to_vec());\n        let source_subnet_id: Token = Token::FixedBytes(cert.source_subnet_id.into());\n        let state_root: Token = Token::FixedBytes(cert.state_root.to_vec());\n        let tx_root: Token = Token::FixedBytes(cert.tx_root_hash.to_vec());\n        let receipt_root: Token = Token::FixedBytes(cert.receipts_root_hash.to_vec());\n        let target_subnets: Token = Token::Array(\n            cert.target_subnets\n                .iter()\n                .map(|target_subnet| Token::FixedBytes((*target_subnet).into()))\n                .collect::<Vec<Token>>(),\n        );\n        let verifier = Token::Uint(U256::from(cert.verifier));\n        let cert_id: Token = Token::FixedBytes(cert.id.as_array().to_vec());\n        let stark_proof: Token 
= Token::Bytes(cert.proof.clone());\n        let signature: Token = Token::Bytes(cert.signature.clone());\n        let cert_position = U256::from(cert_position);\n        let encoded_cert_bytes = ethers::abi::encode(&[\n            prev_cert_id,\n            source_subnet_id,\n            state_root,\n            tx_root,\n            receipt_root,\n            target_subnets,\n            verifier,\n            cert_id,\n            stark_proof,\n            signature,\n        ]);\n\n        let tx = self\n            .contract\n            .push_certificate(encoded_cert_bytes.into(), cert_position)\n            .gas(PUSH_CERTIFICATE_GAS_LIMIT)\n            .legacy(); // Polygon Edge only supports legacy transactions\n\n        let receipt = tx\n            .send()\n            .await\n            .map_err(|e| {\n                error!(\"Unable to push certificate: {e}\");\n                Error::ContractError(e.to_string())\n            })?\n            .await\n            .map_err(Error::EthersProviderError)?;\n\n        Ok(receipt)\n    }\n\n    /// Ask subnet for latest pushed certificates, for every source subnet\n    /// Returns list of latest stream positions for every source subnet\n    pub async fn get_checkpoints(\n        &self,\n        target_subnet_id: &topos_core::uci::SubnetId,\n    ) -> Result<Vec<TargetStreamPosition>, Error> {\n        let op = || async {\n            let mut target_stream_positions: Vec<TargetStreamPosition> = Vec::new();\n            let stream_positions = self.contract.get_checkpoints().call().await.map_err(|e| {\n                error!(\"Unable to get checkpoints: {e}\");\n                Error::ContractError(e.to_string())\n            })?;\n\n            for position in stream_positions {\n                target_stream_positions.push(TargetStreamPosition {\n                    target_subnet_id: *target_subnet_id,\n                    certificate_id: Some(\n                        TryInto::<[u8; 
CERTIFICATE_ID_LENGTH]>::try_into(position.cert_id)\n                            .map_err(|_| Error::InvalidCheckpointsData)?\n                            .into(),\n                    ),\n                    source_subnet_id: TryInto::<[u8; SUBNET_ID_LENGTH]>::try_into(\n                        position.source_subnet_id,\n                    )\n                    .map_err(|_| Error::InvalidCheckpointsData)?\n                    .into(),\n                    position: position.position.as_u64(),\n                });\n            }\n\n            Ok(target_stream_positions)\n        };\n\n        let backoff_configuration = backoff::ExponentialBackoff {\n            max_elapsed_time: Some(SUBNET_GET_CHECKPOINTS_BACKOFF_TIMEOUT),\n            ..Default::default()\n        };\n        backoff::future::retry(backoff_configuration, op).await\n    }\n\n    /// Ask subnet for its subnet id\n    pub async fn get_subnet_id(&self) -> Result<SubnetId, Error> {\n        let op = || async {\n            let subnet_id = self\n                .contract\n                .network_subnet_id()\n                .call()\n                .await\n                .map_err(|e| {\n                    error!(\"Unable to query network subnet id: {e}\");\n                    Error::ContractError(e.to_string())\n                })?;\n            Ok(SubnetId::from_array(subnet_id))\n        };\n        let backoff_configuration = backoff::ExponentialBackoff {\n            max_elapsed_time: Some(SUBNET_GET_SUBNET_ID_BACKOFF_TIMEOUT),\n            ..Default::default()\n        };\n        backoff::future::retry(backoff_configuration, op).await\n    }\n}\n\n/// Create new backoff library error based on error that happened\npub(crate) fn new_subnet_client_proxy_backoff_err<E: std::fmt::Display>(\n    err: E,\n) -> backoff::Error<E> {\n    // Retry according to backoff policy\n    backoff::Error::Transient {\n        err,\n        retry_after: None,\n    }\n}\n\n/// Create subnet client and open 
connection to the subnet\n/// Retry until connection is valid\npub async fn connect_to_subnet_with_retry(\n    http_subnet_endpoint: &str,\n    signing_key: Option<Vec<u8>>,\n    contract_address: &str,\n) -> Result<SubnetClient, crate::Error> {\n    info!(\n        \"Connecting to subnet endpoint {} using backoff strategy...\",\n        http_subnet_endpoint\n    );\n\n    let op = || async {\n        match SubnetClient::new(http_subnet_endpoint, signing_key.clone(), contract_address).await {\n            Ok(subnet_client) => Ok(subnet_client),\n            Err(e) => {\n                error!(\n                    \"Unable to instantiate http subnet client to endpoint {}: {e}\",\n                    http_subnet_endpoint,\n                );\n                Err(new_subnet_client_proxy_backoff_err(e))\n            }\n        }\n    };\n    let backoff_configuration = backoff::ExponentialBackoff {\n        max_elapsed_time: Some(SUBNET_CONNECT_BACKOFF_TIMEOUT),\n        ..Default::default()\n    };\n    backoff::future::retry(backoff_configuration, op).await\n}\n"
  },
  {
    "path": "crates/topos-sequencer-subnet-client/src/subnet_contract.rs",
    "content": "use crate::{Error, SubnetEvent};\nuse ethers::abi::ethabi::ethereum_types::{H160, U64};\nuse ethers::contract::ContractError;\nuse ethers::signers::LocalWallet;\nuse ethers::{\n    prelude::abigen,\n    providers::{Middleware, Provider, Ws},\n    signers::Signer,\n};\nuse std::sync::Arc;\nuse tracing::info;\n\nabigen!(\n    IToposCore,\n    \"npm:@topos-protocol/topos-smart-contracts@3.4.0/artifacts/contracts/interfaces/IToposCore.\\\n     sol/IToposCore.json\"\n);\n\npub(crate) fn create_topos_core_contract_from_json<T: Middleware>(\n    contract_address: &str,\n    client: Arc<T>,\n) -> Result<IToposCore<T>, Error> {\n    let address: ethers::types::Address =\n        contract_address.parse().map_err(Error::HexDecodingError)?;\n    let contract = IToposCore::new(address, client);\n    Ok(contract)\n}\n\npub(crate) async fn get_block_events(\n    contract: &IToposCore<Provider<Ws>>,\n    block_number: U64,\n) -> Result<Vec<crate::SubnetEvent>, Error> {\n    // FIXME: There is some ethers issue when parsing events\n    // from genesis block so skip it - we certainly don't expect any valid event here\n    if block_number.as_u64() == 0 {\n        return Ok(Vec::new());\n    }\n\n    // Parse only event from this particular block\n    let events = contract\n        .events()\n        .from_block(block_number)\n        .to_block(block_number);\n    let topos_core_events = events.query_with_meta().await.map_err(|e| {\n        match e {\n            ContractError::DecodingError(e) => {\n                // FIXME: events have decoding error in the blocks before contract is deployed\n                Error::EventDecodingError(e.to_string())\n            }\n            _ => Error::ContractError(e.to_string()),\n        }\n    })?;\n\n    let mut result = Vec::new();\n    for event in topos_core_events {\n        if let (IToposCoreEvents::CrossSubnetMessageSentFilter(f), meta) = event {\n            info!(\n                \"Received 
CrossSubnetMessageSentFilter event: {f:?}, meta {:?}\",\n                meta\n            );\n            result.push(SubnetEvent::CrossSubnetMessageSent {\n                target_subnet_id: f.target_subnet_id.into(),\n                source_subnet_id: f.source_subnet_id.into(),\n                nonce: f.nonce.as_u64(),\n            })\n        } else {\n            // Ignored other events until we need them\n        }\n    }\n\n    Ok(result)\n}\n\npub fn derive_eth_address(secret_key: &[u8]) -> Result<H160, crate::Error> {\n    let signer = hex::encode(secret_key)\n        .parse::<LocalWallet>()\n        .map_err(|e| Error::InvalidKey(e.to_string()))?;\n    Ok(signer.address())\n}\n"
  },
  {
    "path": "crates/topos-sequencer-subnet-runtime/Cargo.toml",
    "content": "[package]\nname = \"topos-sequencer-subnet-runtime\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n[lints]\nworkspace = true\n\n[dependencies]\nbyteorder.workspace = true\nhex.workspace = true\nrand = { workspace = true, features = [\"default\"] }\nrand_core.workspace = true\nserde = { workspace = true, features = [\"derive\"] }\nthiserror.workspace = true\ntokio = { workspace = true, features = [\n    \"io-util\",\n    \"io-std\",\n    \"macros\",\n    \"rt\",\n    \"rt-multi-thread\",\n    \"fs\",\n    \"time\",\n    \"sync\",\n] }\ntracing-subscriber = { workspace = true, features = [\"env-filter\", \"fmt\"] }\ntracing.workspace = true\ntracing-opentelemetry.workspace = true\nopentelemetry.workspace = true\n\ntopos-core = { workspace = true, features = [\"uci\"] }\ntopos-sequencer-subnet-client = { package = \"topos-sequencer-subnet-client\", path = \"../topos-sequencer-subnet-client\" }\ntopos-crypto = {package = \"topos-crypto\", path = \"../topos-crypto\"}\n\n[dev-dependencies]\nrstest = { workspace = true, features = [\"async-timeout\"] }\nserde_json.workspace = true\ntest-log.workspace = true\nenv_logger.workspace = true\nsecp256k1.workspace = true\nserial_test.workspace = true\ntiny-keccak.workspace = true\nethers.workspace = true\nfs_extra = \"1.3\"\n\n\ntopos-test-sdk = { path = \"../topos-test-sdk/\" }\n"
  },
  {
    "path": "crates/topos-sequencer-subnet-runtime/src/certification.rs",
    "content": "use crate::Error;\nuse std::collections::{HashSet, LinkedList};\nuse std::fmt::{Debug, Formatter};\nuse std::sync::Arc;\nuse tokio::sync::Mutex;\nuse topos_core::uci::{Certificate, CertificateId, SubnetId};\nuse topos_sequencer_subnet_client::{BlockInfo, SubnetEvent};\nuse tracing::debug;\n\npub struct Certification {\n    /// Last known certificate id for subnet\n    pub last_certificate_id: Option<CertificateId>,\n    /// Subnet id for which certificates are generated\n    pub subnet_id: SubnetId,\n    /// Type of verifier used\n    pub verifier: u32,\n    /// Key for signing certificates, currently secp256k1\n    signing_key: Vec<u8>,\n    /// Optional synchronization from particular block number\n    pub start_block: Option<u64>,\n    /// Blocks received from subnet, not yet certified. We keep them in memory until we can\n    /// generate certificate for them. They are kept as linked list to maintain\n    /// order of blocks, latest received blocks are at the end of the list\n    finalized_blocks: LinkedList<BlockInfo>,\n}\n\nimpl Debug for Certification {\n    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {\n        f.debug_struct(\"Certification instance\").finish()\n    }\n}\n\nimpl Certification {\n    pub const BLOCK_HISTORY_LENGTH: usize = 256;\n\n    pub fn new(\n        subnet_id: &SubnetId,\n        source_head_certificate_id: Option<CertificateId>,\n        verifier: u32,\n        signing_key: Vec<u8>,\n        start_block: Option<u64>,\n    ) -> Result<Arc<Mutex<Certification>>, crate::Error> {\n        Ok(Arc::new(Mutex::from(Self {\n            last_certificate_id: source_head_certificate_id,\n            finalized_blocks: LinkedList::<BlockInfo>::new(),\n            subnet_id: *subnet_id,\n            verifier,\n            signing_key,\n            start_block,\n        })))\n    }\n\n    /// Generation of Certificates\n    pub(crate) async fn generate_certificates(&mut self) -> Result<Vec<Certificate>, Error> {\n       
 let subnet_id = self.subnet_id;\n        let mut generated_certificates = Vec::new();\n\n        // Keep account of blocks with generated certificates so that we can remove them from\n        // finalized blocks\n        let mut certified_blocks: Vec<u64> = Vec::with_capacity(self.finalized_blocks.len());\n\n        // For every block, create one certificate\n        for block_info in &self.finalized_blocks {\n            // Parse target subnets from events\n            let mut target_subnets: HashSet<SubnetId> = HashSet::new();\n            for event in &block_info.events {\n                match event {\n                    SubnetEvent::CrossSubnetMessageSent {\n                        target_subnet_id, ..\n                    } => {\n                        target_subnets.insert(*target_subnet_id);\n                    }\n                }\n            }\n\n            // Get the id of the previous Certificate from local history\n            let previous_cert_id: CertificateId = match self.last_certificate_id {\n                Some(cert_id) => cert_id,\n                None => {\n                    // FIXME: This is genesis certificate we are generating because we are unable\n                    // to retrieve one from TCE yet\n                    CertificateId::default()\n                }\n            };\n\n            // TODO: acquire proof\n            let proof = Vec::new();\n\n            let mut certificate = Certificate::new(\n                previous_cert_id,\n                subnet_id,\n                block_info.state_root,\n                block_info.tx_root_hash,\n                block_info.receipts_root_hash,\n                &target_subnets.into_iter().collect::<Vec<_>>(),\n                self.verifier,\n                proof,\n            )\n            .map_err(|e| Error::CertificateGenerationError(e.to_string()))?;\n            certificate\n                .update_signature(self.get_signing_key())\n                
.map_err(Error::CertificateSigningError)?;\n            generated_certificates.push(certificate);\n            certified_blocks.push(block_info.number);\n        }\n\n        // Check for inconsistencies\n        let is_genesis_certificate: bool = self\n            .finalized_blocks\n            .front()\n            .map(|b| b.number == 0 || self.start_block.is_some())\n            .unwrap_or(false);\n        let last_known_certificate_id = if is_genesis_certificate {\n            // We are creating genesis certificate, there were no previous certificates\n            // In case where start block is present, we also consider start block as genesis certificate,\n            // so it has no history (prev cert id all 0)\n            CertificateId::default()\n        } else {\n            self.last_certificate_id\n                .ok_or(Error::InvalidPreviousCertificateId)?\n        };\n\n        for new_cert in &generated_certificates {\n            if last_known_certificate_id == new_cert.id {\n                // This should not happen\n                panic!(\"Same certificate generated multiple times: {new_cert:?}\");\n            }\n        }\n\n        // Set info about latest known certificate for subnet\n        if let Some(generated_certificate) = generated_certificates.iter().last() {\n            self.last_certificate_id = Some(generated_certificate.id);\n        }\n\n        // Remove processed blocks\n        for processed_block_number in certified_blocks {\n            let front_block_number = self.finalized_blocks.front().map(|front| front.number);\n\n            if front_block_number.is_some() {\n                if Some(processed_block_number) == front_block_number {\n                    debug!(\n                        \"Block {processed_block_number} processed and removed from the block list\"\n                    );\n                    self.finalized_blocks.pop_front();\n                } else {\n                    panic!(\n                        
\"Block history is inconsistent, this should not happen! \\\n                         processed_block_number: {processed_block_number}, front_number: {:?}\",\n                        front_block_number\n                    );\n                }\n            }\n        }\n\n        Ok(generated_certificates)\n    }\n\n    pub fn get_signing_key(&self) -> &[u8] {\n        self.signing_key.as_slice()\n    }\n\n    /// Expand short block history. Remove older blocks\n    pub fn append_blocks(&mut self, blocks: Vec<BlockInfo>) {\n        self.finalized_blocks.extend(blocks);\n    }\n}\n"
  },
  {
    "path": "crates/topos-sequencer-subnet-runtime/src/lib.rs",
    "content": "//! implementation of Topos Reliable Broadcast to be used in the Transmission Control Engine (TCE)\n//!\n//! Abstracted from actual transport implementation.\n//! Abstracted from actual storage implementation.\n//!\nuse proxy::SubnetRuntimeProxy;\nuse std::sync::Arc;\nuse thiserror::Error;\nuse tokio::sync::Mutex;\nuse tokio::sync::{mpsc, oneshot};\nuse topos_core::api::grpc::checkpoints::TargetStreamPosition;\nuse topos_core::uci::{CertificateId, SubnetId};\n\npub type Peer = String;\n\npub mod certification;\npub mod proxy;\n\nuse crate::proxy::{SubnetRuntimeProxyCommand, SubnetRuntimeProxyEvent};\n\n// Optimal Size of event channel is yet to be determined. Now just putting a number\nconst EVENT_SUBSCRIBER_CHANNEL_SIZE: usize = 64;\n\n#[derive(Debug, Error)]\npub enum Error {\n    #[error(\"Peers error: {err}\")]\n    BadPeers { err: String },\n\n    #[error(\"Command error: {err}\")]\n    BadCommand { err: String },\n\n    #[error(\"Tokio join error: {source}\")]\n    TokioError { source: tokio::task::JoinError },\n\n    #[error(\"Failed to acquire locked object\")]\n    UnlockError,\n\n    #[error(\"Unexpected type of transaction\")]\n    InvalidTransactionType,\n\n    #[error(\"subnet client error: {source}\")]\n    SubnetError {\n        #[from]\n        source: topos_sequencer_subnet_client::Error,\n    },\n\n    #[error(\"Unable to retrieve key error: {source}\")]\n    UnableToRetrieveKey {\n        #[from]\n        source: topos_crypto::Error,\n    },\n\n    #[error(\"Unable to execute shutdown on the subnet runtime proxy: {0}\")]\n    ShutdownCommunication(mpsc::error::SendError<oneshot::Sender<()>>),\n\n    #[error(\"Shutdown channel receive error {0}\")]\n    ShutdownSignalReceiveError(tokio::sync::oneshot::error::RecvError),\n\n    #[error(\"Invalid previous certificate id\")]\n    InvalidPreviousCertificateId,\n\n    #[error(\"Ill formed subnet history\")]\n    IllFormedSubnetHistory,\n\n    #[error(\"Unable to create certificate 
{0}\")]\n    CertificateGenerationError(String),\n\n    #[error(\"Certificate signing error: {0}\")]\n    CertificateSigningError(topos_core::uci::Error),\n\n    #[error(\"Unable to set source head certificate: {0}\")]\n    SourceHeadCertChannelError(String),\n\n    #[error(\"Unable to send command: {0}\")]\n    CommandEvalChannelError(String),\n\n    #[error(\"Invalid endpoint: {0}\")]\n    InvalidEndpoint(String),\n}\n\n#[derive(Debug, Clone)]\npub struct SubnetRuntimeProxyConfig {\n    pub subnet_id: SubnetId,\n    pub http_endpoint: String,\n    pub ws_endpoint: String,\n    pub subnet_contract_address: String,\n    pub source_head_certificate_id: Option<CertificateId>,\n    pub verifier: u32,\n    pub start_block: Option<u64>,\n}\n\n/// Thread safe client to the protocol aggregate\n#[derive(Debug)]\npub struct SubnetRuntimeProxyWorker {\n    runtime_proxy: Arc<Mutex<SubnetRuntimeProxy>>,\n    commands: mpsc::Sender<SubnetRuntimeProxyCommand>,\n    events: mpsc::Receiver<SubnetRuntimeProxyEvent>,\n}\n\nimpl SubnetRuntimeProxyWorker {\n    /// Creates new instance of the aggregate and returns proxy to it.\n    /// New client instances to the same aggregate can be cloned from the returned one.\n    /// Aggregate is spawned as new task.\n    pub async fn new(\n        config: SubnetRuntimeProxyConfig,\n        signing_key: Vec<u8>,\n    ) -> Result<Self, Error> {\n        let runtime_proxy = SubnetRuntimeProxy::spawn_new(config, signing_key)?;\n        let (events_sender, events_rcv) =\n            mpsc::channel::<SubnetRuntimeProxyEvent>(EVENT_SUBSCRIBER_CHANNEL_SIZE);\n        let commands;\n        {\n            let mut runtime_proxy = runtime_proxy.lock().await;\n            commands = runtime_proxy.commands_channel.clone();\n            runtime_proxy.events_subscribers.push(events_sender);\n        }\n\n        Ok(Self {\n            runtime_proxy,\n            commands,\n            events: events_rcv,\n        })\n    }\n\n    /// Schedule command for 
execution\n    pub async fn eval(&self, cmd: SubnetRuntimeProxyCommand) -> Result<(), Error> {\n        self.commands\n            .send(cmd)\n            .await\n            .map_err(|e| Error::CommandEvalChannelError(e.to_string()))\n    }\n\n    /// Pollable (in select!) events' listener\n    pub async fn next_event(&mut self) -> Result<SubnetRuntimeProxyEvent, Error> {\n        let event = self.events.recv().await;\n        Ok(event.unwrap())\n    }\n\n    /// Shutdown subnet runtime proxy worker\n    pub async fn shutdown(&mut self) -> Result<(), Error> {\n        let runtime_proxy = self.runtime_proxy.lock().await;\n        runtime_proxy.shutdown().await\n    }\n\n    pub async fn get_checkpoints(&self) -> Result<Vec<TargetStreamPosition>, Error> {\n        let runtime_proxy = self.runtime_proxy.lock().await;\n        runtime_proxy.get_checkpoints().await\n    }\n\n    pub async fn get_subnet_id(\n        http_endpoint: &str,\n        contract_address: &str,\n    ) -> Result<SubnetId, Error> {\n        SubnetRuntimeProxy::get_subnet_id(http_endpoint, contract_address).await\n    }\n\n    pub async fn set_source_head_certificate_id(\n        &self,\n        source_head_certificate_id: Option<(CertificateId, u64)>,\n    ) -> Result<(), Error> {\n        let mut runtime_proxy = self.runtime_proxy.lock().await;\n        runtime_proxy\n            .set_source_head_certificate_id(source_head_certificate_id)\n            .await\n    }\n}\n\n/// From the user provided subnet node endpoint (could be ip:port, http://ip:port, https://ip:port)\n/// derive http and ws endpoints that will be used to communicate with the subnet\npub fn derive_endpoints(endpoint: &str) -> Result<(String, String), Error> {\n    let http_endpoint: String;\n    let ws_endpoint: String;\n\n    if endpoint.starts_with(\"https\") {\n        // Use https endpoint as it is\n        // Derive wss endpoint\n        http_endpoint = endpoint.to_string();\n        ws_endpoint = 
http_endpoint.replace(\"https\", \"wss\") + \"/ws\";\n    } else if endpoint.starts_with(\"http\") {\n        // Use http endpoint as it is\n        // Derive ws endpoint\n        http_endpoint = endpoint.to_string();\n        ws_endpoint = http_endpoint.replace(\"http\", \"ws\") + \"/ws\";\n    } else {\n        http_endpoint = format!(\"http://{}\", endpoint);\n        ws_endpoint = format!(\"ws://{}/ws\", endpoint);\n    }\n    Ok((http_endpoint, ws_endpoint))\n}\n\npub mod testing {\n    use super::*;\n\n    pub fn get_runtime(\n        runtime_proxy_worker: &SubnetRuntimeProxyWorker,\n    ) -> Arc<Mutex<SubnetRuntimeProxy>> {\n        runtime_proxy_worker.runtime_proxy.clone()\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    #[test]\n    fn test_derive_endpoints() {\n        use super::derive_endpoints;\n        let (http_endpoint, ws_endpoint) = derive_endpoints(\"10.10.10.13:321\").unwrap();\n        assert_eq!(\n            (http_endpoint.as_str(), ws_endpoint.as_str()),\n            (\"http://10.10.10.13:321\", \"ws://10.10.10.13:321/ws\")\n        );\n        let (http_endpoint, ws_endpoint) = derive_endpoints(\"http://www.example.com\").unwrap();\n        assert_eq!(\n            (http_endpoint.as_str(), ws_endpoint.as_str()),\n            (\"http://www.example.com\", \"ws://www.example.com/ws\")\n        );\n        let (http_endpoint, ws_endpoint) = derive_endpoints(\"https://www.example.com:123\").unwrap();\n        assert_eq!(\n            (http_endpoint.as_str(), ws_endpoint.as_str()),\n            (\n                \"https://www.example.com:123\",\n                \"wss://www.example.com:123/ws\"\n            )\n        );\n    }\n}\n"
  },
  {
    "path": "crates/topos-sequencer-subnet-runtime/src/proxy.rs",
    "content": "//! Protocol implementation guts.\n//!\nuse crate::{certification::Certification, Error, SubnetRuntimeProxyConfig};\nuse opentelemetry::trace::FutureExt;\nuse opentelemetry::Context;\nuse serde::{Deserialize, Serialize};\nuse std::fmt::{Debug, Formatter};\nuse std::sync::Arc;\nuse tokio::sync::Mutex;\nuse tokio::sync::{mpsc, oneshot};\nuse tokio::time::Duration;\nuse topos_core::api::grpc::checkpoints::TargetStreamPosition;\nuse topos_core::uci::{Certificate, CertificateId, SubnetId};\nuse topos_sequencer_subnet_client::{BlockInfo, SubnetClient, SubnetClientListener};\nuse tracing::{debug, error, field, info, info_span, instrument, warn, Instrument, Span};\nuse tracing_opentelemetry::OpenTelemetrySpanExt;\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct Authorities {\n    // TODO: proper dependencies to block type etc\n}\n\n#[derive(Debug, Clone)]\npub enum SubnetRuntimeProxyEvent {\n    /// New certificate is generated\n    NewCertificate {\n        cert: Box<Certificate>,\n        block_number: u64,\n        ctx: Context,\n    },\n    /// New set of authorities in charge of the threshold signature\n    NewEra(Vec<Authorities>),\n}\n\n#[derive(Debug)]\npub enum SubnetRuntimeProxyCommand {\n    /// Upon receiving a new delivered Certificate from the TCE\n    OnNewDeliveredCertificate {\n        certificate: Certificate,\n        position: u64,\n        ctx: Context,\n    },\n}\n\npub struct SubnetRuntimeProxy {\n    pub commands_channel: mpsc::Sender<SubnetRuntimeProxyCommand>,\n    pub events_subscribers: Vec<mpsc::Sender<SubnetRuntimeProxyEvent>>,\n    pub config: SubnetRuntimeProxyConfig,\n    pub certification: Arc<Mutex<Certification>>,\n    command_task_shutdown: mpsc::Sender<oneshot::Sender<()>>,\n    block_task_shutdown: mpsc::Sender<oneshot::Sender<()>>,\n    source_head_certificate_id_sender: Option<oneshot::Sender<Option<(CertificateId, u64)>>>,\n}\n\nimpl Debug for SubnetRuntimeProxy {\n    fn fmt(&self, f: &mut 
Formatter<'_>) -> std::fmt::Result {\n        f.debug_struct(\"RuntimeProxy instance\").finish()\n    }\n}\n\nimpl SubnetRuntimeProxy {\n    pub fn spawn_new(\n        config: SubnetRuntimeProxyConfig,\n        signing_key: Vec<u8>,\n    ) -> Result<Arc<Mutex<SubnetRuntimeProxy>>, crate::Error> {\n        info!(\n            \"Spawning new runtime proxy, http endpoint: {}, ws endpoint {} ethereum contract \\\n             address: {}, \",\n            &config.http_endpoint, &config.ws_endpoint, &config.subnet_contract_address\n        );\n        let (command_sender, mut command_rcv) = mpsc::channel::<SubnetRuntimeProxyCommand>(256);\n        let ws_runtime_endpoint = config.ws_endpoint.clone();\n        let http_runtime_endpoint = config.http_endpoint.clone();\n        let subnet_contract_address = Arc::new(config.subnet_contract_address.clone());\n        let (command_task_shutdown_channel, mut command_task_shutdown) =\n            mpsc::channel::<oneshot::Sender<()>>(1);\n        let (block_task_shutdown_channel, mut block_task_shutdown) =\n            mpsc::channel::<oneshot::Sender<()>>(1);\n        let (source_head_certificate_id_sender, source_head_certificate_id_received) =\n            oneshot::channel();\n\n        let certification = Certification::new(\n            &config.subnet_id,\n            None,\n            config.verifier,\n            signing_key.clone(),\n            config.start_block,\n        )?;\n\n        let runtime_proxy = Arc::new(Mutex::from(Self {\n            commands_channel: command_sender,\n            events_subscribers: Vec::new(),\n            config: config.clone(),\n            command_task_shutdown: command_task_shutdown_channel,\n            block_task_shutdown: block_task_shutdown_channel,\n            certification: certification.clone(),\n            source_head_certificate_id_sender: Some(source_head_certificate_id_sender),\n        }));\n\n        // Runtime block task\n        {\n            let runtime_proxy = 
runtime_proxy.clone();\n            let subnet_contract_address = subnet_contract_address.clone();\n            tokio::spawn(async move {\n                // If the `start_block` sequencer parameter is provided, first block retrieved from blockchain (for genesis certificate)\n                // will be `start_block`. `default_block_sync_start` is hence `start_block`-1\n                // as first block retrieved from subnet node is `latest_acquired_subnet_block_number` + 1\n                let default_block_sync_start: i128 = config\n                    .start_block\n                    .map(|block_number| (block_number - 1) as i128)\n                    .unwrap_or(-1);\n                let mut latest_acquired_subnet_block_number: i128 = default_block_sync_start;\n\n                {\n                    // To start producing certificates, we need to know latest delivered or pending certificate id from TCE\n                    // Lock certification component and wait until we acquire first certificate id for this network\n                    let mut certification = certification.lock().await;\n                    if certification.last_certificate_id.is_none() {\n                        info!(\n                            \"Waiting for the source head certificate id to continue with \\\n                             certificate generation\"\n                        );\n                        // Wait for last_certificate_id retrieved on TCE component setup\n                        match source_head_certificate_id_received.await {\n                            Ok(certificate_and_position) => {\n                                info!(\n                                    \"Source head certificate id received {:?}\",\n                                    certificate_and_position\n                                );\n                                // If tce source head position is provided, continue synchronizing from it\n                                // If the 
`start_block` sequencer parameter is provided and tce source head is missing,\n                                // we should start synchronizing from that block instead of genesis\n                                // If neither tce source head position nor start_block parameters are provided,\n                                // sync should start form -1, so that first fetched is subnet genesis block\n                                let cert_id = certificate_and_position.map(|(id, _position)| id);\n                                let position: i128 = certificate_and_position\n                                    .map(|(_id, position)| position as i128)\n                                    .unwrap_or(default_block_sync_start);\n                                // Certificate generation is now ready to run\n                                certification.last_certificate_id = cert_id;\n                                latest_acquired_subnet_block_number = position;\n                            }\n                            Err(e) => {\n                                // This panic should not happen unless other task retrieving source head certificate has failed\n                                // In that case, close the tread with panic\n                                panic!(\n                                    \"Failed to get source head certificate, unable to proceed \\\n                                     with certificate generation: {e}\"\n                                )\n                            }\n                        }\n                    }\n                }\n\n                // Establish the connection with the Subnet\n                let subnet_listener: Option<SubnetClientListener> = tokio::select! 
{\n                        // Create subnet client\n                        Ok(client) = topos_sequencer_subnet_client::connect_to_subnet_listener_with_retry(\n                            ws_runtime_endpoint.as_str(),\n                            subnet_contract_address.as_str(),\n                        ) => {\n                            Some(client)\n                        }\n                        _ = block_task_shutdown.recv() => {\n                            None\n                    }\n                };\n                let mut subnet_listener = subnet_listener.expect(\"subnet listener\");\n\n                // Sync missing blocks\n                loop {\n                    let current_subnet_block_number: Option<i128> = tokio::select! {\n                            block_number = subnet_listener.get_subnet_block_number() => {\n                                match block_number {\n                                    Ok(block_number) => {\n                                        Some(block_number as i128)\n                                    }\n                                    Err(e) => {\n                                        error!(\"Failed to get subnet block number: {:?}, trying again...\", e);\n                                        tokio::time::sleep(Duration::from_secs(10)).await;\n                                        continue;\n                                    }\n                                }\n                            }\n                            _ = block_task_shutdown.recv() => {\n                                info!(\"Shutting down sync missing blocks task\");\n                                return;\n                            }\n                    };\n                    let current_subnet_block_number = current_subnet_block_number\n                        .expect(\"need valid subnet block number to start syncing\");\n\n                    if latest_acquired_subnet_block_number == current_subnet_block_number {\n       
                 info!(\n                            \"Finished synchronization of blocks, latest block received is {}\",\n                            latest_acquired_subnet_block_number\n                        );\n                        break;\n                    }\n\n                    info!(\n                        \"Latest retrieved subnet block is {}, current subnet block is {}\",\n                        latest_acquired_subnet_block_number, current_subnet_block_number\n                    );\n                    // Sync historical blocks\n                    while latest_acquired_subnet_block_number < current_subnet_block_number {\n                        let next_block_number = latest_acquired_subnet_block_number + 1;\n                        info!(\"Retrieving historical block {}\", next_block_number);\n                        tokio::select! {\n                            result = SubnetRuntimeProxy::retrieve_and_process_block(\n                                runtime_proxy.clone(),\n                                &mut subnet_listener,\n                                certification.clone(),\n                                next_block_number as u64,\n                            ) => {\n                                if let Err(e) = result {\n                                    error!(\"Unable to perform initial subnet block sync: {e}, trying again...\");\n                                    tokio::time::sleep(Duration::from_secs(10)).await;\n                                    continue;\n                                } else {\n                                    latest_acquired_subnet_block_number = next_block_number;\n                                }\n                            }\n                            _ = block_task_shutdown.recv() => {\n                                info!(\"Shutting down sync missing blocks task during synchronization\");\n                                return;\n                            }\n                        
}\n\n                        // Give it a little rest for other threads to do their job\n                        tokio::time::sleep(Duration::from_millis(20)).await;\n                    }\n                }\n\n                // Create a new subscription stream to listen for new blocks from subnet node\n                let mut subscription_stream =\n                    match subnet_listener.new_block_subscription_stream().await {\n                        Ok(stream) => stream,\n                        Err(e) => {\n                            panic!(\n                                \"Failed to open subnet node block subscription stream, unable to \\\n                                 proceed with certificate generation: {e}\"\n                            )\n                        }\n                    };\n\n                info!(\"Block subscription stream opened, listening for new blocks...\");\n\n                // Go to standard mode of listening for new blocks\n                let shutdowned: Option<oneshot::Sender<()>> = loop {\n                    tokio::select! 
{\n                        result = subnet_listener.wait_for_new_block(&mut subscription_stream) => {\n                            match result {\n                                Ok(block) => {\n                                    let new_block_number = block.number as i128;\n                                    info!(\"Successfully received new block {} from the subnet subscription\", new_block_number);\n                                    if let Err(e) = SubnetRuntimeProxy::process_block(\n                                        runtime_proxy.clone(),\n                                        certification.clone(),\n                                        block\n                                    ).await {\n                                        error!(\"Failed to process next block: {}, exit block production!\", e);\n                                        break None;\n                                    }\n                                }\n                                Err(e) => {\n                                    error!(\"Failed to retrieve next block: {}, trying again soon\", e);\n                                    tokio::time::sleep(Duration::from_millis(1000)).await;\n                                    continue;\n                                }\n                            }\n                        }\n                        shutdown = block_task_shutdown.recv() => {\n                            break shutdown;\n                        }\n                    }\n                };\n\n                if let Some(sender) = shutdowned {\n                    info!(\"Shutting down subnet runtime block processing task\");\n                    _ = sender.send(());\n                } else {\n                    warn!(\"Shutting down subnet runtime block processing task due to error\");\n                }\n            })\n        };\n\n        // Runtime command task\n        tokio::spawn(async move {\n            // Establish the connection with the 
Subnet\n            let mut subnet_client: Option<SubnetClient> = tokio::select! {\n                    // Create subnet client\n                    Ok(client) = topos_sequencer_subnet_client::connect_to_subnet_with_retry(\n                        http_runtime_endpoint.as_ref(),\n                        Some(signing_key.clone()),\n                        subnet_contract_address.as_str(),\n                    ) => {\n                        info!(\"Connected to subnet node {}\", &http_runtime_endpoint);\n                        Some(client)\n                    }\n                    _ = command_task_shutdown.recv() => {\n                        None\n                    }\n            };\n\n            let shutdowned: Option<oneshot::Sender<()>> = loop {\n                tokio::select! {\n                    // Poll runtime proxy commands channel\n                    cmd = command_rcv.recv() => {\n                        Self::on_command(&config, subnet_client.as_mut().unwrap(), cmd).await;\n                    },\n                    shutdown = command_task_shutdown.recv() => {\n                        break shutdown;\n                    }\n                }\n            };\n\n            if let Some(sender) = shutdowned {\n                info!(\"Shutting down subnet runtime command processing task\");\n                _ = sender.send(());\n            } else {\n                warn!(\"Shutting down subnet runtime command processing task due to error\");\n            }\n        });\n\n        Ok(runtime_proxy)\n    }\n\n    async fn retrieve_and_process_block(\n        subnet_runtime_proxy: Arc<Mutex<SubnetRuntimeProxy>>,\n        subnet_listener: &mut SubnetClientListener,\n        certification: Arc<Mutex<Certification>>,\n        next_block: u64,\n    ) -> Result<(), Error> {\n        match subnet_listener.get_finalized_block(next_block).await {\n            Ok(block_info) => {\n                let block_number = block_info.number;\n                info!(\n   
                 \"Successfully fetched the finalized block {block_number} from the subnet \\\n                     runtime\"\n                );\n\n                let mut certification = certification.lock().await;\n\n                // Update certificate block history\n                certification.append_blocks(vec![block_info]);\n\n                let new_certificates = match certification.generate_certificates().await {\n                    Ok(certificates) => certificates,\n                    Err(e) => {\n                        error!(\"Unable to generate certificates: {e}\");\n                        return Err(e);\n                    }\n                };\n\n                debug!(\"Generated new certificates {new_certificates:?}\");\n\n                for cert in new_certificates {\n                    Self::send_new_certificate(subnet_runtime_proxy.clone(), cert, next_block).await\n                }\n                info!(\"Block {} processed\", next_block);\n                Ok(())\n            }\n            Err(topos_sequencer_subnet_client::Error::BlockNotAvailable(block_number)) => {\n                warn!(\"New block {block_number} not yet available, trying again soon\");\n                Err(Error::SubnetError {\n                    source: topos_sequencer_subnet_client::Error::BlockNotAvailable(block_number),\n                })\n            }\n            Err(e) => {\n                // TODO: Determine if task should end on some type of error\n                error!(\"Failed to fetch the new finalized block: {e}\");\n                Err(Error::SubnetError { source: e })\n            }\n        }\n    }\n\n    async fn process_block(\n        subnet_runtime_proxy: Arc<Mutex<SubnetRuntimeProxy>>,\n        certification: Arc<Mutex<Certification>>,\n        block_info: BlockInfo,\n    ) -> Result<(), Error> {\n        let mut certification = certification.lock().await;\n        let block_number = block_info.number;\n\n        // Update certificate 
block history\n        certification.append_blocks(vec![block_info]);\n\n        let new_certificates = certification.generate_certificates().await?;\n\n        debug!(\"Generated new certificates {new_certificates:?}\");\n\n        for cert in new_certificates {\n            Self::send_new_certificate(subnet_runtime_proxy.clone(), cert, block_number).await\n        }\n        info!(\"Block {} processed\", block_number);\n        Ok(())\n    }\n\n    /// Dispatch newly generated certificate to TCE client\n    #[instrument(name = \"NewCertificate\", fields(certification = field::Empty, source_subnet_id = field::Empty, certificate_id = field::Empty))]\n    async fn send_new_certificate(\n        subnet_runtime_proxy: Arc<Mutex<SubnetRuntimeProxy>>,\n        cert: Certificate,\n        block_number: u64,\n    ) {\n        let mut runtime_proxy = subnet_runtime_proxy.lock().await;\n        Span::current().record(\"certificate_id\", cert.id.to_string());\n        Span::current().record(\"source_subnet_id\", cert.source_subnet_id.to_string());\n\n        runtime_proxy\n            .send_out_event(SubnetRuntimeProxyEvent::NewCertificate {\n                cert: Box::new(cert),\n                block_number,\n                ctx: Span::current().context(),\n            })\n            .with_current_context()\n            .instrument(Span::current())\n            .await;\n    }\n\n    /// Send certificate to target subnet Topos Core contract for verification\n    async fn push_certificate(\n        runtime_proxy_config: &SubnetRuntimeProxyConfig,\n        subnet_client: &SubnetClient,\n        cert: &Certificate,\n        position: u64,\n    ) -> Result<Option<String>, Error> {\n        debug!(\n            \"Pushing certificate with id {} to target subnet {}, tcc {}\",\n            cert.id, runtime_proxy_config.subnet_id, runtime_proxy_config.subnet_contract_address,\n        );\n        let receipt = subnet_client.push_certificate(cert, position).await?;\n        
debug!(\"Push certificate transaction receipt: {:?}\", &receipt);\n        let tx_hash =\n            receipt.map(|tx_receipt| \"0x\".to_string() + &hex::encode(tx_receipt.transaction_hash));\n        Ok(tx_hash)\n    }\n\n    async fn on_command(\n        runtime_proxy_config: &SubnetRuntimeProxyConfig,\n        subnet_client: &SubnetClient,\n        mb_cmd: Option<SubnetRuntimeProxyCommand>,\n    ) {\n        match mb_cmd {\n            Some(cmd) => match cmd {\n                // Process certificate retrieved from TCE node\n                SubnetRuntimeProxyCommand::OnNewDeliveredCertificate {\n                    certificate,\n                    position,\n                    ctx,\n                } => {\n                    let span_subnet_runtime_proxy = info_span!(\"Subnet Runtime Proxy\");\n                    span_subnet_runtime_proxy.set_parent(ctx);\n\n                    async {\n                        info!(\n                            \"Processing certificate received from TCE, cert_id={}\",\n                            &certificate.id\n                        );\n\n                        // Verify certificate signature\n                        // Well known subnet id is public key for certificate verification\n                        // Public key of secp256k1 is 33 bytes, we are keeping last 32 bytes as subnet id\n                        // Add manually first byte 0x02\n                        let public_key = certificate.source_subnet_id.to_secp256k1_public_key();\n\n                        // Verify signature of the certificate\n                        match topos_crypto::signatures::verify(\n                            &public_key,\n                            certificate.get_payload().as_slice(),\n                            certificate.signature.as_slice(),\n                        ) {\n                            Ok(()) => {\n                                info!(\"Certificate {} passed verification\", certificate.id)\n                     
       }\n                            Err(e) => {\n                                error!(\"Failed to verify certificate id {}: {e}\", certificate.id);\n                                return;\n                            }\n                        }\n\n                        let span_push_certificate = info_span!(\"Subnet push certificate call\");\n\n                        // Push the Certificate to the ToposCore contract on the target subnet\n                        match SubnetRuntimeProxy::push_certificate(\n                            runtime_proxy_config,\n                            subnet_client,\n                            &certificate,\n                            position,\n                        )\n                        .with_context(span_push_certificate.context())\n                        .instrument(span_push_certificate)\n                        .await\n                        {\n                            Ok(tx_hash) => {\n                                debug!(\n                                    \"Successfully pushed the Certificate {} to target subnet with \\\n                                     tx hash {:?}\",\n                                    &certificate.id, &tx_hash\n                                );\n                            }\n                            Err(e) => {\n                                error!(\n                                    \"Failed to push the Certificate {} to target subnet: {e}\",\n                                    &certificate.id\n                                );\n                            }\n                        }\n                    }\n                    .with_context(span_subnet_runtime_proxy.context())\n                    .instrument(span_subnet_runtime_proxy)\n                    .await\n                }\n            },\n            _ => {\n                warn!(\"Empty command was passed\");\n            }\n        }\n    }\n\n    async fn send_out_event(&mut self, evt: 
SubnetRuntimeProxyEvent) {\n        for tx in &self.events_subscribers {\n            if let Err(e) = tx.send(evt.clone()).await {\n                error!(\"Unable to send subnet runtime proxy event: {e}\");\n            }\n        }\n    }\n\n    /// Shutdown subnet runtime proxy tasks\n    pub async fn shutdown(&self) -> Result<(), Error> {\n        let (command_task_sender, command_task_receiver) = oneshot::channel();\n        self.command_task_shutdown\n            .send(command_task_sender)\n            .await\n            .map_err(Error::ShutdownCommunication)?;\n        command_task_receiver\n            .await\n            .map_err(Error::ShutdownSignalReceiveError)?;\n\n        let (block_task_sender, block_task_receiver) = oneshot::channel();\n        self.block_task_shutdown\n            .send(block_task_sender)\n            .await\n            .map_err(Error::ShutdownCommunication)?;\n        block_task_receiver\n            .await\n            .map_err(Error::ShutdownSignalReceiveError)?;\n        Ok(())\n    }\n\n    pub async fn set_source_head_certificate_id(\n        &mut self,\n        source_head_certificate_id: Option<(CertificateId, u64)>,\n    ) -> Result<(), Error> {\n        self.source_head_certificate_id_sender\n            .take()\n            .ok_or_else(|| {\n                Error::SourceHeadCertChannelError(\n                    \"source head certificate id was previously set\".to_string(),\n                )\n            })?\n            .send(source_head_certificate_id)\n            .map_err(|_| Error::SourceHeadCertChannelError(\"channel error\".to_string()))\n    }\n\n    pub async fn get_checkpoints(&self) -> Result<Vec<TargetStreamPosition>, Error> {\n        info!(\"Connecting to subnet to query for checkpoints...\");\n        let http_runtime_endpoint = self.config.http_endpoint.as_ref();\n        // Create subnet client\n        let subnet_client = match topos_sequencer_subnet_client::connect_to_subnet_with_retry(\n            
http_runtime_endpoint,\n            None, // We do not need actual key here as we are just reading state\n            self.config.subnet_contract_address.as_str(),\n        )\n        .await\n        {\n            Ok(subnet_client) => {\n                info!(\n                    \"Connected to subnet node to acquire checkpoints {}\",\n                    http_runtime_endpoint\n                );\n                subnet_client\n            }\n            Err(e) => {\n                error!(\"Unable to connect to the subnet node to get checkpoints: {e}\");\n                return Err(Error::SubnetError { source: e });\n            }\n        };\n\n        match subnet_client.get_checkpoints(&self.config.subnet_id).await {\n            Ok(checkpoints) => {\n                info!(\"Successfully retrieved the Checkpoints\");\n                Ok(checkpoints)\n            }\n            Err(e) => {\n                error!(\n                    \"Unable to get the checkpoints for subnet {}\",\n                    self.config.subnet_id\n                );\n                Err(Error::SubnetError { source: e })\n            }\n        }\n    }\n\n    /// Get the particular subnet id (identifying subnet in the topos protocol)\n    /// from the subnet node smart contract\n    pub async fn get_subnet_id(\n        http_endpoint: &str,\n        contract_address: &str,\n    ) -> Result<SubnetId, Error> {\n        info!(\"Connecting to subnet to query for subnet id...\");\n        // Create subnet client\n        let subnet_client = match topos_sequencer_subnet_client::connect_to_subnet_with_retry(\n            http_endpoint,\n            None, // We do not need actual key here as we are just reading state\n            contract_address,\n        )\n        .await\n        {\n            Ok(subnet_client) => {\n                info!(\n                    \"Connected to subnet node to acquire subnet id {}\",\n                    http_endpoint\n                );\n                
subnet_client\n            }\n            Err(e) => {\n                error!(\"Unable to connect to the subnet node to get subnet id: {e}\");\n                return Err(Error::SubnetError { source: e });\n            }\n        };\n\n        match subnet_client.get_subnet_id().await {\n            Ok(subnet_id) => {\n                info!(\"Successfully retrieved the subnet id for subnet: {subnet_id}\");\n                Ok(subnet_id)\n            }\n            Err(e) => {\n                error!(\"Unable to get the subnet id {e}\",);\n                Err(Error::SubnetError { source: e })\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "crates/topos-sequencer-subnet-runtime/tests/common/abi.rs",
    "content": "use ethers::{\n    contract::abigen,\n    core::k256::ecdsa::SigningKey,\n    middleware::SignerMiddleware,\n    prelude::Wallet,\n    providers::{Http, Provider},\n};\n\n//TODO I haven't find a way to parametrize version, macro accepts strictly string literal\n// `topos-smart-contracts` build artifacts directory must be copied to the root topos directory to run these tests\nabigen!(\n    TokenDeployerContract,\n    \"./artifacts/contracts/topos-core/TokenDeployer.sol/TokenDeployer.json\"\n);\nabigen!(\n    ToposCoreContract,\n    \"./artifacts/contracts/topos-core/ToposCore.sol/ToposCore.json\"\n);\nabigen!(\n    ToposCoreProxyContract,\n    \"./artifacts/contracts/topos-core/ToposCoreProxy.sol/ToposCoreProxy.json\"\n);\nabigen!(\n    ToposMessagingContract,\n    \"./artifacts/contracts/topos-core/ToposMessaging.sol/ToposMessaging.json\"\n);\nabigen!(\n    ERC20MessagingContract,\n    \"./artifacts/contracts/examples/ERC20Messaging.sol/ERC20Messaging.json\"\n);\nabigen!(\n    IToposCore,\n    \"./artifacts/contracts/interfaces/IToposCore.sol/IToposCore.json\"\n);\nabigen!(\n    IToposMessaging,\n    \"./artifacts/contracts/interfaces/IToposMessaging.sol/IToposMessaging.json\"\n);\nabigen!(\n    IERC20Messaging,\n    \"./artifacts/contracts/interfaces/IERC20Messaging.sol/IERC20Messaging.json\"\n);\n\nabigen!(\n    IERC20,\n    r\"[\n       function totalSupply() external view returns (uint)\n\n       function balanceOf(address account) external view returns (uint)\n\n       function transfer(address recipient, uint amount) external returns (bool)\n\n       function allowance(address owner, address spender) external view returns (uint)\n\n       function approve(address spender, uint amount) external returns (bool)\n\n       function transferFrom(address sender, address recipient, uint amount) external returns (bool)\n       ]\"\n);\n\npub type IToposCoreClient = IToposCore<SignerMiddleware<Provider<Http>, Wallet<SigningKey>>>;\npub type 
IToposMessagingClient =\n    IToposMessaging<SignerMiddleware<Provider<Http>, Wallet<SigningKey>>>;\npub type IERC20Client = IERC20<SignerMiddleware<Provider<Http>, Wallet<SigningKey>>>;\npub type IERC20MessagingClient =\n    IERC20Messaging<SignerMiddleware<Provider<Http>, Wallet<SigningKey>>>;\n"
  },
  {
    "path": "crates/topos-sequencer-subnet-runtime/tests/common/mod.rs",
    "content": "pub(crate) mod abi;\npub mod subnet_test_data;\n"
  },
  {
    "path": "crates/topos-sequencer-subnet-runtime/tests/common/subnet_test_data.rs",
    "content": "use std::io::Write;\n\npub const TEST_VALIDATOR_KEY_FILE_DATA: &str =\n    r#\"11eddfae7abe45531b3f18342c8062969323a7131d3043f1a33c40df74803cc7\"#;\n\n#[allow(dead_code)]\npub fn generate_test_subnet_data_dir() -> Result<std::path::PathBuf, Box<dyn std::error::Error>> {\n    const TEST_VALIDATOR_SUBNET_DATA_DIR: &str = \"/test_data\";\n    const TEST_VALIDATOR_KEY_FILE_NAME: &str = \"/consensus/validator.key\";\n    let tmp = std::env::temp_dir().to_str().unwrap().to_string();\n    let subnet_data_dir = std::path::PathBuf::from(tmp.clone() + TEST_VALIDATOR_SUBNET_DATA_DIR);\n    let consensus_dir =\n        std::path::PathBuf::from(tmp.clone() + TEST_VALIDATOR_SUBNET_DATA_DIR + \"/consensus\");\n    let keystore_file_path = std::path::PathBuf::from(\n        tmp + TEST_VALIDATOR_SUBNET_DATA_DIR + TEST_VALIDATOR_KEY_FILE_NAME,\n    );\n    std::fs::create_dir_all(consensus_dir)?;\n    let mut keystore_file = std::fs::File::create(keystore_file_path)?;\n    writeln!(&mut keystore_file, \"{TEST_VALIDATOR_KEY_FILE_DATA}\",)?;\n    Ok(subnet_data_dir)\n}\n\npub fn generate_test_private_key() -> Vec<u8> {\n    hex::decode(TEST_VALIDATOR_KEY_FILE_DATA).unwrap()\n}\n"
  },
  {
    "path": "crates/topos-sequencer-subnet-runtime/tests/subnet_contract.rs",
    "content": "#![allow(unknown_lints)]\nuse crate::common::abi;\nuse ethers::{\n    abi::{ethabi::ethereum_types::U256, Address},\n    core::types::Filter,\n    middleware::SignerMiddleware,\n    providers::{Http, Middleware, Provider},\n    signers::{LocalWallet, Signer},\n    types::{Block, H256},\n};\nuse rstest::*;\nuse serial_test::serial;\nuse std::collections::HashSet;\nuse std::process::{Child, Command};\nuse std::sync::Arc;\nuse test_log::test;\nuse tokio::sync::Mutex;\nuse topos_core::uci::{Certificate, CertificateId, SubnetId, SUBNET_ID_LENGTH};\nuse topos_sequencer_subnet_runtime::proxy::{SubnetRuntimeProxyCommand, SubnetRuntimeProxyEvent};\nuse tracing::{error, info, warn, Span};\nuse tracing_opentelemetry::OpenTelemetrySpanExt;\n\nmod common;\nuse crate::common::subnet_test_data::generate_test_private_key;\nuse topos_core::api::grpc::checkpoints::TargetStreamPosition;\nuse topos_sequencer_subnet_runtime::{SubnetRuntimeProxyConfig, SubnetRuntimeProxyWorker};\n\nuse topos_test_sdk::constants::*;\n\n// Local test network with default 2 seconds block\nconst STANDALONE_SUBNET_BLOCK_TIME: u64 = 2;\n// Local test network with 12 seconds block, useful for multiple transactions in one block tests\nconst STANDALONE_SUBNET_WITH_LONG_BLOCKS_BLOCK_TIME: u64 = 12;\n\nconst SUBNET_RPC_PORT: u32 = 8545;\n// Account 0x4AAb25B4fAd0Beaac466050f3A7142A502f4Cf0a\nconst TEST_SECRET_ETHEREUM_KEY: &str =\n    \"ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80\";\nconst TEST_ETHEREUM_ACCOUNT: &str = \"0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266\";\nconst TEST_SUBNET_ID: &str = \"6464646464646464646464646464646464646464646464646464646464646464\";\nconst TOKEN_SYMBOL: &str = \"TKX\";\n\n// Accounts pre-filled in STANDALONE_SUBNET_WITH_LONG_BLOCKS\nconst TEST_ACCOUNT_ALITH_KEY: &str =\n    \"59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d\";\nconst TEST_ACCOUNT_ALITH_ACCOUNT: &str = \"0x70997970C51812dc3A010C7d01b50e0d17dc79C8\";\nconst 
TEST_ACCOUNT_BALATHAR_KEY: &str =\n    \"5de4111afa1a4b94908f83103eb1f1706367c2e68ca870fc3fb9a804cdab365a\";\nconst TEST_ACCOUNT_BALATHAR_ACCOUNT: &str = \"0x3C44CdDdB6a900fa2b585dd299e03d12FA4293BC\";\nconst TEST_ACCOUNT_CEZAR_KEY: &str =\n    \"7c852118294e51e653712a81e05800f419141751be58f605c371e15141b007a6\";\nconst TEST_ACCOUNT_CEZAR_ACCOUNT: &str = \"0x90F79bf6EB2c4f870365E785982E1f101E93b906\";\n\nconst PREV_CERTIFICATE_ID_1: CertificateId = CERTIFICATE_ID_4;\nconst PREV_CERTIFICATE_ID_2: CertificateId = CERTIFICATE_ID_5;\nconst CERTIFICATE_ID_1: CertificateId = CERTIFICATE_ID_6;\nconst CERTIFICATE_ID_2: CertificateId = CERTIFICATE_ID_7;\nconst CERTIFICATE_ID_3: CertificateId = CERTIFICATE_ID_8;\nconst DEFAULT_GAS: u64 = 5_000_000;\n\nfn spawn_subnet_node(\n    port: u32,\n    block_time: u64, // Block time in seconds\n) -> std::io::Result<Child> {\n    // Ignore output, too verbose\n    let child = Command::new(\"anvil\")\n        .args([\n            \"--block-time\",\n            &block_time.to_string(),\n            \"--port\",\n            &port.to_string(),\n        ])\n        .stdout(std::process::Stdio::null())\n        .spawn();\n\n    child\n}\n\n#[allow(dead_code)]\nstruct Context {\n    pub i_topos_core: abi::IToposCoreClient,\n    pub i_topos_messaging: abi::IToposMessagingClient,\n    pub i_erc20_messaging: abi::IERC20MessagingClient,\n    pub subnet_node_handle: Option<std::process::Child>,\n    pub port: u32,\n}\n\nimpl Context {\n    pub async fn shutdown(mut self) -> Result<(), Box<dyn std::error::Error>> {\n        // Wait for the subnet node to close\n        self.subnet_node_handle\n            .take()\n            .expect(\"Valid subnet node handle\")\n            .kill()\n            .expect(\"Could not kill anvil subprocess\");\n        Ok(())\n    }\n\n    pub fn jsonrpc(&self) -> String {\n        format!(\"http://127.0.0.1:{}\", self.port)\n    }\n\n    pub fn jsonrpc_ws(&self) -> String {\n        format!(\"ws://127.0.0.1:{}\", 
self.port)\n    }\n}\n\nimpl Drop for Context {\n    fn drop(&mut self) {\n        if let Some(mut child) = self.subnet_node_handle.take() {\n            child.kill().expect(\"Could not kill anvil subprocess\");\n        }\n    }\n}\n\nasync fn create_new_erc20msg_client(\n    deploy_key: &str,\n    endpoint: &str,\n    erc20_messaging_contract_address: Address,\n) -> Result<abi::IERC20MessagingClient, Box<dyn std::error::Error>> {\n    let wallet: LocalWallet = deploy_key.parse()?;\n    let http_provider =\n        Provider::<Http>::try_from(endpoint)?.interval(std::time::Duration::from_millis(20u64));\n    let chain_id = http_provider.get_chainid().await?;\n    let client = Arc::new(SignerMiddleware::new(\n        http_provider,\n        wallet.with_chain_id(chain_id.as_u64()),\n    ));\n    Ok(abi::IERC20Messaging::new(\n        erc20_messaging_contract_address,\n        client,\n    ))\n}\n\nasync fn create_new_erc20_client(\n    deploy_key: &str,\n    endpoint: &str,\n    erc20_contract_address: Address,\n) -> Result<abi::IERC20Client, Box<dyn std::error::Error>> {\n    let wallet: LocalWallet = deploy_key.parse()?;\n    let http_provider =\n        Provider::<Http>::try_from(endpoint)?.interval(std::time::Duration::from_millis(20u64));\n    let chain_id = http_provider.get_chainid().await?;\n    let client = Arc::new(SignerMiddleware::new(\n        http_provider,\n        wallet.with_chain_id(chain_id.as_u64()),\n    ));\n    let i_erc20 = abi::IERC20::new(erc20_contract_address, client);\n    Ok(i_erc20)\n}\n\nasync fn deploy_contracts(\n    deploy_key: &str,\n    endpoint: &str,\n) -> Result<\n    (\n        abi::IToposCoreClient,\n        abi::IToposMessagingClient,\n        abi::IERC20MessagingClient,\n    ),\n    Box<dyn std::error::Error>,\n> {\n    use ethers::abi::Token;\n\n    let wallet: LocalWallet = deploy_key.parse()?;\n    let http_provider =\n        Provider::<Http>::try_from(endpoint)?.interval(std::time::Duration::from_millis(20u64));\n    
let chain_id = http_provider.get_chainid().await?;\n    let wallet_account = wallet.address();\n    let client = Arc::new(SignerMiddleware::new(\n        http_provider,\n        wallet.with_chain_id(chain_id.as_u64()),\n    ));\n\n    // Deploying contracts\n    info!(\"Deploying TokenDeployer contract...\");\n    let token_deployer_contract = abi::TokenDeployerContract::deploy(client.clone(), ())?\n        .gas(DEFAULT_GAS)\n        .chain_id(chain_id.as_u64())\n        .legacy()\n        .send()\n        .await?;\n    info!(\n        \"TokenDeployer contract deployed to 0x{:x}\",\n        token_deployer_contract.address()\n    );\n\n    info!(\"Deploying ToposCore contract...\");\n    let topos_core_contract = abi::ToposCoreContract::deploy(client.clone(), ())?\n        .gas(DEFAULT_GAS)\n        .chain_id(chain_id.as_u64())\n        .legacy()\n        .send()\n        .await?;\n    info!(\n        \"ToposCore contract deployed to 0x{:x}\",\n        topos_core_contract.address()\n    );\n\n    let topos_core_contact_address: Token = Token::Address(topos_core_contract.address());\n    let admin_account = vec![wallet_account];\n    let new_admin_threshold = U256::from(1);\n\n    info!(\"Deploying ToposCoreProxy contract...\");\n    let topos_core_proxy_contract =\n        abi::ToposCoreProxyContract::deploy(client.clone(), topos_core_contact_address)?\n            .gas(DEFAULT_GAS)\n            .chain_id(chain_id.as_u64())\n            .legacy()\n            .send()\n            .await?;\n    info!(\n        \"ToposCoreProxy contract deployed to 0x{:x}\",\n        topos_core_proxy_contract.address()\n    );\n    let i_topos_core = abi::IToposCore::new(topos_core_proxy_contract.address(), client.clone());\n\n    if let Err(e) = i_topos_core\n        .initialize(admin_account, new_admin_threshold)\n        .legacy()\n        .gas(DEFAULT_GAS)\n        .send()\n        .await\n        .map_err(|e| {\n            error!(\"Unable to initialize topos core contract: 
{e}\");\n            e\n        })?\n        .await\n    {\n        panic!(\"Error initializing topos core contract: {e}\");\n    }\n\n    info!(\"Deploying ERC20Messaging contract...\");\n    let erc20_messaging_contract = abi::ERC20MessagingContract::deploy(\n        client.clone(),\n        (\n            token_deployer_contract.address(),\n            topos_core_proxy_contract.address(),\n        ),\n    )?\n    .gas(DEFAULT_GAS)\n    .chain_id(chain_id.as_u64())\n    .legacy()\n    .send()\n    .await?;\n    info!(\n        \"ERC20 contract deployed to 0x{:x}\",\n        erc20_messaging_contract.address()\n    );\n\n    let i_topos_messaging =\n        abi::IToposMessaging::new(erc20_messaging_contract.address(), client.clone());\n    let i_erc20_messaging = abi::IERC20Messaging::new(erc20_messaging_contract.address(), client);\n\n    // Set network subnet id\n    info!(\n        \"Updating new contract subnet network id to {}\",\n        SOURCE_SUBNET_ID_1.to_string()\n    );\n\n    if let Err(e) = i_topos_core\n        .set_network_subnet_id(SOURCE_SUBNET_ID_1.as_array().to_owned())\n        .legacy()\n        .gas(DEFAULT_GAS)\n        .send()\n        .await\n        .map_err(|e| {\n            error!(\"Unable to set network id: {e}\");\n            e\n        })?\n        .await\n    {\n        panic!(\"Error setting network subnet id: {e}\");\n    }\n\n    match i_topos_core.network_subnet_id().await {\n        Ok(subnet_id) => {\n            info!(\"Network subnet id {:?} successfully set\", subnet_id);\n        }\n        Err(e) => {\n            error!(\"Error retrieving subnet id: {e}\");\n        }\n    }\n\n    Ok((i_topos_core, i_topos_messaging, i_erc20_messaging))\n}\n\nasync fn deploy_test_token(\n    deploy_key: &str,\n    endpoint: &str,\n    topos_messaging_address: Address,\n) -> Result<abi::IERC20Client, Box<dyn std::error::Error>> {\n    use ethers::abi::Token;\n\n    let wallet: LocalWallet = deploy_key.parse()?;\n    let http_provider =\n      
  Provider::<Http>::try_from(endpoint)?.interval(std::time::Duration::from_millis(20u64));\n    let chain_id = http_provider.get_chainid().await?;\n    let client = Arc::new(SignerMiddleware::new(\n        http_provider,\n        wallet.with_chain_id(chain_id.as_u64()),\n    ));\n\n    let ierc20_messaging = abi::IERC20Messaging::new(topos_messaging_address, client.clone());\n\n    // Deploy token\n    let token_name: Token = Token::String(\"Test Token\".to_string());\n    let token_symbol: Token = Token::String(TOKEN_SYMBOL.to_string());\n    let token_mint_cap: Token = Token::Uint(U256::from(100_000_000));\n    let token_daily_mint_limit: Token = Token::Uint(U256::from(100));\n    let token_initial_supply: Token = Token::Uint(U256::from(10_000_000));\n    let token_encoded_params: ethers::types::Bytes = ethers::abi::encode(&[\n        token_name.clone(),\n        token_symbol.clone(),\n        token_mint_cap,\n        token_daily_mint_limit,\n        token_initial_supply,\n    ])\n    .into();\n    info!(\n        \"Deploying new token {} with symbol {}\",\n        token_name, token_symbol\n    );\n    let deploy_outcome = ierc20_messaging\n        .deploy_token(token_encoded_params)\n        .legacy()\n        .gas(DEFAULT_GAS)\n        .send()\n        .await\n        .map_err(|e| {\n            error!(\"Unable deploy token: {e}\");\n            e\n        })?\n        .await;\n\n    match deploy_outcome {\n        Ok(r) => {\n            info!(\"Token deployed: {:?}\", r);\n        }\n        Err(e) => {\n            panic!(\"Error deploying token: {e}\");\n        }\n    }\n\n    let events = ierc20_messaging\n        .event::<abi::ierc20_messaging::TokenDeployedFilter>()\n        .from_block(0);\n    let events = events.query().await?;\n    if events.is_empty() {\n        panic!(\n            \"Missing TokenDeployed event. Token contract is not deployed to test subnet. 
Could \\\n             not execute test\"\n        );\n    }\n    let token_address = events[0].token_address;\n    info!(\"Token contract deployed to {}\", token_address.to_string());\n\n    let i_erc20 = abi::IERC20Client::new(token_address, client);\n\n    Ok(i_erc20)\n}\n\nasync fn check_received_certificate(\n    mut runtime_proxy_worker: SubnetRuntimeProxyWorker,\n    received_certificates: Arc<Mutex<Vec<(u64, Certificate)>>>,\n    expected_block_numbers: Vec<u64>,\n    expected_blocks: Vec<Block<H256>>,\n) -> Result<(), Box<dyn std::error::Error>> {\n    let start_height = *expected_block_numbers.first().unwrap();\n    let end_height = *expected_block_numbers.last().unwrap();\n    while let Ok(event) = runtime_proxy_worker.next_event().await {\n        if let SubnetRuntimeProxyEvent::NewCertificate {\n            cert,\n            block_number,\n            ctx: _,\n        } = event\n        {\n            info!(\n                \"New certificate event received, block number: {} cert id: {} target subnets: \\\n                 {:?} state root {}\",\n                block_number,\n                cert.id,\n                cert.target_subnets,\n                hex::encode(cert.state_root)\n            );\n            let mut received_certificates = received_certificates.lock().await;\n            received_certificates.push((block_number, *cert));\n\n            if received_certificates\n                .iter()\n                .take(end_height as usize + 1)\n                .map(|(height, _cert)| height)\n                .copied()\n                .collect::<Vec<_>>()\n                == expected_block_numbers\n            {\n                info!(\n                    \"Received all certificates for blocks from {} to {}\",\n                    start_height, end_height\n                );\n                // Check if state root matches for all blocks\n                for expected_height in expected_block_numbers {\n                    let index = 
(expected_height - start_height) as usize;\n                    let received_certificate = &received_certificates[index].1;\n                    if expected_blocks[index].state_root\n                        != ethers::types::TxHash(received_certificate.state_root)\n                    {\n                        error!(\n                            \"State root mismatch, block: {:#?}\\n received certificate: {:#?}\",\n                            expected_blocks[index], received_certificates[index].1\n                        );\n                        panic!(\"State root mismatch\");\n                    }\n                }\n                info!(\n                    \"State root check successfully passed for blocks from {} to {}\",\n                    start_height, end_height\n                );\n\n                return Ok::<(), Box<dyn std::error::Error>>(());\n            }\n        }\n    }\n    panic!(\"Expected event not received\");\n}\n\n#[fixture]\nasync fn context_running_subnet_node(\n    #[default(8545)] port: u32,\n    #[default(STANDALONE_SUBNET_BLOCK_TIME)] block_time: u64,\n) -> Context {\n    info!(\n        \"Starting subnet node on port {}, block time: {}s\",\n        port, block_time\n    );\n\n    let subnet_node_handle = match spawn_subnet_node(port, block_time) {\n        Ok(subnet_node_handle) => subnet_node_handle,\n        Err(e) => {\n            if e.kind() == std::io::ErrorKind::NotFound {\n                panic!(\n                    \"Could not find Anvil binary. 
Please install and add to path Foundry tools \\\n                     including Anvil\"\n                );\n            } else {\n                panic!(\"Failed to start the Anvil subnet node as part of test context: {e}\");\n            }\n        }\n    };\n    // Wait a bit for anvil subprocess to spin itself up\n    tokio::time::sleep(tokio::time::Duration::from_secs(3)).await;\n    info!(\"Subnet node started...\");\n\n    // Deploy contracts\n    let json_rpc_endpoint = format!(\"http://127.0.0.1:{port}\");\n    match deploy_contracts(TEST_SECRET_ETHEREUM_KEY, &json_rpc_endpoint).await {\n        Ok((i_topos_core, i_topos_messaging, i_erc20_messaging)) => {\n            info!(\"Contracts successfully deployed\");\n            // Context with subnet container working in the background and ready deployed contracts\n            Context {\n                i_topos_core,\n                i_topos_messaging,\n                i_erc20_messaging,\n                subnet_node_handle: Some(subnet_node_handle),\n                port,\n            }\n        }\n        Err(e) => {\n            panic!(\"Unable to deploy contracts: {e}\");\n        }\n    }\n}\n\n// Test to start subnet and deploy subnet smart contract\n#[rstest]\n#[test(tokio::test)]\n#[serial]\nasync fn test_subnet_node_contract_deployment(\n    #[with(8544)]\n    #[future]\n    context_running_subnet_node: Context,\n) -> Result<(), Box<dyn std::error::Error>> {\n    let context = context_running_subnet_node.await;\n    info!(\"Subnet running in the background with deployed contract\");\n    context.shutdown().await?;\n    info!(\"Subnet node test finished\");\n    Ok(())\n}\n\n// Test subnet client RPC connection to subnet\n#[rstest]\n#[test(tokio::test)]\n#[serial]\nasync fn test_subnet_node_get_block_info(\n    #[with(8545)]\n    #[future]\n    context_running_subnet_node: Context,\n) -> Result<(), Box<dyn std::error::Error>> {\n    //Context with subnet\n    let context = 
context_running_subnet_node.await;\n    match topos_sequencer_subnet_client::SubnetClientListener::new(\n        &context.jsonrpc_ws(),\n        &(\"0x\".to_string() + &hex::encode(context.i_topos_core.address())),\n    )\n    .await\n    {\n        Ok(mut subnet_client) => match subnet_client.get_finalized_block(6).await {\n            Ok(block_info) => {\n                info!(\n                    \"Block info successfully retrieved for block {}\",\n                    block_info.number\n                );\n                // Blocks must have been mined while we deployed contracts\n                assert!(block_info.number == 6);\n            }\n            Err(e) => {\n                panic!(\"Error getting next finalized block: {e}\");\n            }\n        },\n        Err(e) => {\n            panic!(\"Unable to get block info, error {e}\");\n        }\n    }\n    context.shutdown().await?;\n    info!(\"Subnet node test finished\");\n    Ok(())\n}\n\n// Test runtime initialization\n#[rstest]\n#[test(tokio::test)]\n#[serial]\nasync fn test_create_runtime() -> Result<(), Box<dyn std::error::Error>> {\n    let test_private_key = generate_test_private_key();\n    info!(\"Creating runtime proxy...\");\n    let runtime_proxy_worker = SubnetRuntimeProxyWorker::new(\n        SubnetRuntimeProxyConfig {\n            subnet_id: SOURCE_SUBNET_ID_1,\n            http_endpoint: format!(\"http://localhost:{SUBNET_RPC_PORT}\"),\n            ws_endpoint: format!(\"ws://localhost:{SUBNET_RPC_PORT}\"),\n            subnet_contract_address: \"0x0000000000000000000000000000000000000000\".to_string(),\n            verifier: 0,\n            source_head_certificate_id: None,\n            start_block: None,\n        },\n        test_private_key,\n    )\n    .await?;\n    let runtime_proxy = topos_sequencer_subnet_runtime::testing::get_runtime(&runtime_proxy_worker);\n    let runtime_proxy = runtime_proxy.lock().await;\n    info!(\"New runtime proxy created:{:?}\", &runtime_proxy);\n 
   Ok(())\n}\n\n// Test push certificate to subnet smart contract\n#[rstest]\n#[test(tokio::test)]\n#[serial]\nasync fn test_subnet_certificate_push_call(\n    #[with(8546)]\n    #[future]\n    context_running_subnet_node: Context,\n) -> Result<(), Box<dyn std::error::Error>> {\n    let context = context_running_subnet_node.await;\n    let test_private_key = generate_test_private_key();\n    let admin_key = hex::decode(TEST_SECRET_ETHEREUM_KEY).unwrap();\n    let subnet_smart_contract_address =\n        \"0x\".to_string() + &hex::encode(context.i_topos_core.address());\n    let runtime_proxy_worker = SubnetRuntimeProxyWorker::new(\n        SubnetRuntimeProxyConfig {\n            subnet_id: SOURCE_SUBNET_ID_1,\n            http_endpoint: context.jsonrpc(),\n            ws_endpoint: context.jsonrpc_ws(),\n            subnet_contract_address: subnet_smart_contract_address.clone(),\n            verifier: 0,\n            source_head_certificate_id: None,\n            start_block: None,\n        },\n        admin_key.clone(),\n    )\n    .await?;\n\n    let source_subnet_id_1 =\n        topos_crypto::keys::derive_public_key(test_private_key.as_slice()).unwrap();\n\n    let mut certs = Vec::new();\n\n    let new_cert = |id, prev_id| {\n        let mut mock_cert = Certificate {\n            source_subnet_id: SubnetId::from_array(\n                TryInto::<[u8; SUBNET_ID_LENGTH]>::try_into(&source_subnet_id_1[1..33]).unwrap(),\n            ),\n            id,\n            prev_id,\n            target_subnets: vec![SOURCE_SUBNET_ID_1],\n            receipts_root_hash: *id.as_array(), // just to have different receipt root\n            ..Default::default()\n        };\n        mock_cert\n            .update_signature(test_private_key.as_slice())\n            .expect(\"valid signature update\");\n\n        mock_cert\n    };\n\n    certs.push(new_cert(CERTIFICATE_ID_1, PREV_CERTIFICATE_ID_1));\n    certs.push(new_cert(CERTIFICATE_ID_2, PREV_CERTIFICATE_ID_2));\n    
certs.push(new_cert(CERTIFICATE_ID_15, CERTIFICATE_ID_14));\n\n    info!(\"Sending mock certificate to subnet smart contract...\");\n\n    // Multiple push\n    for (idx, mock_cert) in certs.iter().enumerate() {\n        info!(\n            \"Push #{idx} for the Certificate: {:?}, Receipt root: {:?}\",\n            mock_cert.id, mock_cert.receipts_root_hash\n        );\n        if let Err(e) = runtime_proxy_worker\n            .eval(SubnetRuntimeProxyCommand::OnNewDeliveredCertificate {\n                certificate: mock_cert.clone(),\n                position: idx as u64,\n                ctx: Span::current().context(),\n            })\n            .await\n        {\n            error!(\"Failed to send OnNewDeliveredTxns command: {}\", e);\n            return Err(Box::from(e));\n        }\n    }\n\n    info!(\"Waiting for CrossSubnetMessageSent event\");\n    tokio::time::sleep(tokio::time::Duration::from_secs(15)).await;\n    let provider = Provider::<Http>::try_from(format!(\"http://127.0.0.1:{}\", context.port))?;\n    let client = Arc::new(provider);\n    let filter = Filter::new()\n        .address(context.i_topos_core.address())\n        .event(\"CertStored(bytes32,bytes32)\")\n        .from_block(0);\n\n    let logs = client.get_logs(&filter).await?;\n    info!(\"ALL LOGS: {:?}\", logs);\n\n    let expected_logs = certs\n        .iter()\n        .map(|c| {\n            let mut log = c.id.as_array().to_vec();\n            log.extend_from_slice(&c.receipts_root_hash);\n            log\n        })\n        .collect::<Vec<_>>();\n\n    assert_eq!(\n        logs.len(),\n        expected_logs.len(),\n        \"should have as much logs as pushed Certificates\"\n    );\n\n    for log in logs {\n        info!(\n            \"CrossSubnetMessageSent received: block number {:?} from contract {}\",\n            log.block_number, log.address\n        );\n        assert_eq!(hex::encode(log.address), subnet_smart_contract_address[2..]);\n        assert!(\n            
expected_logs.iter().any(|l| *l == log.data.0),\n            \"discrepencies in the logs\"\n        );\n    }\n\n    info!(\"Shutting down context...\");\n    context.shutdown().await?;\n    Ok(())\n}\n\n// Test get last checkpoints from subnet smart contract\n#[rstest]\n#[test(tokio::test)]\n#[serial]\nasync fn test_subnet_certificate_get_checkpoints_call(\n    #[with(8546)]\n    #[future]\n    context_running_subnet_node: Context,\n) -> Result<(), Box<dyn std::error::Error>> {\n    use topos_core::api::grpc::checkpoints;\n    let context = context_running_subnet_node.await;\n    let subnet_smart_contract_address =\n        \"0x\".to_string() + &hex::encode(context.i_topos_core.address());\n    let subnet_jsonrpc_http = context.jsonrpc();\n\n    // Get checkpoints when contract is empty\n    let subnet_client = topos_sequencer_subnet_client::SubnetClient::new(\n        &subnet_jsonrpc_http,\n        Some(hex::decode(TEST_SECRET_ETHEREUM_KEY).unwrap()),\n        &subnet_smart_contract_address,\n    )\n    .await\n    .expect(\"Valid subnet client\");\n    let target_stream_positions = match subnet_client.get_checkpoints(&TARGET_SUBNET_ID_1).await {\n        Ok(result) => result,\n        Err(e) => {\n            panic!(\"Unable to get latest certificate id and position: {e}\");\n        }\n    };\n    assert_eq!(\n        target_stream_positions,\n        Vec::<checkpoints::TargetStreamPosition>::new()\n    );\n\n    let test_certificates = vec![\n        (\n            Certificate {\n                source_subnet_id: SOURCE_SUBNET_ID_1,\n                id: CERTIFICATE_ID_1,\n                prev_id: PREV_CERTIFICATE_ID_1,\n                target_subnets: vec![TARGET_SUBNET_ID_1],\n                ..Default::default()\n            },\n            0,\n        ),\n        (\n            Certificate {\n                source_subnet_id: SOURCE_SUBNET_ID_2,\n                id: CERTIFICATE_ID_2,\n                prev_id: PREV_CERTIFICATE_ID_2,\n                
target_subnets: vec![TARGET_SUBNET_ID_1],\n                ..Default::default()\n            },\n            0,\n        ),\n        (\n            Certificate {\n                source_subnet_id: SOURCE_SUBNET_ID_1,\n                id: CERTIFICATE_ID_3,\n                prev_id: CERTIFICATE_ID_1,\n                target_subnets: vec![TARGET_SUBNET_ID_1],\n                ..Default::default()\n            },\n            1,\n        ),\n    ];\n\n    for (test_cert, test_cert_position) in test_certificates.iter() {\n        info!(\"Pushing certificate id={}\", test_cert.id);\n        match subnet_client\n            .push_certificate(test_cert, *test_cert_position as u64)\n            .await\n        {\n            Ok(_) => {\n                info!(\"Certificate id={} pushed\", test_cert.id);\n            }\n            Err(e) => {\n                panic!(\"Unable to push certificate: {e}\");\n            }\n        }\n    }\n\n    info!(\"Getting latest checkpoints \");\n    let target_stream_positions = match subnet_client.get_checkpoints(&TARGET_SUBNET_ID_1).await {\n        Ok(result) => result,\n        Err(e) => {\n            panic!(\"Unable to get the latest certificate id and position: {e}\");\n        }\n    };\n\n    let expected_positions = vec![\n        TargetStreamPosition {\n            target_subnet_id: TARGET_SUBNET_ID_1,\n            source_subnet_id: SOURCE_SUBNET_ID_1,\n            certificate_id: Some(CERTIFICATE_ID_3),\n            position: 1,\n        },\n        TargetStreamPosition {\n            target_subnet_id: TARGET_SUBNET_ID_1,\n            source_subnet_id: SOURCE_SUBNET_ID_2,\n            certificate_id: Some(CERTIFICATE_ID_2),\n            position: 0,\n        },\n    ]\n    .into_iter()\n    .collect::<HashSet<TargetStreamPosition>>();\n\n    assert_eq!(\n        target_stream_positions\n            .into_iter()\n            .collect::<std::collections::HashSet<TargetStreamPosition>>(),\n        expected_positions\n    );\n\n  
  info!(\"Shutting down context...\");\n    context.shutdown().await?;\n    Ok(())\n}\n\n// Test get subnet id from subnet smart contract\n#[rstest]\n#[test(tokio::test)]\n#[serial]\nasync fn test_subnet_id_call(\n    #[with(8546)]\n    #[future]\n    context_running_subnet_node: Context,\n) -> Result<(), Box<dyn std::error::Error>> {\n    let context = context_running_subnet_node.await;\n    let subnet_smart_contract_address =\n        \"0x\".to_string() + &hex::encode(context.i_topos_core.address());\n    let subnet_jsonrpc_http = context.jsonrpc();\n\n    // Create subnet client\n    let subnet_client = topos_sequencer_subnet_client::SubnetClient::new(\n        &subnet_jsonrpc_http,\n        Some(hex::decode(TEST_SECRET_ETHEREUM_KEY).unwrap()),\n        &subnet_smart_contract_address,\n    )\n    .await\n    .expect(\"Valid subnet client\");\n\n    // Get subnet id\n    let retrieved_subnet_id = match subnet_client.get_subnet_id().await {\n        Ok(result) => {\n            info!(\"Retrieved subnet id {result}\");\n            result\n        }\n        Err(e) => {\n            panic!(\"Unable to get subnet id: {e}\");\n        }\n    };\n\n    let expected_subnet_id: SubnetId = hex::decode(TEST_SUBNET_ID)\n        .unwrap()\n        .as_slice()\n        .try_into()\n        .unwrap();\n    assert_eq!(retrieved_subnet_id, expected_subnet_id);\n\n    info!(\"Shutting down context...\");\n    context.shutdown().await?;\n    Ok(())\n}\n\n// Test perform send token and check for transaction\n// in the certificate (by observing target subnets)\n#[rstest]\n#[test(tokio::test)]\n#[serial]\nasync fn test_subnet_send_token_processing(\n    #[with(8546)]\n    #[future]\n    context_running_subnet_node: Context,\n) -> Result<(), Box<dyn std::error::Error>> {\n    let context = context_running_subnet_node.await;\n    let test_private_key = hex::decode(TEST_SECRET_ETHEREUM_KEY).unwrap();\n    let subnet_jsonrpc_http = context.jsonrpc();\n    let 
subnet_smart_contract_address =\n        \"0x\".to_string() + &hex::encode(context.i_topos_core.address());\n\n    // Create runtime proxy worker\n    info!(\"Creating subnet runtime proxy\");\n    let mut runtime_proxy_worker = SubnetRuntimeProxyWorker::new(\n        SubnetRuntimeProxyConfig {\n            subnet_id: SOURCE_SUBNET_ID_1,\n            http_endpoint: context.jsonrpc(),\n            ws_endpoint: context.jsonrpc_ws(),\n            subnet_contract_address: subnet_smart_contract_address.clone(),\n            verifier: 0,\n            source_head_certificate_id: None,\n            start_block: None,\n        },\n        test_private_key.clone(),\n    )\n    .await?;\n    tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;\n    info!(\"Set source head certificate to 0\");\n    if let Err(e) = runtime_proxy_worker\n        .set_source_head_certificate_id(Some((CERTIFICATE_ID_1, 0)))\n        .await\n    {\n        panic!(\"Unable to set source head certificate id: {e}\");\n    }\n\n    // Deploy token contract\n    let i_erc20 = deploy_test_token(\n        &hex::encode(&test_private_key),\n        &subnet_jsonrpc_http,\n        context.i_topos_messaging.address(),\n    )\n    .await?;\n\n    // Approve token spending\n    if let Err(e) = i_erc20\n        .approve(context.i_topos_messaging.address(), U256::from(10))\n        .legacy()\n        .gas(DEFAULT_GAS)\n        .send()\n        .await?\n        .await\n    {\n        panic!(\"Unable to perform token approval {e}\");\n    }\n\n    // Perform send token\n    info!(\"Sending token\");\n    if let Err(e) = context\n        .i_erc20_messaging\n        .send_token(\n            TARGET_SUBNET_ID_2.into(),\n            TOKEN_SYMBOL.into(),\n            \"00000000000000000000000000000000000000AA\".parse()?,\n            U256::from(2),\n        )\n        .legacy()\n        .gas(DEFAULT_GAS)\n        .send()\n        .await?\n        .await\n    {\n        panic!(\"Unable to send token: {e}\");\n   
 };\n    info!(\"Waiting for certificate with send token transaction...\");\n    let assertion = async move {\n        while let Ok(event) = runtime_proxy_worker.next_event().await {\n            if let SubnetRuntimeProxyEvent::NewCertificate {\n                cert,\n                block_number: _,\n                ctx: _,\n            } = event\n            {\n                info!(\n                    \"New certificate event received, cert id: {} target subnets: {:?}\",\n                    cert.id, cert.target_subnets\n                );\n                if cert.target_subnets.len() == 1 && cert.target_subnets == vec![TARGET_SUBNET_ID_2]\n                {\n                    info!(\n                        \"Received certificate with requested target subnet {}\",\n                        cert.target_subnets[0]\n                    );\n                    return Ok::<(), Box<dyn std::error::Error>>(());\n                }\n            }\n        }\n        panic!(\"Expected event not received\");\n    };\n\n    // Set big timeout to prevent flaky fails. 
Instead fail/panic early in the test to indicate actual error\n    if tokio::time::timeout(std::time::Duration::from_secs(10), assertion)\n        .await\n        .is_err()\n    {\n        panic!(\"Timeout waiting for command\");\n    }\n\n    info!(\"Shutting down context...\");\n    context.shutdown().await?;\n    Ok(())\n}\n\n// Test sync of blocks and generating certificates from genesis block\n// and test sync from particular source head received from tce\n#[rstest]\n#[test(tokio::test)]\n#[timeout(std::time::Duration::from_secs(600))]\n#[serial]\nasync fn test_sync_from_genesis_and_particular_source_head(\n    #[with(8546)]\n    #[future]\n    context_running_subnet_node: Context,\n) -> Result<(), Box<dyn std::error::Error>> {\n    let context = context_running_subnet_node.await;\n    let test_private_key = hex::decode(TEST_SECRET_ETHEREUM_KEY).unwrap();\n    let subnet_jsonrpc_http = context.jsonrpc();\n    let subnet_smart_contract_address =\n        \"0x\".to_string() + &hex::encode(context.i_topos_core.address());\n\n    // Wait for some time to simulate network history\n    tokio::time::sleep(tokio::time::Duration::from_secs(10)).await;\n\n    // Get block height\n    let http_provider = Provider::<Http>::try_from(subnet_jsonrpc_http.clone())?\n        .interval(std::time::Duration::from_millis(20u64));\n    let subnet_height = http_provider.get_block_number().await?.as_u64();\n\n    // Create runtime proxy worker\n    info!(\"Creating subnet runtime proxy\");\n    let runtime_proxy_worker = SubnetRuntimeProxyWorker::new(\n        SubnetRuntimeProxyConfig {\n            subnet_id: SOURCE_SUBNET_ID_1,\n            http_endpoint: context.jsonrpc(),\n            ws_endpoint: context.jsonrpc_ws(),\n            subnet_contract_address: subnet_smart_contract_address.clone(),\n            verifier: 0,\n            source_head_certificate_id: None,\n            start_block: None,\n        },\n        test_private_key.clone(),\n    )\n    .await?;\n    
tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;\n    info!(\"Manually set source head certificate to 0 as TCE is not available\");\n    if let Err(e) = runtime_proxy_worker\n        .set_source_head_certificate_id(None)\n        .await\n    {\n        panic!(\"Unable to set source head certificate id: {e}\");\n    }\n\n    info!(\"Waiting for the certificates from zero until height {subnet_height}...\");\n    let received_certificates = Arc::new(Mutex::new(Vec::new()));\n\n    // Test sync from genesis block\n    {\n        let expected_block_numbers = (0..=subnet_height).collect::<Vec<_>>();\n        let mut expected_blocks = Vec::new();\n        for height in &expected_block_numbers {\n            match http_provider.get_block(*height).await {\n                Ok(block_info) => expected_blocks.push(block_info.expect(\"valid block\")),\n                Err(e) => {\n                    panic!(\"Unable to get block number {}: {}\", height, e);\n                }\n            }\n        }\n        let received_certificates = received_certificates.clone();\n\n        // Set big timeout to prevent flaky fails. 
Instead fail/panic early in the test to indicate actual error\n        if tokio::time::timeout(\n            std::time::Duration::from_secs(60),\n            check_received_certificate(\n                runtime_proxy_worker,\n                received_certificates,\n                expected_block_numbers,\n                expected_blocks,\n            ),\n        )\n        .await\n        .is_err()\n        {\n            panic!(\"Timeout waiting for command\");\n        }\n    }\n\n    //---------------------------------------------------------------------\n    // Now, instantiate new subnet runtime and sync from known position to\n    // test sync from particular point\n    //---------------------------------------------------------------------\n    //\n    // Get block height\n    let http_provider = Provider::<Http>::try_from(subnet_jsonrpc_http)?\n        .interval(std::time::Duration::from_millis(20u64));\n    let subnet_height = http_provider.get_block_number().await?.as_u64();\n    const SYNC_START_BLOCK_NUMBER: u64 = 11;\n\n    // Create second runtime proxy worker\n    info!(\"Creating second subnet runtime proxy worker\");\n    let runtime_proxy_worker_2 = SubnetRuntimeProxyWorker::new(\n        SubnetRuntimeProxyConfig {\n            subnet_id: SOURCE_SUBNET_ID_1,\n            http_endpoint: context.jsonrpc(),\n            ws_endpoint: context.jsonrpc_ws(),\n            subnet_contract_address: subnet_smart_contract_address.clone(),\n            verifier: 0,\n            source_head_certificate_id: None,\n            start_block: None,\n        },\n        test_private_key.clone(),\n    )\n    .await?;\n    tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;\n    info!(\n        \"Manually set source head certificate to certificate from block {}\",\n        SYNC_START_BLOCK_NUMBER\n    );\n    let last_certificate_retrieved =\n        received_certificates.lock().await[SYNC_START_BLOCK_NUMBER as usize - 1].clone();\n    
received_certificates.lock().await.clear();\n    if let Err(e) = runtime_proxy_worker_2\n        .set_source_head_certificate_id(Some((\n            last_certificate_retrieved.1.id,\n            last_certificate_retrieved.0,\n        )))\n        .await\n    {\n        panic!(\"Unable to set source head certificate id: {e}\");\n    }\n\n    // Test sync from 11 block\n    {\n        let expected_block_numbers = (SYNC_START_BLOCK_NUMBER..=subnet_height).collect::<Vec<_>>();\n        let mut expected_blocks = Vec::new();\n        for height in &expected_block_numbers {\n            match http_provider.get_block(*height).await {\n                Ok(block_info) => expected_blocks.push(block_info.expect(\"valid block\")),\n                Err(e) => {\n                    panic!(\"Unable to get block number {}: {}\", height, e);\n                }\n            }\n        }\n        let received_certificates = Arc::new(Mutex::new(Vec::new()));\n\n        // Set big timeout to prevent flaky fails. 
Instead fail/panic early in the test to indicate actual error\n        if tokio::time::timeout(\n            std::time::Duration::from_secs(60),\n            check_received_certificate(\n                runtime_proxy_worker_2,\n                received_certificates,\n                expected_block_numbers,\n                expected_blocks,\n            ),\n        )\n        .await\n        .is_err()\n        {\n            panic!(\"Timeout waiting for command\");\n        }\n    }\n\n    info!(\"Shutting down context...\");\n\n    context.shutdown().await?;\n    Ok(())\n}\n\n// Test sync of blocks and generating certificates start block parameter\n#[rstest]\n#[test(tokio::test)]\n#[timeout(std::time::Duration::from_secs(600))]\n#[serial]\nasync fn test_sync_from_start_block(\n    #[with(8546)]\n    #[future]\n    context_running_subnet_node: Context,\n) -> Result<(), Box<dyn std::error::Error>> {\n    let context = context_running_subnet_node.await;\n    let test_private_key = hex::decode(TEST_SECRET_ETHEREUM_KEY).unwrap();\n    let subnet_jsonrpc_http = context.jsonrpc();\n    let subnet_smart_contract_address =\n        \"0x\".to_string() + &hex::encode(context.i_topos_core.address());\n\n    // Wait for some time to simulate network history\n    tokio::time::sleep(tokio::time::Duration::from_secs(10)).await;\n\n    // Get block height\n    let http_provider = Provider::<Http>::try_from(subnet_jsonrpc_http.clone())?\n        .interval(std::time::Duration::from_millis(20u64));\n    let subnet_height = http_provider.get_block_number().await?.as_u64();\n\n    // Define start block as current subnet height reduced by 5\n    let start_block: u64 = subnet_height - 5;\n\n    // Create runtime proxy worker\n    info!(\"Creating subnet runtime proxy\");\n    let runtime_proxy_worker = SubnetRuntimeProxyWorker::new(\n        SubnetRuntimeProxyConfig {\n            subnet_id: SOURCE_SUBNET_ID_1,\n            http_endpoint: context.jsonrpc(),\n            ws_endpoint: 
context.jsonrpc_ws(),\n            subnet_contract_address: subnet_smart_contract_address.clone(),\n            verifier: 0,\n            source_head_certificate_id: None,\n            start_block: Some(start_block),\n        },\n        test_private_key.clone(),\n    )\n    .await?;\n    tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;\n\n    info!(\"Manually set source head certificate to 0 as TCE is not available\");\n    if let Err(e) = runtime_proxy_worker\n        .set_source_head_certificate_id(None)\n        .await\n    {\n        panic!(\"Unable to set source head certificate id: {e}\");\n    }\n\n    info!(\n        \"Syncing from the start block {} to current height {}\",\n        start_block, subnet_height\n    );\n\n    let received_certificates = Arc::new(Mutex::new(Vec::new()));\n\n    // Test sync from start block\n    {\n        let expected_block_numbers = (start_block..=subnet_height).collect::<Vec<_>>();\n        let mut expected_blocks = Vec::new();\n        for height in &expected_block_numbers {\n            match http_provider.get_block(*height).await {\n                Ok(block_info) => expected_blocks.push(block_info.expect(\"valid block\")),\n                Err(e) => {\n                    panic!(\"Unable to get block number {}: {}\", height, e);\n                }\n            }\n        }\n        let received_certificates = received_certificates.clone();\n\n        // Set big timeout to prevent flaky fails. 
Instead fail/panic early in the test to indicate actual error\n        if tokio::time::timeout(\n            std::time::Duration::from_secs(60),\n            check_received_certificate(\n                runtime_proxy_worker,\n                received_certificates,\n                expected_block_numbers,\n                expected_blocks,\n            ),\n        )\n        .await\n        .is_err()\n        {\n            panic!(\"Timeout waiting for command\");\n        }\n    }\n\n    info!(\"Shutting down context...\");\n\n    context.shutdown().await?;\n    Ok(())\n}\n\n// Test multiple send token events in a block\n// Test is slow, block time is 12 seconds\n#[rstest]\n#[test(tokio::test)]\n#[timeout(std::time::Duration::from_secs(600))]\n#[serial]\nasync fn test_subnet_multiple_send_token_in_a_block(\n    #[with(8546, STANDALONE_SUBNET_WITH_LONG_BLOCKS_BLOCK_TIME)]\n    #[future]\n    context_running_subnet_node: Context,\n) -> Result<(), Box<dyn std::error::Error>> {\n    let context = context_running_subnet_node.await;\n    let test_private_key = hex::decode(TEST_SECRET_ETHEREUM_KEY).unwrap();\n    let subnet_jsonrpc_http = context.jsonrpc();\n    let subnet_smart_contract_address =\n        \"0x\".to_string() + &hex::encode(context.i_topos_core.address());\n    let number_of_send_token_transactions: usize = 4;\n\n    warn!(\"Block time is intentionally long, this is slow test...\");\n\n    // Create runtime proxy worker\n    info!(\"Creating subnet runtime proxy\");\n    let mut runtime_proxy_worker = SubnetRuntimeProxyWorker::new(\n        SubnetRuntimeProxyConfig {\n            subnet_id: SOURCE_SUBNET_ID_1,\n            http_endpoint: context.jsonrpc(),\n            ws_endpoint: context.jsonrpc_ws(),\n            subnet_contract_address: subnet_smart_contract_address.clone(),\n            verifier: 0,\n            source_head_certificate_id: None,\n            start_block: None,\n        },\n        test_private_key.clone(),\n    )\n    .await?;\n    
tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;\n    info!(\"Set source head certificate to 0\");\n    if let Err(e) = runtime_proxy_worker\n        .set_source_head_certificate_id(Some((CERTIFICATE_ID_1, 0)))\n        .await\n    {\n        panic!(\"Unable to set source head certificate id: {e}\");\n    }\n\n    // Deploy token contract\n    let i_erc20 = deploy_test_token(\n        &hex::encode(&test_private_key),\n        &subnet_jsonrpc_http,\n        context.i_topos_messaging.address(),\n    )\n    .await?;\n\n    info!(\"Reading balance of the main account...\");\n    match i_erc20.balance_of(TEST_ETHEREUM_ACCOUNT.parse()?).await {\n        Ok(balance) => {\n            info!(\"Balance of admin account is {:?}\", balance);\n        }\n        Err(e) => {\n            error!(\"Unable to read balance {e}\");\n        }\n    }\n\n    // Send token to other addresses\n    let test_accounts: Vec<_> = vec![\n        TEST_ACCOUNT_ALITH_ACCOUNT.parse()?,\n        TEST_ACCOUNT_BALATHAR_ACCOUNT.parse()?,\n        TEST_ACCOUNT_CEZAR_ACCOUNT.parse()?,\n    ];\n    info!(\"Transferring tokens to {} accounts\", test_accounts.len());\n    for test_account in test_accounts {\n        info!(\n            \"Transferring token to address {}\",\n            \"0x\".to_string() + &hex::encode(test_account)\n        );\n        if let Err(e) = i_erc20\n            .transfer(test_account, U256::from(10))\n            .legacy()\n            .gas(DEFAULT_GAS)\n            .send()\n            .await?\n            .await\n        {\n            panic!(\"Unable to perform token transfer {e}\");\n        }\n    }\n    info!(\"Tokens transferred\");\n\n    let mut erc20_clients = vec![\n        create_new_erc20_client(\n            TEST_SECRET_ETHEREUM_KEY,\n            &subnet_jsonrpc_http,\n            i_erc20.address(),\n        )\n        .await\n        .expect(\"Valid erc20 client\"),\n        create_new_erc20_client(\n            TEST_ACCOUNT_ALITH_KEY,\n            
&subnet_jsonrpc_http,\n            i_erc20.address(),\n        )\n        .await\n        .expect(\"Valid erc20 client\"),\n        create_new_erc20_client(\n            TEST_ACCOUNT_BALATHAR_KEY,\n            &subnet_jsonrpc_http,\n            i_erc20.address(),\n        )\n        .await\n        .expect(\"Valid erc20 client\"),\n        create_new_erc20_client(\n            TEST_ACCOUNT_CEZAR_KEY,\n            &subnet_jsonrpc_http,\n            i_erc20.address(),\n        )\n        .await\n        .expect(\"Valid erc20 client\"),\n    ];\n\n    info!(\"Approve token spending\");\n    for erc20_client in &mut erc20_clients {\n        if let Err(e) = erc20_client\n            .approve(context.i_topos_messaging.address(), U256::from(10))\n            .legacy()\n            .gas(DEFAULT_GAS)\n            .send()\n            .await?\n            .await\n        {\n            panic!(\"Unable to perform token approval {e}\");\n        } else {\n            info!(\"Token spending approved for {}\", erc20_client.address());\n        }\n    }\n    info!(\"All token spending approved\");\n\n    info!(\"Initializing multiple i_erc20_messaging subnet clients\");\n    let mut target_subnets = vec![\n        (\n            TARGET_SUBNET_ID_5,\n            create_new_erc20msg_client(\n                TEST_SECRET_ETHEREUM_KEY,\n                &subnet_jsonrpc_http,\n                context.i_erc20_messaging.address(),\n            )\n            .await\n            .expect(\"Valid client\"),\n        ),\n        (\n            TARGET_SUBNET_ID_4,\n            create_new_erc20msg_client(\n                TEST_ACCOUNT_ALITH_KEY,\n                &subnet_jsonrpc_http,\n                context.i_erc20_messaging.address(),\n            )\n            .await\n            .expect(\"Valid client\"),\n        ),\n        (\n            TARGET_SUBNET_ID_3,\n            create_new_erc20msg_client(\n                TEST_ACCOUNT_BALATHAR_KEY,\n                &subnet_jsonrpc_http,\n       
         context.i_erc20_messaging.address(),\n            )\n            .await\n            .expect(\"Valid client\"),\n        ),\n        (\n            TARGET_SUBNET_ID_2,\n            create_new_erc20msg_client(\n                TEST_ACCOUNT_CEZAR_KEY,\n                &subnet_jsonrpc_http,\n                context.i_erc20_messaging.address(),\n            )\n            .await\n            .expect(\"Valid client\"),\n        ),\n    ];\n\n    // Perform multiple send token actions\n    info!(\"Sending multiple transactions in parallel\");\n    let mut handles = Vec::new();\n    for i in 1..=number_of_send_token_transactions {\n        let (target_subnet, i_erc20_messaging) = target_subnets.pop().unwrap();\n        let i_erc20_messaging_address = i_erc20_messaging.address();\n        let handle = tokio::spawn(async move {\n            info!(\n                \"Sending transaction {} to target subnet {} erc20 messaging account {}\",\n                i,\n                &target_subnet,\n                \"0x\".to_string() + &hex::encode(i_erc20_messaging_address)\n            );\n            if let Err(e) = i_erc20_messaging\n                .send_token(\n                    target_subnet.into(),\n                    TOKEN_SYMBOL.into(),\n                    \"00000000000000000000000000000000000000AA\".parse().unwrap(),\n                    U256::from(i),\n                )\n                .legacy()\n                .gas(DEFAULT_GAS)\n                .send()\n                .await\n                .map_err(|e| {\n                    error!(\"Unable to send token, contract error: {e}\");\n                })\n                .unwrap()\n                .await\n            {\n                error!(\"Unable to send token {e}\");\n                panic!(\"Unable to send token: {e}\");\n            };\n            info!(\"Transaction {} sent\", i);\n        });\n        handles.push(handle);\n    }\n    for handle in handles {\n        handle.await.expect(\"Send 
token task correctly finished\");\n    }\n    info!(\"All token transactions sent!\");\n\n    info!(\"Waiting for certificate with send token transaction...\");\n    let mut received_certificates = Vec::new();\n    let assertion = async move {\n        while let Ok(event) = runtime_proxy_worker.next_event().await {\n            if let SubnetRuntimeProxyEvent::NewCertificate {\n                cert,\n                block_number,\n                ctx: _,\n            } = event\n            {\n                info!(\n                    \"New certificate event received, block number: {} cert id: {} target subnets: \\\n                     {:?}\",\n                    block_number, cert.id, cert.target_subnets\n                );\n                if !cert.target_subnets.is_empty() {\n                    received_certificates.push(cert);\n                    let target_subnets = received_certificates\n                        .iter()\n                        .flat_map(|c| c.target_subnets.iter())\n                        .collect::<Vec<_>>();\n                    if target_subnets.len() == number_of_send_token_transactions {\n                        info!(\"Received all expected target subnets {:?}\", target_subnets);\n                        return Ok::<(), Box<dyn std::error::Error>>(());\n                    }\n                }\n            } else {\n                info!(\"Received subnet event: {:?}\", event);\n            }\n        }\n        panic!(\"Expected event not received\");\n    };\n\n    // Set big timeout to prevent flaky failures. Instead fail/panic early in the test to indicate actual error\n    if tokio::time::timeout(std::time::Duration::from_secs(120), assertion)\n        .await\n        .is_err()\n    {\n        panic!(\"Timeout waiting for command\");\n    }\n\n    info!(\"Shutting down context...\");\n    context.shutdown().await?;\n    Ok(())\n}\n"
  },
  {
    "path": "crates/topos-tce/Cargo.toml",
    "content": "[package]\nname = \"topos-tce\"\ndescription = \"TCE Node Service\"\nversion = \"0.1.0\"\nedition = \"2021\"\nrust-version = \"1.65\"\n\n[lints]\nworkspace = true\n\n[dependencies]\nlibp2p.workspace = true\nasync-trait.workspace = true\nbincode.workspace = true\nclap.workspace = true\nhex.workspace = true\nfutures.workspace = true\nopentelemetry.workspace = true\nprometheus-client.workspace = true\nprometheus.workspace = true\nserde.workspace = true\nthiserror.workspace = true\ntokio.workspace = true\ntokio-util.workspace = true\ntokio-stream.workspace = true\ntopos-core.workspace = true\ntracing-attributes.workspace = true\ntracing-opentelemetry.workspace = true\ntracing-subscriber = { workspace = true, default-features = false,  features = [\"std\", \"env-filter\", \"fmt\", \"ansi\"] }\ntracing.workspace = true\ntonic.workspace = true\nbytes.workspace = true\nprost.workspace = true\n\ntopos-config = { path = \"../topos-config\" }\ntopos-p2p = { path = \"../topos-p2p\" }\ntopos-metrics = { path = \"../topos-metrics\" }\ntopos-tce-api = { path = \"../topos-tce-api\"}\ntopos-crypto = { path = \"../topos-crypto\" }\ntopos-tce-broadcast = { path = \"../topos-tce-broadcast\" }\ntopos-tce-gatekeeper = { path = \"../topos-tce-gatekeeper\" }\ntopos-tce-storage = { package = \"topos-tce-storage\", path = \"../topos-tce-storage\" }\ntopos-tce-synchronizer = { path = \"../topos-tce-synchronizer\" }\ntopos-telemetry = { path = \"../topos-telemetry\" }\naxum = \"0.7.4\"\naxum-prometheus = \"0.6\"\n\n\n[dev-dependencies]\ntopos-test-sdk = { path = \"../topos-test-sdk/\" }\nasync-stream.workspace = true\nasync-trait.workspace = true\nhyper.workspace = true\nlibp2p.workspace = true\nrand.workspace = true\nrand_core.workspace = true\nrand_distr.workspace = true\nrstest.workspace = true\ntonic.workspace = true\ntracing-subscriber = { workspace = true, features = [\"env-filter\", \"fmt\"] }\ntracing.workspace = true\ntest-log.workspace = true\ncucumber = 
\"0.13.0\"\nenv_logger.workspace = true\n\n[features]\ndefault = []\nlog-json = [\"tracing-subscriber/json\"]\n\n"
  },
  {
    "path": "crates/topos-tce/src/app_context/api.rs",
    "content": "use crate::AppContext;\nuse std::collections::HashMap;\nuse topos_core::uci::{Certificate, SubnetId};\nuse topos_metrics::CERTIFICATE_DELIVERY_LATENCY;\nuse topos_tce_api::RuntimeError;\nuse topos_tce_api::RuntimeEvent as ApiEvent;\nuse topos_tce_broadcast::DoubleEchoCommand;\nuse topos_tce_storage::errors::{InternalStorageError, StorageError};\nuse topos_tce_storage::types::PendingResult;\nuse tracing::debug;\nuse tracing::{error, warn};\n\nimpl AppContext {\n    pub async fn on_api_event(&mut self, event: ApiEvent) {\n        match event {\n            ApiEvent::CertificateSubmitted {\n                certificate,\n                sender,\n            } => {\n                self.delivery_latency\n                    .insert(certificate.id, CERTIFICATE_DELIVERY_LATENCY.start_timer());\n\n                _ = match self\n                    .validator_store\n                    .insert_pending_certificate(&certificate)\n                    .await\n                {\n                    Ok(Some(pending_id)) => {\n                        let certificate_id = certificate.id;\n                        debug!(\n                            \"Certificate {} from subnet {} has been inserted into pending pool\",\n                            certificate_id, certificate.source_subnet_id\n                        );\n\n                        if self\n                            .tce_cli\n                            .get_double_echo_channel()\n                            .send(DoubleEchoCommand::Broadcast {\n                                need_gossip: true,\n                                cert: *certificate,\n                                pending_id,\n                            })\n                            .await\n                            .is_err()\n                        {\n                            error!(\n                                \"Unable to send DoubleEchoCommand::Broadcast command to double \\\n                                 echo for 
{}\",\n                                certificate_id\n                            );\n\n                            sender.send(Err(RuntimeError::CommunicationError(\n                                \"Unable to send DoubleEchoCommand::Broadcast command to double \\\n                                 echo\"\n                                    .to_string(),\n                            )))\n                        } else {\n                            sender.send(Ok(PendingResult::InPending(pending_id)))\n                        }\n                    }\n                    Ok(None) => {\n                        debug!(\n                            \"Certificate {} from subnet {} has been inserted into precedence pool \\\n                             waiting for {}\",\n                            certificate.id, certificate.source_subnet_id, certificate.prev_id\n                        );\n                        sender.send(Ok(PendingResult::AwaitPrecedence))\n                    }\n                    Err(StorageError::InternalStorage(\n                        InternalStorageError::CertificateAlreadyPending,\n                    )) => {\n                        debug!(\n                            \"Certificate {} has already been added to the pending pool, skipping\",\n                            certificate.id\n                        );\n                        sender.send(Ok(PendingResult::AlreadyPending))\n                    }\n                    Err(StorageError::InternalStorage(\n                        InternalStorageError::CertificateAlreadyExists,\n                    )) => {\n                        debug!(\n                            \"Certificate {} has already been delivered, skipping\",\n                            certificate.id\n                        );\n                        sender.send(Ok(PendingResult::AlreadyDelivered))\n                    }\n                    Err(error) => {\n                        error!(\n                         
   \"Unable to insert pending certificate {}: {}\",\n                            certificate.id, error\n                        );\n\n                        sender.send(Err(error.into()))\n                    }\n                };\n            }\n\n            ApiEvent::GetSourceHead { subnet_id, sender } => {\n                // Get source head certificate\n                let mut result = self\n                    .pending_storage\n                    .get_source_head(subnet_id)\n                    .await\n                    .and_then(|result| match result {\n                        None => Err(StorageError::InternalStorage(\n                            InternalStorageError::MissingHeadForSubnet(subnet_id),\n                        )),\n                        value => Ok(value),\n                    })\n                    .map_err(|e| match e {\n                        StorageError::InternalStorage(internal) => {\n                            if let InternalStorageError::MissingHeadForSubnet(subnet_id) = internal\n                            {\n                                RuntimeError::UnknownSubnet(subnet_id)\n                            } else {\n                                RuntimeError::UnableToGetSourceHead(subnet_id, internal.to_string())\n                            }\n                        }\n                        e => RuntimeError::UnableToGetSourceHead(subnet_id, e.to_string()),\n                    });\n\n                // TODO: Initial genesis certificate eventually will be fetched from the topos subnet\n                // Currently, for subnet starting from scratch there are no certificates in the database\n                // So for MissingHeadForSubnet error we will return some default dummy certificate\n                if let Err(RuntimeError::UnknownSubnet(subnet_id)) = result {\n                    warn!(\"Returning dummy certificate as head certificate, to be fixed...\");\n                    result = Ok(Some((\n                  
      0,\n                        topos_core::uci::Certificate {\n                            prev_id: AppContext::DUMMY_INITIAL_CERTIFICATE_ID,\n                            source_subnet_id: subnet_id,\n                            state_root: Default::default(),\n                            tx_root_hash: Default::default(),\n                            receipts_root_hash: Default::default(),\n                            target_subnets: vec![],\n                            verifier: 0,\n                            id: AppContext::DUMMY_INITIAL_CERTIFICATE_ID,\n                            proof: Default::default(),\n                            signature: Default::default(),\n                        },\n                    )));\n                };\n\n                _ = sender.send(result);\n            }\n\n            ApiEvent::GetLastPendingCertificates {\n                mut subnet_ids,\n                sender,\n            } => {\n                let mut last_pending_certificates: HashMap<SubnetId, Option<(Certificate, u64)>> =\n                    subnet_ids\n                        .iter()\n                        .map(|subnet_id| (*subnet_id, None))\n                        .collect();\n\n                if let Ok(pending_certificates) =\n                    self.pending_storage.get_pending_certificates().await\n                {\n                    // Count number of pending certificates for every subnet\n                    let mut indexes: HashMap<SubnetId, u64> = HashMap::new();\n                    for (_pending_certificate_id, cert) in pending_certificates.iter() {\n                        *indexes.entry(cert.source_subnet_id).or_insert(0) += 1;\n                    }\n\n                    // Iterate through pending certificates and determine last one for every subnet\n                    // Last certificate in the subnet should be one with the highest index\n                    for (_pending_certificate_id, cert) in 
pending_certificates.into_iter().rev() {\n                        if let Some(subnet_id) = subnet_ids.take(&cert.source_subnet_id) {\n                            *last_pending_certificates.entry(subnet_id).or_insert(None) =\n                                Some((cert, indexes[&subnet_id]));\n                        }\n                        if subnet_ids.is_empty() {\n                            break;\n                        }\n                    }\n                }\n\n                // Add None pending certificate for any other requested subnet_id\n                subnet_ids.iter().for_each(|subnet_id| {\n                    last_pending_certificates.insert(*subnet_id, None);\n                });\n\n                _ = sender.send(Ok(last_pending_certificates));\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "crates/topos-tce/src/app_context/network.rs",
    "content": "use prost::Message;\nuse std::collections::hash_map;\nuse topos_tce_storage::errors::{InternalStorageError, StorageError};\n\nuse tokio::spawn;\n\nuse topos_metrics::CERTIFICATE_DELIVERY_LATENCY;\nuse topos_p2p::Event as NetEvent;\nuse topos_tce_broadcast::DoubleEchoCommand;\nuse tracing::{debug, error, info, trace};\n\nuse topos_core::api::grpc::tce::v1::{double_echo_request, DoubleEchoRequest, Echo, Gossip, Ready};\nuse topos_core::uci;\n\nuse crate::AppContext;\n\nimpl AppContext {\n    pub async fn on_net_event(&mut self, evt: NetEvent) {\n        trace!(\n            \"on_net_event: peer: {} event {:?}\",\n            &self.network_client.local_peer_id,\n            &evt\n        );\n\n        if let NetEvent::Gossip { data, from } = evt {\n            if let Ok(DoubleEchoRequest {\n                request: Some(double_echo_request),\n            }) = DoubleEchoRequest::decode(&data[..])\n            {\n                match double_echo_request {\n                    double_echo_request::Request::Gossip(Gossip {\n                        certificate: Some(certificate),\n                    }) => match uci::Certificate::try_from(certificate) {\n                        Ok(cert) => {\n                            if let hash_map::Entry::Vacant(entry) =\n                                self.delivery_latency.entry(cert.id)\n                            {\n                                entry.insert(CERTIFICATE_DELIVERY_LATENCY.start_timer());\n                            }\n                            info!(\n                                \"Received certificate {} from GossipSub from {}\",\n                                cert.id, from\n                            );\n\n                            match self.validator_store.insert_pending_certificate(&cert).await {\n                                Ok(Some(pending_id)) => {\n                                    let certificate_id = cert.id;\n                                    debug!(\n                
                        \"Certificate {} has been inserted into pending pool\",\n                                        certificate_id\n                                    );\n\n                                    if self\n                                        .tce_cli\n                                        .get_double_echo_channel()\n                                        .send(DoubleEchoCommand::Broadcast {\n                                            need_gossip: false,\n                                            cert,\n                                            pending_id,\n                                        })\n                                        .await\n                                        .is_err()\n                                    {\n                                        error!(\n                                            \"Unable to send DoubleEchoCommand::Broadcast command \\\n                                             to double echo for {}\",\n                                            certificate_id\n                                        );\n                                    }\n                                }\n\n                                Ok(None) => {\n                                    debug!(\n                                        \"Certificate {} from subnet {} has been inserted into \\\n                                         precedence pool waiting for {}\",\n                                        cert.id, cert.source_subnet_id, cert.prev_id\n                                    );\n                                }\n                                Err(StorageError::InternalStorage(\n                                    InternalStorageError::CertificateAlreadyPending,\n                                )) => {\n                                    debug!(\n                                        \"Certificate {} has been already added to the pending \\\n                                         pool, 
skipping\",\n                                        cert.id\n                                    );\n                                }\n                                Err(StorageError::InternalStorage(\n                                    InternalStorageError::CertificateAlreadyExists,\n                                )) => {\n                                    debug!(\n                                        \"Certificate {} has been already delivered, skipping\",\n                                        cert.id\n                                    );\n                                }\n                                Err(error) => {\n                                    error!(\n                                        \"Unable to insert pending certificate {}: {}\",\n                                        cert.id, error\n                                    );\n                                }\n                            }\n                        }\n                        Err(e) => {\n                            error!(\"Failed to parse the received Certificate: {e}\");\n                        }\n                    },\n                    double_echo_request::Request::Echo(Echo {\n                        certificate_id: Some(certificate_id),\n                        signature: Some(signature),\n                        validator_id: Some(validator_id),\n                    }) => {\n                        let channel = self.tce_cli.get_double_echo_channel();\n                        spawn(async move {\n                            let certificate_id = certificate_id.clone().try_into().map_err(|e| {\n                                error!(\n                                    \"Failed to parse the CertificateId {certificate_id} from \\\n                                     Echo: {e}\"\n                                );\n                                e\n                            });\n                            let validator_id = 
validator_id.clone().try_into().map_err(|e| {\n                                error!(\n                                    \"Failed to parse the ValidatorId {validator_id} from Echo: {e}\"\n                                );\n                                e\n                            });\n\n                            if let (Ok(certificate_id), Ok(validator_id)) =\n                                (certificate_id, validator_id)\n                            {\n                                trace!(\n                                    \"Received Echo message, certificate_id: {certificate_id}, \\\n                                     validator_id: {validator_id} from: {from}\",\n                                    certificate_id = certificate_id,\n                                    validator_id = validator_id\n                                );\n\n                                if let Err(e) = channel\n                                    .send(DoubleEchoCommand::Echo {\n                                        signature: signature.into(),\n                                        certificate_id,\n                                        validator_id,\n                                    })\n                                    .await\n                                {\n                                    error!(\"Unable to pass received Echo message: {:?}\", e);\n                                }\n                            } else {\n                                error!(\"Unable to process Echo message due to invalid data\");\n                            }\n                        });\n                    }\n                    double_echo_request::Request::Ready(Ready {\n                        certificate_id: Some(certificate_id),\n                        signature: Some(signature),\n                        validator_id: Some(validator_id),\n                    }) => {\n                        let channel = self.tce_cli.get_double_echo_channel();\n           
             spawn(async move {\n                            let certificate_id = certificate_id.clone().try_into().map_err(|e| {\n                                error!(\n                                    \"Failed to parse the CertificateId {certificate_id} from \\\n                                     Ready: {e}\"\n                                );\n                                e\n                            });\n                            let validator_id = validator_id.clone().try_into().map_err(|e| {\n                                error!(\n                                    \"Failed to parse the ValidatorId {validator_id} from Ready: \\\n                                     {e}\"\n                                );\n                                e\n                            });\n                            if let (Ok(certificate_id), Ok(validator_id)) =\n                                (certificate_id, validator_id)\n                            {\n                                trace!(\n                                    \"Received Ready message, certificate_id: {certificate_id}, \\\n                                     validator_id: {validator_id} from: {from}\",\n                                    certificate_id = certificate_id,\n                                    validator_id = validator_id\n                                );\n                                if let Err(e) = channel\n                                    .send(DoubleEchoCommand::Ready {\n                                        signature: signature.into(),\n                                        certificate_id,\n                                        validator_id,\n                                    })\n                                    .await\n                                {\n                                    error!(\"Unable to pass received Ready message: {:?}\", e);\n                                }\n                            } else {\n                          
      error!(\"Unable to process Ready message due to invalid data\");\n                            }\n                        });\n                    }\n                    _ => {}\n                }\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "crates/topos-tce/src/app_context/protocol.rs",
    "content": "use topos_core::api::grpc::tce::v1::{double_echo_request, DoubleEchoRequest, Echo, Gossip, Ready};\nuse topos_tce_broadcast::event::ProtocolEvents;\nuse tracing::{error, info, warn};\n\nuse crate::AppContext;\n\nimpl AppContext {\n    pub async fn on_protocol_event(&mut self, evt: ProtocolEvents) {\n        match evt {\n            ProtocolEvents::Broadcast { certificate_id } => {\n                info!(\"Broadcasting certificate {}\", certificate_id);\n            }\n\n            ProtocolEvents::Gossip { cert } => {\n                let cert_id = cert.id;\n\n                let request = DoubleEchoRequest {\n                    request: Some(double_echo_request::Request::Gossip(Gossip {\n                        certificate: Some(cert.into()),\n                    })),\n                };\n\n                info!(\"Sending Gossip for certificate {}\", cert_id);\n                if let Err(e) = self\n                    .network_client\n                    .publish(topos_p2p::TOPOS_GOSSIP, request)\n                    .await\n                {\n                    error!(\"Unable to send Gossip: {e}\");\n                }\n            }\n\n            ProtocolEvents::Echo {\n                certificate_id,\n                signature,\n                validator_id,\n            } if self.is_validator => {\n                // Send echo message\n                let request = DoubleEchoRequest {\n                    request: Some(double_echo_request::Request::Echo(Echo {\n                        certificate_id: Some(certificate_id.into()),\n                        signature: Some(signature.into()),\n                        validator_id: Some(validator_id.into()),\n                    })),\n                };\n\n                if let Err(e) = self\n                    .network_client\n                    .publish(topos_p2p::TOPOS_ECHO, request)\n                    .await\n                {\n                    error!(\"Unable to send Echo: {e}\");\n   
             }\n            }\n\n            ProtocolEvents::Ready {\n                certificate_id,\n                signature,\n                validator_id,\n            } if self.is_validator => {\n                let request = DoubleEchoRequest {\n                    request: Some(double_echo_request::Request::Ready(Ready {\n                        certificate_id: Some(certificate_id.into()),\n                        signature: Some(signature.into()),\n                        validator_id: Some(validator_id.into()),\n                    })),\n                };\n\n                if let Err(e) = self\n                    .network_client\n                    .publish(topos_p2p::TOPOS_READY, request)\n                    .await\n                {\n                    error!(\"Unable to send Ready: {e}\");\n                }\n            }\n            ProtocolEvents::BroadcastFailed { certificate_id } => {\n                warn!(\"Broadcast failed for certificate {certificate_id}\")\n            }\n            ProtocolEvents::AlreadyDelivered { certificate_id } => {\n                info!(\"Certificate {certificate_id} already delivered\")\n            }\n            _ => {}\n        }\n    }\n}\n"
  },
  {
    "path": "crates/topos-tce/src/app_context.rs",
    "content": "//!\n//! Application logic glue\n//!\nuse crate::events::Events;\nuse futures::{Stream, StreamExt};\nuse prometheus::HistogramTimer;\nuse std::collections::HashMap;\nuse std::sync::Arc;\nuse tokio::sync::mpsc;\nuse tokio_util::sync::CancellationToken;\nuse topos_core::uci::CertificateId;\nuse topos_metrics::CERTIFICATE_DELIVERED_TOTAL;\nuse topos_p2p::{Event as NetEvent, NetworkClient};\nuse topos_tce_api::RuntimeClient as ApiClient;\nuse topos_tce_api::RuntimeContext;\nuse topos_tce_api::RuntimeEvent as ApiEvent;\nuse topos_tce_broadcast::event::ProtocolEvents;\nuse topos_tce_broadcast::ReliableBroadcastClient;\nuse topos_tce_gatekeeper::GatekeeperClient;\nuse topos_tce_storage::store::ReadStore;\nuse topos_tce_storage::types::CertificateDeliveredWithPositions;\nuse topos_tce_storage::validator::ValidatorStore;\nuse topos_tce_storage::StorageClient;\nuse topos_tce_synchronizer::SynchronizerEvent;\nuse tracing::{error, info, warn};\n\nmod api;\nmod network;\npub(crate) mod protocol;\n\n/// Top-level transducer main app context & driver (alike)\n///\n/// Implements <...Host> traits for network and Api, listens for protocol events in events\n/// (store is not active component).\n///\n/// In the end we shall come to design where this struct receives\n/// config+data as input and runs app returning data as output\n///\npub struct AppContext {\n    pub is_validator: bool,\n    pub events: mpsc::Sender<Events>,\n    pub tce_cli: ReliableBroadcastClient,\n    pub network_client: NetworkClient,\n    pub api_client: ApiClient,\n    pub pending_storage: StorageClient,\n    pub gatekeeper: GatekeeperClient,\n\n    pub delivery_latency: HashMap<CertificateId, HistogramTimer>,\n\n    pub validator_store: Arc<ValidatorStore>,\n    pub api_context: RuntimeContext,\n}\n\nimpl AppContext {\n    // Default previous certificate id for first certificate in the subnet\n    // TODO: Remove, it will be genesis certificate id retrieved from Topos Subnet\n    const 
DUMMY_INITIAL_CERTIFICATE_ID: CertificateId =\n        CertificateId::from_array([0u8; topos_core::uci::CERTIFICATE_ID_LENGTH]);\n\n    /// Factory\n    #[allow(clippy::too_many_arguments)]\n    pub fn new(\n        is_validator: bool,\n        pending_storage: StorageClient,\n        tce_cli: ReliableBroadcastClient,\n        network_client: NetworkClient,\n        api_client: ApiClient,\n        gatekeeper: GatekeeperClient,\n        validator_store: Arc<ValidatorStore>,\n        api_context: RuntimeContext,\n    ) -> (Self, mpsc::Receiver<Events>) {\n        let (events, receiver) = mpsc::channel(100);\n        (\n            Self {\n                is_validator,\n                events,\n                tce_cli,\n                network_client,\n                api_client,\n                pending_storage,\n                gatekeeper,\n                delivery_latency: Default::default(),\n                validator_store,\n                api_context,\n            },\n            receiver,\n        )\n    }\n\n    /// Main processing loop\n    #[allow(clippy::too_many_arguments)]\n    pub async fn run(\n        mut self,\n        mut network_stream: impl Stream<Item = NetEvent> + Unpin,\n        mut tce_stream: impl Stream<Item = ProtocolEvents> + Unpin,\n        mut api_stream: impl Stream<Item = ApiEvent> + Unpin,\n        mut synchronizer_stream: impl Stream<Item = SynchronizerEvent> + Unpin,\n        mut broadcast_stream: impl Stream<Item = CertificateDeliveredWithPositions> + Unpin,\n        shutdown: (CancellationToken, mpsc::Sender<()>),\n    ) {\n        loop {\n            tokio::select! 
{\n\n                Some(delivery) = broadcast_stream.next() => {\n                    let certificate_id = delivery.0.certificate.id;\n                    CERTIFICATE_DELIVERED_TOTAL.inc();\n\n                    if let Some(timer) = self.delivery_latency.remove(&certificate_id) {\n                        let duration = timer.stop_and_record();\n                        info!(\"Certificate {} delivered with total latency: {}s\", certificate_id, duration);\n                    }\n                }\n\n                // protocol\n                Some(evt) = tce_stream.next() => {\n                    self.on_protocol_event(evt).await;\n                },\n\n                // network\n                Some(net_evt) = network_stream.next() => {\n                    self.on_net_event(net_evt).await;\n                }\n\n                // api events\n                Some(event) = api_stream.next() => {\n                    self.on_api_event(event).await;\n                }\n\n                // Synchronizer events\n                Some(_event) = synchronizer_stream.next() => {\n                }\n\n                // Shutdown signal\n                _ = shutdown.0.cancelled() => {\n                    info!(\"Shutting down TCE app context...\");\n\n                    if let Err(e) = self.shutdown().await {\n                        error!(\"Failed to shutdown the TCE app context: {e}\");\n                    }\n                    // Drop the sender to notify the TCE termination\n                    drop(shutdown.1);\n                    break;\n                }\n            }\n        }\n        warn!(\"Exiting main TCE app processing loop\")\n    }\n\n    pub async fn shutdown(&mut self) -> Result<(), Box<dyn std::error::Error>> {\n        info!(\"Shutting down the TCE client...\");\n\n        self.api_client.shutdown().await?;\n        self.tce_cli.shutdown().await?;\n        self.gatekeeper.shutdown().await?;\n        self.network_client.shutdown().await?;\n\n    
    let certificates_synced = self\n            .validator_store\n            .count_certificates_delivered()\n            .map_err(|error| format!(\"Unable to count certificates delivered: {error}\"))\n            .unwrap();\n\n        let pending_certificates = self\n            .validator_store\n            .pending_pool_size()\n            .map_err(|error| format!(\"Unable to count pending certificates: {error}\"))\n            .unwrap();\n\n        let precedence_pool_certificates = self\n            .validator_store\n            .precedence_pool_size()\n            .map_err(|error| format!(\"Unable to count precedence pool certificates: {error}\"))\n            .unwrap();\n\n        info!(\n            \"Stopping with {} certificates delivered, {} pending certificates and {} certificates \\\n             in the precedence pool\",\n            certificates_synced, pending_certificates, precedence_pool_certificates\n        );\n        Ok(())\n    }\n}\n"
  },
  {
    "path": "crates/topos-tce/src/events.rs",
    "content": "#[derive(Debug)]\npub enum Events {\n    StableSample,\n}\n"
  },
  {
    "path": "crates/topos-tce/src/lib.rs",
    "content": "use futures::{Future, StreamExt};\nuse opentelemetry::global;\nuse std::process::ExitStatus;\nuse std::{future::IntoFuture, sync::Arc};\nuse tokio::{\n    spawn,\n    sync::{broadcast, mpsc},\n};\nuse tokio_stream::wrappers::BroadcastStream;\nuse tokio_util::sync::CancellationToken;\nuse topos_config::tce::TceConfig;\nuse topos_core::api::grpc::tce::v1::synchronizer_service_server::SynchronizerServiceServer;\nuse topos_crypto::{messages::MessageSigner, validator_id::ValidatorId};\nuse topos_p2p::{\n    utils::{local_key_pair, local_key_pair_from_slice},\n    GrpcContext, GrpcRouter,\n};\nuse topos_tce_broadcast::{ReliableBroadcastClient, ReliableBroadcastConfig};\nuse topos_tce_storage::{store::ReadStore, validator::ValidatorStore, StorageClient};\nuse topos_tce_synchronizer::SynchronizerService;\nuse tracing::{debug, info, warn};\n\nmod app_context;\npub mod events;\n#[cfg(test)]\nmod tests;\n\npub use app_context::AppContext;\n\nuse topos_config::tce::{AuthKey, StorageConfiguration};\n\n// TODO: Estimate on the max broadcast throughput, could need to be override by config\nconst BROADCAST_CHANNEL_SIZE: usize = 10_000;\n\npub async fn launch(\n    config: &TceConfig,\n    shutdown: (CancellationToken, mpsc::Sender<()>),\n) -> Result<ExitStatus, Box<dyn std::error::Error>> {\n    let cancel = shutdown.0.clone();\n    let run_fut = run(config, shutdown);\n    let app_context_run = tokio::select! 
{\n            _ = cancel.cancelled() => {\n                return Err(Box::from(\"Killed before readiness\".to_string()));\n            }\n\n            result = run_fut => {\n                match result {\n                    Ok(app_context_run)=> app_context_run,\n                    Err(error) => return Err(error)\n                }\n            }\n    };\n\n    app_context_run.await;\n\n    global::shutdown_tracer_provider();\n    Ok(ExitStatus::default())\n}\n\npub async fn run(\n    config: &TceConfig,\n    shutdown: (CancellationToken, mpsc::Sender<()>),\n) -> Result<impl Future<Output = ()>, Box<dyn std::error::Error>> {\n    // Preboot phase - start\n    topos_metrics::init_metrics();\n\n    let key = match config.auth_key.as_ref() {\n        Some(AuthKey::Seed(seed)) => local_key_pair_from_slice(&seed[..]),\n        Some(AuthKey::PrivateKey(pk)) => topos_p2p::utils::keypair_from_protobuf_encoding(&pk[..]),\n        None => local_key_pair(None),\n    };\n\n    let message_signer = match &config.signing_key {\n        Some(AuthKey::PrivateKey(pk)) => Arc::new(MessageSigner::new(&pk[..])?),\n        _ => return Err(Box::from(\"Error, no singing key\".to_string())),\n    };\n\n    let validator_id: ValidatorId = message_signer.public_address.into();\n    let public_address = validator_id.to_string();\n\n    warn!(\"Public node address: {public_address}\");\n\n    let peer_id = key.public().to_peer_id();\n\n    warn!(\"I am {peer_id}\");\n\n    tracing::Span::current().record(\"peer_id\", &peer_id.to_string());\n\n    let mut boot_peers = config.boot_peers.clone();\n\n    // Remove myself from the bootnode list\n    boot_peers.retain(|(p, _)| *p != peer_id);\n    let is_validator = config.validators.contains(&validator_id);\n\n    // Preboot phase - stop\n    // Healthiness phase - start\n    debug!(\"Starting the Storage\");\n    let path = if let StorageConfiguration::RocksDB(Some(ref path)) = config.storage {\n        path\n    } else {\n        return 
Err(Box::new(std::io::Error::new(\n            std::io::ErrorKind::Other,\n            format!(\"Unsupported storage type {:?}\", config.storage),\n        )));\n    };\n\n    let validator_store = ValidatorStore::new(path)\n        .map_err(|error| format!(\"Unable to create validator store: {error}\"))?;\n\n    let fullnode_store = validator_store.fullnode_store();\n\n    let storage_client = StorageClient::new(validator_store.clone());\n\n    let certificates_synced = fullnode_store\n        .count_certificates_delivered()\n        .map_err(|error| format!(\"Unable to count certificates delivered: {error}\"))?;\n\n    let pending_certificates = validator_store\n        .pending_pool_size()\n        .map_err(|error| format!(\"Unable to count pending certificates: {error}\"))?;\n\n    let precedence_pool_certificates = validator_store\n        .precedence_pool_size()\n        .map_err(|error| format!(\"Unable to count precedence pool certificates: {error}\"))?;\n\n    info!(\n        \"Storage initialized with {} certificates delivered, {} pending certificates and {} \\\n         certificates in the precedence pool\",\n        certificates_synced, pending_certificates, precedence_pool_certificates\n    );\n\n    let grpc_context = GrpcContext::default().with_router(\n        GrpcRouter::new(tonic::transport::Server::builder()).add_service(\n            SynchronizerServiceServer::new(SynchronizerService {\n                validator_store: validator_store.clone(),\n            }),\n        ),\n    );\n\n    let (network_client, mut event_stream, network_runtime) = topos_p2p::network::builder()\n        .peer_key(key)\n        .listen_addresses(config.p2p.listen_addresses.clone())\n        .minimum_cluster_size(config.minimum_tce_cluster_size)\n        .public_addresses(config.p2p.public_addresses.clone())\n        .known_peers(&boot_peers)\n        .grpc_context(grpc_context)\n        .build()\n        .await?;\n\n    debug!(\"Starting the p2p network\");\n    let 
_network_handle = network_runtime.bootstrap(&mut event_stream).await?;\n    debug!(\"P2P layer bootstrapped\");\n\n    debug!(\"Creating the Synchronizer\");\n\n    let (synchronizer_runtime, synchronizer_stream) =\n        topos_tce_synchronizer::Synchronizer::builder()\n            .with_config(config.synchronization.clone())\n            .with_shutdown(shutdown.0.child_token())\n            .with_store(validator_store.clone())\n            .with_network_client(network_client.clone())\n            .build()?;\n\n    debug!(\"Synchronizer created\");\n\n    debug!(\"Starting gRPC api\");\n    let (broadcast_sender, broadcast_receiver) = broadcast::channel(BROADCAST_CHANNEL_SIZE);\n\n    let (api_client, api_stream, ctx) = topos_tce_api::Runtime::builder()\n        .with_peer_id(peer_id.to_string())\n        .with_broadcast_stream(broadcast_receiver.resubscribe())\n        .serve_grpc_addr(config.grpc_api_addr)\n        .serve_graphql_addr(config.graphql_api_addr)\n        .serve_metrics_addr(config.metrics_api_addr)\n        .store(validator_store.clone())\n        .storage(storage_client.clone())\n        .build_and_launch()\n        .await;\n    debug!(\"gRPC api started\");\n\n    // Healthiness phase - stop\n\n    debug!(\"Starting the gatekeeper\");\n    let (gatekeeper_client, gatekeeper_runtime) =\n        topos_tce_gatekeeper::Gatekeeper::builder().await?;\n\n    spawn(gatekeeper_runtime.into_future());\n    debug!(\"Gatekeeper started\");\n\n    debug!(\"Starting reliable broadcast\");\n\n    let (tce_cli, tce_stream) = ReliableBroadcastClient::new(\n        ReliableBroadcastConfig {\n            tce_params: config.tce_params.clone(),\n            validator_id,\n            validators: config.validators.clone(),\n            message_signer,\n        },\n        validator_store.clone(),\n        broadcast_sender,\n    )\n    .await;\n\n    debug!(\"Reliable broadcast started\");\n\n    spawn(synchronizer_runtime.into_future());\n    // setup 
transport-tce-storage-api connector\n    let (app_context, _tce_stream) = AppContext::new(\n        is_validator,\n        storage_client,\n        tce_cli,\n        network_client,\n        api_client,\n        gatekeeper_client,\n        validator_store,\n        ctx,\n    );\n\n    Ok(app_context.run(\n        event_stream,\n        tce_stream,\n        api_stream,\n        synchronizer_stream,\n        BroadcastStream::new(broadcast_receiver).filter_map(|v| futures::future::ready(v.ok())),\n        shutdown,\n    ))\n}\n"
  },
  {
    "path": "crates/topos-tce/src/tests/api.rs",
    "content": "use std::sync::Arc;\n\nuse rstest::rstest;\nuse test_log::test;\nuse tokio::sync::{mpsc, oneshot};\nuse topos_crypto::messages::MessageSigner;\nuse topos_tce_storage::{store::WriteStore, types::PendingResult};\nuse topos_test_sdk::{\n    certificates::create_certificate_chain,\n    constants::{SOURCE_SUBNET_ID_1, TARGET_SUBNET_ID_1},\n};\n\nuse crate::AppContext;\n\nuse super::setup_test;\n\n#[rstest]\n#[test(tokio::test)]\nasync fn handle_new_certificate(\n    #[future] setup_test: (\n        AppContext,\n        mpsc::Receiver<topos_p2p::Command>,\n        Arc<MessageSigner>,\n    ),\n) {\n    let (mut context, _, _) = setup_test.await;\n    let mut certificates = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 1);\n    let certificate = certificates.pop().unwrap().certificate;\n\n    let (sender, receiver) = oneshot::channel();\n\n    context\n        .on_api_event(topos_tce_api::RuntimeEvent::CertificateSubmitted {\n            certificate: Box::new(certificate),\n            sender,\n        })\n        .await;\n\n    let response = receiver.await;\n\n    assert!(matches!(response, Ok(Ok(PendingResult::InPending(_)))));\n}\n\n#[rstest]\n#[test(tokio::test)]\nasync fn handle_certificate_in_precedence_pool(\n    #[future] setup_test: (\n        AppContext,\n        mpsc::Receiver<topos_p2p::Command>,\n        Arc<MessageSigner>,\n    ),\n) {\n    let (mut context, _, _) = setup_test.await;\n    let mut certificates = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 2);\n    let certificate = certificates.pop().unwrap().certificate;\n\n    let (sender, receiver) = oneshot::channel();\n\n    context\n        .on_api_event(topos_tce_api::RuntimeEvent::CertificateSubmitted {\n            certificate: Box::new(certificate),\n            sender,\n        })\n        .await;\n\n    let response = receiver.await;\n\n    assert!(matches!(response, 
Ok(Ok(PendingResult::AwaitPrecedence))));\n}\n\n#[rstest]\n#[test(tokio::test)]\nasync fn handle_certificate_already_delivered(\n    #[future] setup_test: (\n        AppContext,\n        mpsc::Receiver<topos_p2p::Command>,\n        Arc<MessageSigner>,\n    ),\n) {\n    let (mut context, _, _) = setup_test.await;\n    let mut certificates = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 1);\n    let certificate_delivered = certificates.pop().unwrap();\n\n    _ = context\n        .validator_store\n        .insert_certificate_delivered(&certificate_delivered)\n        .await\n        .unwrap();\n\n    let certificate = certificate_delivered.certificate;\n\n    let (sender, receiver) = oneshot::channel();\n\n    context\n        .on_api_event(topos_tce_api::RuntimeEvent::CertificateSubmitted {\n            certificate: Box::new(certificate),\n            sender,\n        })\n        .await;\n\n    let response = receiver.await;\n\n    assert!(matches!(response, Ok(Ok(PendingResult::AlreadyDelivered))));\n}\n"
  },
  {
    "path": "crates/topos-tce/src/tests/mod.rs",
    "content": "use libp2p::PeerId;\nuse rstest::{fixture, rstest};\nuse std::{collections::HashSet, future::IntoFuture, sync::Arc};\nuse tokio_stream::Stream;\nuse topos_tce_api::RuntimeEvent;\nuse topos_tce_broadcast::event::ProtocolEvents;\nuse topos_tce_gatekeeper::Gatekeeper;\n\nuse tokio::sync::{broadcast, mpsc};\nuse topos_crypto::messages::MessageSigner;\nuse topos_p2p::{utils::GrpcOverP2P, NetworkClient};\nuse topos_tce_broadcast::{ReliableBroadcastClient, ReliableBroadcastConfig};\nuse topos_tce_storage::{validator::ValidatorStore, StorageClient};\nuse topos_test_sdk::{\n    certificates::create_certificate_chain,\n    constants::{CERTIFICATE_ID_1, SOURCE_SUBNET_ID_1, TARGET_SUBNET_ID_1},\n    storage::create_validator_store,\n    tce::public_api::{create_public_api, PublicApiContext},\n};\n\nuse crate::AppContext;\n\nmod api;\nmod network;\n\n#[rstest]\n#[tokio::test]\nasync fn non_validator_publish_gossip(\n    #[future] setup_test: (\n        AppContext,\n        mpsc::Receiver<topos_p2p::Command>,\n        Arc<MessageSigner>,\n    ),\n) {\n    let (mut context, mut p2p_receiver, _) = setup_test.await;\n    let certificates = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 1);\n    context\n        .on_protocol_event(ProtocolEvents::Gossip {\n            cert: certificates[0].certificate.clone(),\n        })\n        .await;\n\n    assert!(matches!(\n        p2p_receiver.try_recv(),\n        Ok(topos_p2p::Command::Gossip { topic, .. 
}) if topic == \"topos_gossip\"\n    ));\n}\n\n#[rstest]\n#[tokio::test]\nasync fn non_validator_do_not_publish_echo(\n    #[future] setup_test: (\n        AppContext,\n        mpsc::Receiver<topos_p2p::Command>,\n        Arc<MessageSigner>,\n    ),\n) {\n    let (mut context, mut p2p_receiver, message_signer) = setup_test.await;\n    context\n        .on_protocol_event(ProtocolEvents::Echo {\n            certificate_id: CERTIFICATE_ID_1,\n            signature: message_signer.sign_message(&[]).ok().unwrap(),\n            validator_id: message_signer.public_address.into(),\n        })\n        .await;\n\n    assert!(p2p_receiver.try_recv().is_err(),);\n}\n\n#[rstest]\n#[tokio::test]\nasync fn non_validator_do_not_publish_ready(\n    #[future] setup_test: (\n        AppContext,\n        mpsc::Receiver<topos_p2p::Command>,\n        Arc<MessageSigner>,\n    ),\n) {\n    let (mut context, mut p2p_receiver, message_signer) = setup_test.await;\n    context\n        .on_protocol_event(ProtocolEvents::Ready {\n            certificate_id: CERTIFICATE_ID_1,\n            signature: message_signer.sign_message(&[]).ok().unwrap(),\n            validator_id: message_signer.public_address.into(),\n        })\n        .await;\n\n    assert!(p2p_receiver.try_recv().is_err(),);\n}\n\n#[fixture]\npub async fn setup_test(\n    #[future] create_validator_store: Arc<ValidatorStore>,\n    #[future] create_public_api: (PublicApiContext, impl Stream<Item = RuntimeEvent>),\n) -> (\n    AppContext,\n    mpsc::Receiver<topos_p2p::Command>,\n    Arc<MessageSigner>,\n) {\n    let validator_store = create_validator_store.await;\n    let is_validator = false;\n    let message_signer = Arc::new(MessageSigner::new(&[5u8; 32]).unwrap());\n    let validator_id = message_signer.public_address.into();\n\n    let (broadcast_sender, _) = broadcast::channel(1);\n\n    let (tce_cli, _) = ReliableBroadcastClient::new(\n        ReliableBroadcastConfig {\n            tce_params: 
topos_config::tce::broadcast::ReliableBroadcastParams::default(),\n            validator_id,\n            validators: HashSet::new(),\n            message_signer: message_signer.clone(),\n        },\n        validator_store.clone(),\n        broadcast_sender,\n    )\n    .await;\n\n    let (shutdown_p2p, _) = mpsc::channel(1);\n    let (p2p_sender, p2p_receiver) = mpsc::channel(1);\n    let grpc_over_p2p = GrpcOverP2P::new(p2p_sender.clone());\n    let network_client = NetworkClient {\n        retry_ttl: 10,\n        local_peer_id: PeerId::random(),\n        sender: p2p_sender,\n        grpc_over_p2p,\n        shutdown_channel: shutdown_p2p,\n    };\n\n    let (api_context, _api_stream) = create_public_api.await;\n    let api_client = api_context.client;\n\n    let (gatekeeper_client, _) = Gatekeeper::builder().into_future().await.unwrap();\n\n    let (context, _) = AppContext::new(\n        is_validator,\n        StorageClient::new(validator_store.clone()),\n        tce_cli,\n        network_client,\n        api_client,\n        gatekeeper_client,\n        validator_store,\n        api_context.api_context.unwrap(),\n    );\n\n    (context, p2p_receiver, message_signer)\n}\n"
  },
  {
    "path": "crates/topos-tce/src/tests/network.rs",
    "content": "use std::sync::Arc;\n\nuse libp2p::PeerId;\nuse prost::Message;\nuse rstest::rstest;\nuse test_log::test;\nuse tokio::sync::mpsc;\nuse topos_core::api::grpc::tce::v1::{double_echo_request, DoubleEchoRequest, Echo, Gossip, Ready};\nuse topos_crypto::{messages::MessageSigner, validator_id::ValidatorId};\nuse topos_tce_storage::store::WriteStore;\nuse topos_test_sdk::{\n    certificates::create_certificate_chain,\n    constants::{SOURCE_SUBNET_ID_1, TARGET_SUBNET_ID_1},\n};\n\nuse crate::AppContext;\n\nuse super::setup_test;\n\n#[rstest]\n#[test(tokio::test)]\nasync fn handle_gossip(\n    #[future] setup_test: (\n        AppContext,\n        mpsc::Receiver<topos_p2p::Command>,\n        Arc<MessageSigner>,\n    ),\n) {\n    let (mut context, _, _) = setup_test.await;\n    let mut certificates = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 1);\n    let certificate = certificates.pop().unwrap().certificate;\n\n    let msg = DoubleEchoRequest {\n        request: Some(double_echo_request::Request::Gossip(Gossip {\n            certificate: Some(certificate.into()),\n        })),\n    };\n    context\n        .on_net_event(topos_p2p::Event::Gossip {\n            from: PeerId::random(),\n            data: msg.encode_to_vec(),\n        })\n        .await;\n}\n\n#[rstest]\n#[test(tokio::test)]\nasync fn handle_echo(\n    #[future] setup_test: (\n        AppContext,\n        mpsc::Receiver<topos_p2p::Command>,\n        Arc<MessageSigner>,\n    ),\n) {\n    let (mut context, _, message_signer) = setup_test.await;\n    let mut certificates = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 1);\n    let certificate = certificates.pop().unwrap().certificate;\n    let validator_id: ValidatorId = message_signer.public_address.into();\n\n    let msg = DoubleEchoRequest {\n        request: Some(double_echo_request::Request::Echo(Echo {\n            certificate_id: Some(certificate.id.into()),\n            signature: 
Some(message_signer.sign_message(&[]).ok().unwrap().into()),\n            validator_id: Some(validator_id.into()),\n        })),\n    };\n    context\n        .on_net_event(topos_p2p::Event::Gossip {\n            from: PeerId::random(),\n            data: msg.encode_to_vec(),\n        })\n        .await;\n}\n\n#[rstest]\n#[test(tokio::test)]\nasync fn handle_ready(\n    #[future] setup_test: (\n        AppContext,\n        mpsc::Receiver<topos_p2p::Command>,\n        Arc<MessageSigner>,\n    ),\n) {\n    let (mut context, _, message_signer) = setup_test.await;\n    let mut certificates = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 1);\n    let certificate = certificates.pop().unwrap().certificate;\n    let validator_id: ValidatorId = message_signer.public_address.into();\n\n    let msg = DoubleEchoRequest {\n        request: Some(double_echo_request::Request::Ready(Ready {\n            certificate_id: Some(certificate.id.into()),\n            signature: Some(message_signer.sign_message(&[]).ok().unwrap().into()),\n            validator_id: Some(validator_id.into()),\n        })),\n    };\n    context\n        .on_net_event(topos_p2p::Event::Gossip {\n            from: PeerId::random(),\n            data: msg.encode_to_vec(),\n        })\n        .await;\n}\n\n#[rstest]\n#[test(tokio::test)]\nasync fn handle_already_delivered(\n    #[future] setup_test: (\n        AppContext,\n        mpsc::Receiver<topos_p2p::Command>,\n        Arc<MessageSigner>,\n    ),\n) {\n    let (mut context, _, _) = setup_test.await;\n    let mut certificates = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 1);\n    let certificate_delivered = certificates.pop().unwrap();\n    let certificate = certificate_delivered.certificate.clone();\n\n    let msg = DoubleEchoRequest {\n        request: Some(double_echo_request::Request::Gossip(Gossip {\n            certificate: Some(certificate.into()),\n        })),\n    };\n    _ = context\n        
.validator_store\n        .insert_certificate_delivered(&certificate_delivered)\n        .await\n        .unwrap();\n\n    context\n        .on_net_event(topos_p2p::Event::Gossip {\n            from: PeerId::random(),\n            data: msg.encode_to_vec(),\n        })\n        .await;\n}\n"
  },
  {
    "path": "crates/topos-tce-api/Cargo.toml",
    "content": "[package]\nname = \"topos-tce-api\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n[lints]\nworkspace = true\n\n[dependencies]\ntopos-p2p = { path = \"../topos-p2p\" }\ntopos-core = { workspace = true, features = [\"uci\", \"api\"] }\ntopos-metrics = { path = \"../topos-metrics\" }\ntopos-tce-storage = { path = \"../topos-tce-storage\" }\n\nasync-graphql-axum.workspace = true\nasync-graphql.workspace = true\nasync-stream.workspace = true\nasync-trait.workspace = true\naxum.workspace = true\nbase64ct.workspace = true\nfutures.workspace = true\nhex.workspace = true\nhttp.workspace = true\nhyper.workspace = true\nprometheus-client.workspace = true\nserde.workspace = true\nthiserror.workspace = true\ntokio-stream.workspace = true\ntokio.workspace = true\ntokio-util.workspace = true\ntonic.workspace = true\ntower-http.workspace = true\ntower.workspace = true\ntracing.workspace = true\nuuid.workspace = true\n\ntonic-health = \"0.11.0\"\ntonic-reflection = \"0.11.0\"\npin-project = \"1.0.12\"\nasync-recursion = \"1.0\"\n\n[dev-dependencies]\nbytes.workspace = true\nprost.workspace = true\ntest-log.workspace = true\nreqwest.workspace = true\nserde_json.workspace = true\ntracing-subscriber = { workspace = true, features = [\"env-filter\", \"fmt\"] }\nenv_logger.workspace = true\nhttp = \"0.2.8\"\nhttp-body = \"0.4.5\"\nrstest = { workspace = true, features = [\"async-timeout\"] }\ntopos-test-sdk = { path = \"../topos-test-sdk/\" }\n"
  },
  {
    "path": "crates/topos-tce-api/src/graphql/builder.rs",
    "content": "use std::{net::SocketAddr, sync::Arc};\n\nuse async_graphql::{EmptyMutation, Schema};\nuse async_graphql_axum::GraphQLSubscription;\nuse axum::{extract::Extension, routing::get, Router, Server};\nuse http::{header, Method};\nuse tokio::sync::mpsc;\nuse tower_http::cors::{Any, CorsLayer};\n\nuse crate::{\n    graphql::{\n        query::{QueryRoot, ServiceSchema},\n        routes::{graphql_playground, health},\n    },\n    runtime::InternalRuntimeCommand,\n};\nuse topos_tce_storage::validator::ValidatorStore;\n\nuse super::query::SubscriptionRoot;\n\n#[derive(Default)]\npub struct ServerBuilder {\n    store: Option<Arc<ValidatorStore>>,\n    serve_addr: Option<SocketAddr>,\n    runtime: Option<mpsc::Sender<InternalRuntimeCommand>>,\n}\n\nimpl ServerBuilder {\n    /// Sets the runtime command channel\n    ///\n    /// Mostly used to manage Transient streams\n    pub(crate) fn runtime(mut self, runtime: mpsc::Sender<InternalRuntimeCommand>) -> Self {\n        self.runtime = Some(runtime);\n\n        self\n    }\n    pub(crate) fn store(mut self, store: Arc<ValidatorStore>) -> Self {\n        self.store = Some(store);\n\n        self\n    }\n\n    pub(crate) fn serve_addr(mut self, addr: Option<SocketAddr>) -> Self {\n        self.serve_addr = addr;\n\n        self\n    }\n\n    pub async fn build(\n        mut self,\n    ) -> Server<hyper::server::conn::AddrIncoming, axum::routing::IntoMakeService<Router>> {\n        let cors = CorsLayer::new()\n            // allow `GET` and `POST` when accessing the resource\n            .allow_methods([Method::GET, Method::POST])\n            // allow 'application/json' requests\n            .allow_headers([header::CONTENT_TYPE])\n            // allow requests from any origin\n            .allow_origin(Any);\n\n        let store = self\n            .store\n            .take()\n            .expect(\"Cannot build GraphQL server without a FullNode store\");\n\n        let fullnode_store = store.fullnode_store();\n       
 let runtime = self\n            .runtime\n            .take()\n            .expect(\"Cannot build GraphQL server without the internal runtime channel\");\n\n        let schema: ServiceSchema = Schema::build(QueryRoot, EmptyMutation, SubscriptionRoot)\n            .data(store)\n            .data(fullnode_store)\n            .data(runtime)\n            .finish();\n\n        let app = Router::new()\n            .route(\n                \"/\",\n                get(graphql_playground)\n                    .post_service(async_graphql_axum::GraphQL::new(schema.clone())),\n            )\n            .route_service(\"/ws\", GraphQLSubscription::new(schema.clone()))\n            .route(\"/health\", get(health))\n            .layer(cors)\n            .layer(Extension(schema));\n\n        let serve_addr = self.serve_addr.take().expect(\"Server address is not set\");\n        Server::bind(&serve_addr).serve(app.into_make_service())\n    }\n}\n"
  },
  {
    "path": "crates/topos-tce-api/src/graphql/filter.rs",
    "content": "pub(crate) enum FilterIs {\n    Source,\n    Target,\n}\n"
  },
  {
    "path": "crates/topos-tce-api/src/graphql/mod.rs",
    "content": "pub mod builder;\nmod filter;\nmod query;\nmod routes;\n#[cfg(test)]\nmod tests;\n"
  },
  {
    "path": "crates/topos-tce-api/src/graphql/query.rs",
    "content": "use std::collections::HashMap;\nuse std::sync::Arc;\n\nuse async_graphql::{Context, EmptyMutation, Object, Schema, Subscription};\nuse async_trait::async_trait;\nuse futures::{Stream, StreamExt};\nuse tokio::sync::{mpsc, oneshot};\nuse topos_core::api::graphql::certificate::UndeliveredCertificate;\nuse topos_core::api::graphql::checkpoint::SourceStreamPosition;\nuse topos_core::api::graphql::errors::GraphQLServerError;\nuse topos_core::api::graphql::filter::SubnetFilter;\nuse topos_core::api::graphql::{\n    certificate::{Certificate, CertificateId},\n    checkpoint::SourceCheckpointInput,\n    query::CertificateQuery,\n};\nuse topos_core::types::stream::CertificateSourceStreamPosition;\nuse topos_metrics::{STORAGE_PENDING_POOL_COUNT, STORAGE_PRECEDENCE_POOL_COUNT};\nuse topos_tce_storage::fullnode::FullNodeStore;\nuse topos_tce_storage::store::ReadStore;\n\nuse topos_tce_storage::validator::ValidatorStore;\nuse tracing::debug;\n\nuse crate::runtime::InternalRuntimeCommand;\nuse crate::stream::TransientStream;\n\nuse super::filter::FilterIs;\n\npub struct QueryRoot;\npub(crate) type ServiceSchema = Schema<QueryRoot, EmptyMutation, SubscriptionRoot>;\n\n#[async_trait]\nimpl CertificateQuery for QueryRoot {\n    async fn certificates_per_subnet(\n        ctx: &Context<'_>,\n        from_source_checkpoint: SourceCheckpointInput,\n        first: usize,\n    ) -> Result<Vec<Certificate>, GraphQLServerError> {\n        let store = ctx.data::<Arc<FullNodeStore>>().map_err(|_| {\n            tracing::error!(\"Failed to get store from context\");\n\n            GraphQLServerError::ParseDataConnector\n        })?;\n\n        let mut certificates = Vec::default();\n\n        for (index, _) in from_source_checkpoint.source_subnet_ids.iter().enumerate() {\n            let subnet_id: topos_core::uci::SubnetId = (&from_source_checkpoint.positions[index]\n                .source_subnet_id)\n                .try_into()\n                .map_err(|_| 
GraphQLServerError::ParseSubnetId)?;\n\n            let position = from_source_checkpoint.positions[index].position.into();\n\n            let certificates_with_position = store\n                .get_source_stream_certificates_from_position(\n                    CertificateSourceStreamPosition {\n                        subnet_id,\n                        position,\n                    },\n                    first,\n                )\n                .map_err(|_| GraphQLServerError::StorageError)?;\n\n            debug!(\"Returned from storage: {certificates_with_position:?}\");\n            certificates.extend(\n                certificates_with_position\n                    .into_iter()\n                    .map(|(ref c, _)| c.into()),\n            );\n        }\n\n        Ok(certificates)\n    }\n\n    async fn certificate_by_id(\n        ctx: &Context<'_>,\n        certificate_id: CertificateId,\n    ) -> Result<Certificate, GraphQLServerError> {\n        let store = ctx.data::<Arc<FullNodeStore>>().map_err(|_| {\n            tracing::error!(\"Failed to get storage client from context\");\n\n            GraphQLServerError::ParseDataConnector\n        })?;\n\n        store\n            .get_certificate(\n                &certificate_id\n                    .try_into()\n                    .map_err(|_| GraphQLServerError::ParseCertificateId)?,\n            )\n            .map_err(|_| GraphQLServerError::StorageError)\n            .and_then(|c| {\n                c.map(|ref c| c.into())\n                    .ok_or(GraphQLServerError::StorageError)\n            })\n    }\n}\n\n#[Object]\nimpl QueryRoot {\n    /// The endpoint for the GraphQL API, calling our trait implementation on the QueryRoot object\n    async fn certificates(\n        &self,\n        ctx: &Context<'_>,\n        from_source_checkpoint: SourceCheckpointInput,\n        first: usize,\n    ) -> Result<Vec<Certificate>, GraphQLServerError> {\n        Self::certificates_per_subnet(ctx, 
from_source_checkpoint, first).await\n    }\n\n    async fn certificate(\n        &self,\n        ctx: &Context<'_>,\n        certificate_id: CertificateId,\n    ) -> Result<Certificate, GraphQLServerError> {\n        Self::certificate_by_id(ctx, certificate_id).await\n    }\n\n    /// This endpoint is used to get the current storage pool stats.\n    /// It returns the number of certificates in the pending and precedence pools.\n    /// The values are estimated as having a precise count is costly.\n    async fn get_storage_pool_stats(\n        &self,\n        ctx: &Context<'_>,\n    ) -> Result<HashMap<&str, i64>, GraphQLServerError> {\n        let mut stats = HashMap::new();\n        stats.insert(\"metrics_pending_pool\", STORAGE_PENDING_POOL_COUNT.get());\n        stats.insert(\n            \"metrics_precedence_pool\",\n            STORAGE_PRECEDENCE_POOL_COUNT.get(),\n        );\n\n        let store = ctx.data::<Arc<ValidatorStore>>().map_err(|_| {\n            tracing::error!(\"Failed to get store from context\");\n\n            GraphQLServerError::ParseDataConnector\n        })?;\n\n        stats.insert(\n            \"count_pending_certificates\",\n            store\n                .iter_pending_pool()\n                .map_err(|_| GraphQLServerError::StorageError)?\n                .count()\n                .try_into()\n                .unwrap_or(i64::MAX),\n        );\n\n        stats.insert(\n            \"count_precedence_certificates\",\n            store\n                .iter_precedence_pool()\n                .map_err(|_| GraphQLServerError::StorageError)?\n                .count()\n                .try_into()\n                .unwrap_or(i64::MAX),\n        );\n\n        stats.insert(\n            \"pending_pool_size\",\n            store\n                .pending_pool_size()\n                .map_err(|_| GraphQLServerError::StorageError)?\n                .try_into()\n                .unwrap_or(i64::MAX),\n        );\n\n        stats.insert(\n       
     \"precedence_pool_size\",\n            store\n                .precedence_pool_size()\n                .map_err(|_| GraphQLServerError::StorageError)?\n                .try_into()\n                .unwrap_or(i64::MAX),\n        );\n\n        Ok(stats)\n    }\n\n    /// This endpoint is used to get the current checkpoint of the source streams.\n    /// The checkpoint is the position of the last certificate delivered for each source stream.\n    async fn get_checkpoint(\n        &self,\n        ctx: &Context<'_>,\n    ) -> Result<Vec<SourceStreamPosition>, GraphQLServerError> {\n        let store = ctx.data::<Arc<FullNodeStore>>().map_err(|_| {\n            tracing::error!(\"Failed to get store from context\");\n\n            GraphQLServerError::ParseDataConnector\n        })?;\n\n        let checkpoint = store\n            .get_checkpoint()\n            .map_err(|_| GraphQLServerError::StorageError)?;\n\n        Ok(checkpoint\n            .iter()\n            .map(|(subnet_id, head)| SourceStreamPosition {\n                source_subnet_id: subnet_id.into(),\n                position: *head.position,\n                certificate_id: head.certificate_id.into(),\n            })\n            .collect())\n    }\n\n    /// This endpoint is used to get the current pending pool.\n    /// It returns [`CertificateId`] and the [`PendingCertificateId`]\n    async fn get_pending_pool(\n        &self,\n        ctx: &Context<'_>,\n    ) -> Result<HashMap<u64, CertificateId>, GraphQLServerError> {\n        let store = ctx.data::<Arc<ValidatorStore>>().map_err(|_| {\n            tracing::error!(\"Failed to get store from context\");\n\n            GraphQLServerError::ParseDataConnector\n        })?;\n\n        Ok(store\n            .iter_pending_pool()\n            .map_err(|_| GraphQLServerError::StorageError)?\n            .map(|(id, certificate)| (id, certificate.id.into()))\n            .collect())\n    }\n\n    /// This endpoint is used to check if a certificate has any 
child certificate in the precedence pool.\n    async fn check_precedence(\n        &self,\n        ctx: &Context<'_>,\n        certificate_id: CertificateId,\n    ) -> Result<Option<UndeliveredCertificate>, GraphQLServerError> {\n        let store = ctx.data::<Arc<ValidatorStore>>().map_err(|_| {\n            tracing::error!(\"Failed to get store from context\");\n\n            GraphQLServerError::ParseDataConnector\n        })?;\n\n        store\n            .check_precedence(\n                &certificate_id\n                    .try_into()\n                    .map_err(|_| GraphQLServerError::ParseCertificateId)?,\n            )\n            .map_err(|_| GraphQLServerError::StorageError)\n            .map(|certificate| certificate.as_ref().map(Into::into))\n    }\n}\n\npub struct SubscriptionRoot;\n\nimpl SubscriptionRoot {\n    /// Try to create a new [`Stream`] of delivered [`Certificate`]s to be used in a GraphQL subscription.\n    pub(crate) async fn new_transient_stream(\n        &self,\n        register: &mpsc::Sender<InternalRuntimeCommand>,\n        filter: Option<SubnetFilter>,\n    ) -> Result<impl Stream<Item = Certificate>, GraphQLServerError> {\n        let (sender, receiver) = oneshot::channel();\n        _ = register\n            .send(InternalRuntimeCommand::NewTransientStream { sender })\n            .await;\n\n        let stream: TransientStream = receiver\n            .await\n            .map_err(|_| {\n                GraphQLServerError::InternalError(\n                    \"Communication error trying to create a new transient stream\",\n                )\n            })?\n            .map_err(|e| GraphQLServerError::TransientStream(e.to_string()))?;\n\n        let filter: Option<(FilterIs, topos_core::uci::SubnetId)> = filter\n            .map(|value| match value {\n                SubnetFilter::Target(ref id) => id.try_into().map(|v| (FilterIs::Target, v)),\n                SubnetFilter::Source(ref id) => id.try_into().map(|v| 
(FilterIs::Source, v)),\n            })\n            .map_or(Ok(None), |v| v.map(Some))\n            .map_err(|_| GraphQLServerError::ParseSubnetId)?;\n\n        Ok(stream\n            .filter(move |c| {\n                futures::future::ready(\n                    filter\n                        .as_ref()\n                        .map(|v| match v {\n                            (FilterIs::Source, id) => id == &c.certificate.source_subnet_id,\n                            (FilterIs::Target, id) => c.certificate.target_subnets.contains(id),\n                        })\n                        .unwrap_or(true),\n                )\n            })\n            .map(|c| c.as_ref().into()))\n    }\n}\n\n#[Subscription]\nimpl SubscriptionRoot {\n    /// This endpoint is used to receive delivered certificates.\n    /// It uses a transient stream, which is a stream that is only valid for the current connection.\n    ///\n    /// Closing the connection will close the stream.\n    /// Starting a new connection will start a new stream and the client will not receive\n    /// any certificates that were delivered before the connection was started.\n    async fn watch_delivered_certificates(\n        &self,\n        ctx: &Context<'_>,\n        filter: Option<SubnetFilter>,\n    ) -> Result<impl Stream<Item = Certificate>, GraphQLServerError> {\n        let register = ctx\n            .data::<mpsc::Sender<InternalRuntimeCommand>>()\n            .map_err(|_| {\n                tracing::error!(\"Failed to get the transient register client from context\");\n\n                GraphQLServerError::ParseDataConnector\n            })?;\n\n        self.new_transient_stream(register, filter).await\n    }\n}\n"
  },
  {
    "path": "crates/topos-tce-api/src/graphql/routes.rs",
    "content": "use async_graphql::http::GraphiQLSource;\nuse axum::{\n    http::StatusCode,\n    response::{Html, IntoResponse},\n    Json,\n};\nuse serde::Serialize;\n\n#[derive(Serialize)]\nstruct Health {\n    healthy: bool,\n}\n\npub(crate) async fn health() -> impl IntoResponse {\n    let health = Health { healthy: true };\n    (StatusCode::OK, Json(health))\n}\n\n/// Build a GraphQL playground\npub async fn graphql_playground() -> impl IntoResponse {\n    Html(\n        GraphiQLSource::build()\n            .endpoint(\"/\")\n            .subscription_endpoint(\"/ws\")\n            .finish(),\n    )\n}\n"
  },
  {
    "path": "crates/topos-tce-api/src/graphql/tests.rs",
    "content": "use std::{sync::Arc, time::Duration};\n\nuse crate::{\n    graphql::query::{QueryRoot, SubscriptionRoot},\n    runtime::InternalRuntimeCommand,\n    stream::TransientStream,\n};\nuse async_graphql::{http, value, EmptyMutation, Schema};\nuse futures::{SinkExt, StreamExt};\nuse rstest::rstest;\nuse test_log::test;\nuse tokio::sync::{mpsc, oneshot};\nuse topos_core::{\n    types::stream::Position,\n    uci::{SubnetId, INITIAL_CERTIFICATE_ID},\n};\nuse topos_test_sdk::{\n    certificates::{create_certificate, create_certificate_at_position},\n    constants::{SOURCE_SUBNET_ID_2, TARGET_SUBNET_ID_3},\n};\nuse uuid::Uuid;\n\n#[rstest]\n#[test(tokio::test)]\n#[timeout(Duration::from_secs(2))]\nasync fn requesting_transient_stream_from_graphql() {\n    let (sender, mut receiver) = mpsc::channel(1);\n\n    tokio::spawn(async move {\n        let mut v = Vec::new();\n        while let Some(query) = receiver.recv().await {\n            if let InternalRuntimeCommand::NewTransientStream { sender } = query {\n                let (notifier, notifier_receiver) = oneshot::channel();\n                v.push(notifier_receiver);\n\n                let (_s, inner) = mpsc::channel(10);\n                _ = sender.send(Ok(TransientStream {\n                    stream_id: Uuid::new_v4(),\n                    notifier: Some(notifier),\n                    inner,\n                }));\n            }\n        }\n    });\n\n    let root = SubscriptionRoot {};\n\n    let result = root.new_transient_stream(&sender, None).await;\n\n    assert!(result.is_ok());\n}\n\n#[rstest]\n#[timeout(Duration::from_secs(4))]\n#[test(tokio::test)]\nasync fn open_watch_certificate_delivered() {\n    let (mut tx, rx) = futures::channel::mpsc::unbounded();\n    let (sender, mut receiver): (mpsc::Sender<InternalRuntimeCommand>, _) = mpsc::channel(1);\n\n    tokio::spawn(async move {\n        let mut v = Vec::new();\n        while let Some(query) = receiver.recv().await {\n            if let 
InternalRuntimeCommand::NewTransientStream { sender } = query {\n                let (notifier, notifier_receiver) = oneshot::channel();\n                v.push(notifier_receiver);\n\n                let (notify, inner) = mpsc::channel(10);\n                _ = sender.send(Ok(TransientStream {\n                    stream_id: Uuid::new_v4(),\n                    notifier: Some(notifier),\n                    inner,\n                }));\n\n                tokio::time::sleep(Duration::from_millis(10)).await;\n\n                let certificate = create_certificate_at_position(\n                    Position::ZERO,\n                    create_certificate(\n                        SOURCE_SUBNET_ID_2,\n                        &[TARGET_SUBNET_ID_3],\n                        Some(INITIAL_CERTIFICATE_ID),\n                    ),\n                );\n\n                _ = notify.send(Arc::new(certificate)).await;\n            }\n        }\n    });\n    let subscription = SubscriptionRoot {};\n    let schema = Schema::build(QueryRoot, EmptyMutation, subscription)\n        .data(sender)\n        .finish();\n\n    let mut stream = http::WebSocket::new(schema, rx, http::WebSocketProtocols::GraphQLWS);\n\n    tx.send(\n        serde_json::to_string(&value!({\n            \"type\": \"connection_init\",\n        }))\n        .unwrap(),\n    )\n    .await\n    .unwrap();\n\n    assert_eq!(\n        serde_json::from_str::<serde_json::Value>(&stream.next().await.unwrap().unwrap_text())\n            .unwrap(),\n        serde_json::json!({\n            \"type\": \"connection_ack\",\n        }),\n    );\n\n    tx.send(\n        serde_json::to_string(&value!({\n            \"type\": \"start\",\n            \"id\": \"1\",\n            \"payload\": {\n                \"query\": \"subscription onCertificates {\n                              watchDeliveredCertificates {\n                                id\n                                prevId\n                                proof\n          
                      signature\n                                sourceSubnetId\n                                stateRoot\n                                targetSubnets\n                                txRootHash\n                                receiptsRootHash\n                                verifier\n                                positions {\n                                  source {\n                                    sourceSubnetId\n                                    position\n                                    certificateId\n                                  }\n                                }\n                              }\n                            }\"\n            },\n        }))\n        .unwrap(),\n    )\n    .await\n    .unwrap();\n    let certificate =\n        &serde_json::from_str::<serde_json::Value>(&stream.next().await.unwrap().unwrap_text())\n            .unwrap();\n\n    let certificate = serde_json::from_value::<topos_core::api::graphql::certificate::Certificate>(\n        certificate[\"payload\"][\"data\"][\"watchDeliveredCertificates\"].clone(),\n    )\n    .unwrap();\n\n    let subnet_id: SubnetId = (&certificate.source_subnet_id).try_into().unwrap();\n    assert_eq!(subnet_id, SOURCE_SUBNET_ID_2,);\n    assert_eq!(\n        serde_json::from_str::<serde_json::Value>(&stream.next().await.unwrap().unwrap_text())\n            .unwrap(),\n        serde_json::json!({\n            \"type\": \"complete\",\n            \"id\": \"1\",\n        }),\n    );\n}\n"
  },
  {
    "path": "crates/topos-tce-api/src/grpc/builder.rs",
    "content": "use std::{net::SocketAddr, sync::Arc};\n\nuse futures::{future::BoxFuture, FutureExt};\nuse tokio::sync::{mpsc::Sender, RwLock};\nuse tonic_health::server::HealthReporter;\nuse topos_core::api::grpc::tce::v1::{\n    api_service_server::ApiServiceServer, console_service_server::ConsoleServiceServer,\n    StatusResponse,\n};\nuse topos_tce_storage::validator::ValidatorStore;\n\nuse crate::runtime::InternalRuntimeCommand;\n\nuse super::{console::TceConsoleService, TceGrpcService};\n\n#[derive(Default)]\npub struct ServerBuilder {\n    store: Option<Arc<ValidatorStore>>,\n    local_peer_id: String,\n    command_sender: Option<Sender<InternalRuntimeCommand>>,\n    serve_addr: Option<SocketAddr>,\n}\n\nimpl ServerBuilder {\n    pub(crate) fn with_store(mut self, store: Arc<ValidatorStore>) -> Self {\n        self.store = Some(store);\n\n        self\n    }\n\n    pub(crate) fn with_peer_id(mut self, local_peer_id: String) -> Self {\n        self.local_peer_id = local_peer_id;\n\n        self\n    }\n\n    pub(crate) fn command_sender(mut self, sender: Sender<InternalRuntimeCommand>) -> Self {\n        self.command_sender = Some(sender);\n\n        self\n    }\n\n    pub(crate) fn serve_addr(mut self, addr: Option<SocketAddr>) -> Self {\n        self.serve_addr = addr;\n\n        self\n    }\n\n    pub async fn build(\n        mut self,\n    ) -> (\n        HealthReporter,\n        Arc<RwLock<StatusResponse>>,\n        BoxFuture<'static, Result<(), tonic::transport::Error>>,\n    ) {\n        let command_sender = self\n            .command_sender\n            .take()\n            .expect(\"Cannot build gRPC without an InternalRuntimeCommand sender\");\n\n        // We don't do active sampling at the start of the node,\n        // but give it a fixed set of validators from the genesis file.\n        // So as soon as the node starts it is ready to send and receive ECHO messages.\n        let status = Arc::new(RwLock::new(StatusResponse {\n            
has_active_sample: true,\n        }));\n\n        let console = ConsoleServiceServer::new(TceConsoleService {\n            command_sender: command_sender.clone(),\n            status: status.clone(),\n        });\n\n        let store = self\n            .store\n            .take()\n            .expect(\"Cannot build GraphQL server without a FullNode store\");\n\n        let service = ApiServiceServer::new(TceGrpcService {\n            store,\n            command_sender,\n        });\n\n        let (mut health_reporter, health_service) = tonic_health::server::health_reporter();\n\n        health_reporter\n            .set_serving::<ApiServiceServer<TceGrpcService>>()\n            .await;\n\n        let reflexion = tonic_reflection::server::Builder::configure()\n            .register_encoded_file_descriptor_set(topos_core::api::grpc::FILE_DESCRIPTOR_SET)\n            .build()\n            .expect(\"Cannot build gRPC because of FILE_DESCRIPTOR_SET error\");\n\n        let serve_addr = self\n            .serve_addr\n            .take()\n            .expect(\"Cannot build gRPC without a valid serve_addr\");\n\n        let grpc = tonic::transport::Server::builder()\n            .add_service(health_service)\n            .add_service(service)\n            .add_service(console)\n            .add_service(reflexion)\n            .serve(serve_addr)\n            .boxed();\n\n        (health_reporter, status, grpc)\n    }\n}\n"
  },
  {
    "path": "crates/topos-tce-api/src/grpc/console.rs",
    "content": "use std::sync::Arc;\nuse tokio::sync::mpsc::Sender;\n\nuse crate::runtime::InternalRuntimeCommand;\nuse tokio::sync::RwLock;\nuse tonic::{Request, Response, Status};\nuse topos_core::api::grpc::tce::v1::{\n    console_service_server::ConsoleService, StatusRequest, StatusResponse,\n};\n\npub(crate) struct TceConsoleService {\n    // We deliberately keep this unused command_sender because we will need it again in the future.\n    // Keeping it makes the architecture obvious: this is where a command_sender belongs.\n    // One example will be changing validators during the uptime of the network.\n    #[allow(dead_code)]\n    pub(crate) command_sender: Sender<InternalRuntimeCommand>,\n    pub(crate) status: Arc<RwLock<StatusResponse>>,\n}\n\n#[tonic::async_trait]\nimpl ConsoleService for TceConsoleService {\n    async fn status(\n        &self,\n        _request: Request<StatusRequest>,\n    ) -> Result<Response<StatusResponse>, Status> {\n        let status = self.status.read().await;\n\n        Ok(Response::new(status.clone()))\n    }\n}\n"
  },
  {
    "path": "crates/topos-tce-api/src/grpc/messaging.rs",
    "content": "use tonic::Status;\nuse topos_core::api::grpc::checkpoints::{TargetCheckpoint, TargetStreamPosition};\nuse topos_core::api::grpc::tce::v1::watch_certificates_request::Command;\nuse topos_core::api::grpc::tce::v1::watch_certificates_request::OpenStream as GrpcOpenStream;\nuse topos_core::api::grpc::tce::v1::watch_certificates_response::CertificatePushed as GrpcCertificatePushed;\nuse topos_core::api::grpc::tce::v1::watch_certificates_response::Event;\nuse topos_core::api::grpc::tce::v1::watch_certificates_response::StreamOpened as GrpcStreamOpened;\nuse topos_core::types::CertificateDelivered;\nuse topos_core::uci::SubnetId;\n\npub enum InboundMessage {\n    OpenStream(OpenStream),\n}\n\npub struct OpenStream {\n    pub(crate) target_checkpoint: TargetCheckpoint,\n}\n\n#[derive(Debug)]\npub struct CertificatePushed {\n    pub(crate) certificate: CertificateDelivered,\n    pub(crate) positions: Vec<TargetStreamPosition>,\n}\n\n#[derive(Debug)]\npub enum OutboundMessage {\n    StreamOpened(StreamOpened),\n    CertificatePushed(Box<CertificatePushed>),\n}\n\n#[derive(Debug)]\npub struct StreamOpened {\n    pub(crate) subnet_ids: Vec<SubnetId>,\n}\n\nimpl TryFrom<Command> for InboundMessage {\n    type Error = Status;\n\n    fn try_from(command: Command) -> Result<Self, Self::Error> {\n        match command {\n            Command::OpenStream(value) => Ok(OpenStream::try_from(value)?.into()),\n        }\n    }\n}\n\nimpl TryFrom<GrpcOpenStream> for OpenStream {\n    type Error = Status;\n\n    fn try_from(value: GrpcOpenStream) -> Result<Self, Self::Error> {\n        Ok(Self {\n            target_checkpoint: value.target_checkpoint.map(TryInto::try_into).map_or(\n                Err(Status::invalid_argument(\"missing target_checkpoint\")),\n                |value| value.map_err(|_| Status::invalid_argument(\"invalid checkpoint\")),\n            )?,\n        })\n    }\n}\n\nimpl From<OpenStream> for InboundMessage {\n    fn from(value: OpenStream) -> Self 
{\n        Self::OpenStream(value)\n    }\n}\n\nimpl From<OutboundMessage> for Event {\n    fn from(value: OutboundMessage) -> Self {\n        match value {\n            OutboundMessage::StreamOpened(StreamOpened { subnet_ids }) => {\n                Self::StreamOpened(GrpcStreamOpened {\n                    subnet_ids: subnet_ids.into_iter().map(Into::into).collect(),\n                })\n            }\n            OutboundMessage::CertificatePushed(certificate_pushed) => {\n                Self::CertificatePushed(GrpcCertificatePushed {\n                    certificate: Some(certificate_pushed.certificate.certificate.into()),\n                    positions: certificate_pushed\n                        .positions\n                        .into_iter()\n                        .map(Into::into)\n                        .collect(),\n                })\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "crates/topos-tce-api/src/grpc/mod.rs",
    "content": "use base64ct::{Base64, Encoding};\nuse futures::{FutureExt, Stream as FutureStream, StreamExt};\nuse std::pin::Pin;\nuse std::sync::Arc;\nuse tokio::sync::{mpsc, oneshot};\nuse tokio_stream::wrappers::ReceiverStream;\nuse tonic::{Request, Response, Status, Streaming};\nuse topos_core::api::grpc::tce::v1::LastPendingCertificate;\nuse topos_core::api::grpc::tce::v1::{\n    api_service_server::ApiService, GetLastPendingCertificatesRequest,\n    GetLastPendingCertificatesResponse, GetSourceHeadRequest, GetSourceHeadResponse,\n    SubmitCertificateRequest, SubmitCertificateResponse, WatchCertificatesRequest,\n    WatchCertificatesResponse,\n};\nuse topos_core::uci::SubnetId;\nuse topos_metrics::API_GRPC_CERTIFICATE_RECEIVED_TOTAL;\nuse topos_tce_storage::validator::ValidatorStore;\nuse tracing::{error, info, Span};\nuse uuid::Uuid;\n\nuse crate::{\n    runtime::InternalRuntimeCommand,\n    stream::{Stream, StreamError, StreamErrorKind},\n};\n\nuse self::messaging::{InboundMessage, OutboundMessage};\n\npub(crate) mod console;\n#[cfg(test)]\nmod tests;\n\nconst DEFAULT_CHANNEL_STREAM_CAPACITY: usize = 100;\n\npub(crate) mod builder;\npub(crate) mod messaging;\n\npub(crate) struct TceGrpcService {\n    store: Arc<ValidatorStore>,\n    command_sender: mpsc::Sender<InternalRuntimeCommand>,\n}\n\nimpl TceGrpcService {\n    pub fn create_stream(\n        rx: mpsc::Receiver<Result<(Option<Uuid>, OutboundMessage), Status>>,\n    ) -> Pin<Box<dyn FutureStream<Item = Result<WatchCertificatesResponse, Status>> + Send + 'static>>\n    {\n        Box::pin(ReceiverStream::new(rx).map(|response| match response {\n            Ok((request_id, response)) => Ok(WatchCertificatesResponse {\n                event: Some(response.into()),\n                request_id: request_id.map(Into::into),\n            }),\n            Err(error) => Err(error),\n        }))\n    }\n\n    pub fn parse_stream(\n        message: Result<WatchCertificatesRequest, Status>,\n        stream_id: 
Uuid,\n    ) -> Result<(Option<Uuid>, InboundMessage), StreamError> {\n        match message {\n            Ok(WatchCertificatesRequest {\n                request_id,\n                command,\n            }) => match command {\n                Some(command) => match command.try_into() {\n                    Ok(inner_command) => Ok((request_id.map(Into::into), inner_command)),\n                    Err(_) => Err(StreamError::new(stream_id, StreamErrorKind::InvalidCommand)),\n                },\n                None => Err(StreamError::new(stream_id, StreamErrorKind::InvalidCommand)),\n            },\n            Err(error) => Err(StreamError::new(\n                stream_id,\n                StreamErrorKind::Transport(error.code()),\n            )),\n        }\n    }\n}\n\n#[tonic::async_trait]\nimpl ApiService for TceGrpcService {\n    async fn submit_certificate(\n        &self,\n        request: Request<SubmitCertificateRequest>,\n    ) -> Result<Response<SubmitCertificateResponse>, Status> {\n        async {\n            let data = request.into_inner();\n            if let Some(certificate) = data.certificate {\n                if let Some(ref id) = certificate.id {\n                    Span::current().record(\"certificate_id\", id.to_string());\n\n                    let (sender, receiver) = oneshot::channel();\n                    // FIXME: remove certificate cloning (may be a lot of data) when we\n                    // resolve the issue with invalid certificate error\n                    let certificate = match certificate.clone().try_into() {\n                        Ok(c) => c,\n                        Err(e) => {\n                            error!(\n                                \"Invalid certificate error: {e:?}, certificate: {certificate:?}\"\n                            );\n                            return Err(Status::invalid_argument(format!(\n                                \"Can't submit invalid certificate: {e}\"\n                            
)));\n                        }\n                    };\n\n                    if self\n                        .command_sender\n                        .send(InternalRuntimeCommand::CertificateSubmitted {\n                            certificate: Box::new(certificate),\n                            sender,\n                        })\n                        .await\n                        .is_err()\n                    {\n                        return Err(Status::internal(\"Can't submit certificate: sender dropped\"));\n                    } else {\n                        API_GRPC_CERTIFICATE_RECEIVED_TOTAL.inc();\n                    }\n\n                    receiver\n                        .map(|value| match value {\n                            Ok(Ok(_)) => Ok(Response::new(SubmitCertificateResponse {})),\n                            Ok(Err(_)) => Err(Status::internal(\"Can't submit certificate\")),\n                            Err(_) => Err(Status::internal(\"Can't submit certificate\")),\n                        })\n                        .await\n                } else {\n                    error!(\"No certificate id provided\");\n                    Err(Status::invalid_argument(\"Certificate is malformed\"))\n                }\n            } else {\n                Err(Status::invalid_argument(\"Certificate is malformed\"))\n            }\n        }\n        .await\n    }\n\n    /// This RPC allows a client to get last delivered source certificate\n    /// for particular subnet\n    async fn get_source_head(\n        &self,\n        request: Request<GetSourceHeadRequest>,\n    ) -> Result<Response<GetSourceHeadResponse>, Status> {\n        let data = request.into_inner();\n        if let Some(subnet_id) = data.subnet_id {\n            let (sender, receiver) = oneshot::channel();\n\n            let subnet_id = match subnet_id.try_into() {\n                Ok(id) => id,\n                Err(e) => {\n                    error!(\"Invalid subnet id: 
{e:?}\");\n                    return Err(Status::invalid_argument(\"Invalid subnet id\"));\n                }\n            };\n\n            if self\n                .command_sender\n                .send(InternalRuntimeCommand::GetSourceHead { subnet_id, sender })\n                .await\n                .is_err()\n            {\n                return Err(Status::internal(\n                    \"Can't get delivered certificate position by source: sender dropped\",\n                ));\n            }\n\n            receiver\n                .map(|value| {\n                    match value {\n\n                    Ok(Ok(response)) => Ok(match response {\n                        Some((position, certificate)) => Response::new(GetSourceHeadResponse {\n                            certificate: Some(certificate.clone().into()),\n                            position: Some(\n                                topos_core::api::grpc::shared::v1::positions::SourceStreamPosition {\n                                    source_subnet_id: Some(certificate.source_subnet_id.into()),\n                                    certificate_id: Some((*certificate.id.as_array()).into()),\n                                    position,\n\n                                },\n                            ),\n                        }),\n                        None => Response::new(GetSourceHeadResponse {\n                            certificate: None,\n                            position: None\n                        })\n                    }),\n\n                    Ok(Err(crate::RuntimeError::UnknownSubnet(subnet_id))) =>\n                        // Tce does not have Position::Zero certificate associated\n                        {\n                            Err(Status::internal(format!(\n                                \"Unknown subnet, no genesis certificate associated with subnet id \\\n                                 {}\",\n                                &subnet_id\n                        
    )))\n                        },\n\n                        Ok(Err(e)) => Err(Status::internal(format!(\n                            \"Can't get source head certificate position: {e}\"\n                        ))),\n\n                        Err(e) => Err(Status::internal(format!(\n                            \"Can't get source head certificate position: {e}\"\n                        ))),\n                    }\n                })\n                .await\n        } else {\n            Err(Status::invalid_argument(\"Certificate is malformed\"))\n        }\n    }\n\n    async fn get_last_pending_certificates(\n        &self,\n        request: Request<GetLastPendingCertificatesRequest>,\n    ) -> Result<Response<GetLastPendingCertificatesResponse>, Status> {\n        let data = request.into_inner();\n\n        let subnet_ids = data.subnet_ids;\n\n        let subnet_ids: Vec<SubnetId> = subnet_ids\n            .into_iter()\n            .map(TryInto::try_into)\n            .map(|v| v.map_err(|e| Status::internal(format!(\"Invalid subnet id: {e}\"))))\n            .collect::<Result<_, _>>()?;\n\n        let last_pending_certificate = self\n            .store\n            .get_pending_certificates_for_subnets(&subnet_ids)\n            .map_err(|e| Status::internal(format!(\"Can't get last pending certificates: {e}\")))?\n            .into_iter()\n            .map(|(subnet_id, (index, maybe_certificate))| {\n                (Base64::encode_string(subnet_id.as_array()), {\n                    maybe_certificate\n                        .map(|certificate| LastPendingCertificate {\n                            index,\n                            value: Some(certificate.into()),\n                        })\n                        .unwrap_or(LastPendingCertificate {\n                            value: None,\n                            index: 0,\n                        })\n                })\n            })\n            .collect();\n\n        
Ok(Response::new(GetLastPendingCertificatesResponse {\n            last_pending_certificate,\n        }))\n    }\n\n    ///Server streaming response type for the WatchCertificates method.\n    type WatchCertificatesStream = Pin<\n        Box<dyn FutureStream<Item = Result<WatchCertificatesResponse, Status>> + Send + 'static>,\n    >;\n\n    /// This RPC allows a client to open a bidirectional stream with a TCE\n    async fn watch_certificates(\n        &self,\n        request: Request<Streaming<WatchCertificatesRequest>>,\n    ) -> Result<Response<Self::WatchCertificatesStream>, Status> {\n        match request.remote_addr() {\n            Some(addr) => info!(client.addr = %addr, \"Starting a new stream\"),\n            None => info!(client.addr = %\"<unknown>\", \"Starting a new stream\"),\n        }\n        // TODO: Use Cow\n        let stream_id = Uuid::new_v4();\n\n        let inbound_stream = request\n            .into_inner()\n            .map(move |message| Self::parse_stream(message, stream_id))\n            .boxed();\n\n        let (command_sender, command_receiver) = mpsc::channel(2048);\n\n        let (outbound_stream, rx) = mpsc::channel::<Result<(Option<Uuid>, OutboundMessage), Status>>(\n            DEFAULT_CHANNEL_STREAM_CAPACITY,\n        );\n\n        let stream = Stream::new(\n            stream_id,\n            inbound_stream,\n            outbound_stream,\n            command_receiver,\n            self.command_sender.clone(),\n        );\n\n        if self\n            .command_sender\n            .send(InternalRuntimeCommand::NewStream {\n                stream,\n                command_sender,\n            })\n            .await\n            .is_err()\n        {\n            return Err(Status::internal(\"Can't submit certificate: sender dropped\"));\n        }\n\n        Ok(Response::new(\n            Self::create_stream(rx) as Self::WatchCertificatesStream\n        ))\n    }\n}\n"
  },
  {
    "path": "crates/topos-tce-api/src/grpc/tests.rs",
    "content": "use test_log::test;\n\n#[test(tokio::test)]\n#[ignore = \"not yet implemented\"]\nasync fn respond_to_valid_certificate_submission() {}\n\n#[test(tokio::test)]\n#[ignore = \"not yet implemented\"]\nasync fn respond_to_invalid_certificate_submission() {}\n"
  },
  {
    "path": "crates/topos-tce-api/src/lib.rs",
    "content": "mod graphql;\nmod grpc;\nmod metrics;\nmod runtime;\nmod stream;\n\n#[cfg(test)]\nmod tests;\n\npub(crate) mod constants {\n    /// Constant size of every channel in the crate\n    pub(crate) const CHANNEL_SIZE: usize = 2048;\n\n    /// Constant size of every transient stream channel in the crate\n    pub(crate) const TRANSIENT_STREAM_CHANNEL_SIZE: usize = 1024;\n}\npub use runtime::{\n    error::RuntimeError, Runtime, RuntimeClient, RuntimeCommand, RuntimeContext, RuntimeEvent,\n};\n"
  },
  {
    "path": "crates/topos-tce-api/src/metrics/builder.rs",
    "content": "use std::net::SocketAddr;\n\nuse topos_metrics::gather_metrics;\n\nuse axum::{routing::get, Router, Server};\nuse tracing::info;\n\n#[derive(Default)]\npub struct ServerBuilder {\n    serve_addr: Option<SocketAddr>,\n}\n\nimpl ServerBuilder {\n    pub fn serve_addr(mut self, addr: Option<SocketAddr>) -> Self {\n        self.serve_addr = addr;\n\n        self\n    }\n\n    pub async fn build(\n        mut self,\n    ) -> Server<hyper::server::conn::AddrIncoming, axum::routing::IntoMakeService<Router>> {\n        let app = Router::new().route(\n            \"/metrics\",\n            get(|| async {\n                let topos_metrics = gather_metrics();\n                let mut libp2p_metrics = String::new();\n                let reg = topos_p2p::constants::METRIC_REGISTRY.lock().await;\n                _ = prometheus_client::encoding::text::encode(&mut libp2p_metrics, &reg);\n\n                format!(\"{topos_metrics}{libp2p_metrics}\")\n            }),\n        );\n\n        let serve_addr = self\n            .serve_addr\n            .take()\n            .expect(\"Metrics server address is not set\");\n        info!(\"Starting metrics server on {}\", serve_addr);\n        Server::bind(&serve_addr).serve(app.into_make_service())\n    }\n}\n"
  },
  {
    "path": "crates/topos-tce-api/src/metrics/mod.rs",
    "content": "pub(crate) mod builder;\n"
  },
  {
    "path": "crates/topos-tce-api/src/runtime/builder.rs",
    "content": "use futures::Stream;\nuse std::{collections::HashMap, net::SocketAddr, sync::Arc};\nuse tokio::{\n    spawn,\n    sync::{broadcast, mpsc, oneshot, RwLock},\n};\nuse tokio_stream::wrappers::ReceiverStream;\nuse topos_core::api::grpc::tce::v1::StatusResponse;\nuse topos_tce_storage::{\n    types::CertificateDeliveredWithPositions, validator::ValidatorStore, StorageClient,\n};\nuse tracing::Instrument;\n\nuse crate::{\n    constants::CHANNEL_SIZE, graphql::builder::ServerBuilder as GraphQLBuilder,\n    grpc::builder::ServerBuilder, metrics::builder::ServerBuilder as MetricsBuilder, Runtime,\n    RuntimeClient, RuntimeEvent,\n};\n\n#[derive(Default)]\npub struct RuntimeBuilder {\n    storage: Option<StorageClient>,\n    store: Option<Arc<ValidatorStore>>,\n    broadcast_stream: Option<broadcast::Receiver<CertificateDeliveredWithPositions>>,\n    local_peer_id: String,\n    grpc_socket_addr: Option<SocketAddr>,\n    graphql_socket_addr: Option<SocketAddr>,\n    metrics_socket_addr: Option<SocketAddr>,\n    status: Option<RwLock<StatusResponse>>,\n}\n\nimpl RuntimeBuilder {\n    pub fn with_broadcast_stream(\n        mut self,\n        stream: broadcast::Receiver<CertificateDeliveredWithPositions>,\n    ) -> Self {\n        self.broadcast_stream = Some(stream);\n\n        self\n    }\n\n    pub fn with_peer_id(mut self, local_peer_id: String) -> Self {\n        self.local_peer_id = local_peer_id;\n\n        self\n    }\n\n    pub fn serve_grpc_addr(mut self, addr: SocketAddr) -> Self {\n        self.grpc_socket_addr = Some(addr);\n\n        self\n    }\n\n    pub fn serve_graphql_addr(mut self, addr: SocketAddr) -> Self {\n        self.graphql_socket_addr = Some(addr);\n\n        self\n    }\n\n    pub fn serve_metrics_addr(mut self, addr: SocketAddr) -> Self {\n        self.metrics_socket_addr = Some(addr);\n\n        self\n    }\n\n    pub fn tce_status(mut self, status: RwLock<StatusResponse>) -> Self {\n        self.status = Some(status);\n\n        
self\n    }\n\n    pub fn store(mut self, store: Arc<ValidatorStore>) -> Self {\n        self.store = Some(store);\n\n        self\n    }\n\n    pub fn storage(mut self, storage: StorageClient) -> Self {\n        self.storage = Some(storage);\n\n        self\n    }\n\n    pub async fn build_and_launch(\n        mut self,\n    ) -> (\n        RuntimeClient,\n        impl Stream<Item = RuntimeEvent>,\n        RuntimeContext,\n    ) {\n        let (internal_runtime_command_sender, internal_runtime_command_receiver) =\n            mpsc::channel(CHANNEL_SIZE);\n        let (api_event_sender, api_event_receiver) = mpsc::channel(CHANNEL_SIZE);\n\n        let (health_reporter, tce_status, grpc) = ServerBuilder::default()\n            .with_store(\n                self.store\n                    .clone()\n                    .take()\n                    .expect(\"Unable to build gRPC Server, Store is missing\"),\n            )\n            .with_peer_id(self.local_peer_id)\n            .command_sender(internal_runtime_command_sender.clone())\n            .serve_addr(self.grpc_socket_addr)\n            .build()\n            .in_current_span()\n            .await;\n\n        let (command_sender, runtime_command_receiver) = mpsc::channel(CHANNEL_SIZE);\n        let (shutdown_channel, shutdown_receiver) = mpsc::channel::<oneshot::Sender<()>>(1);\n\n        let grpc_handler = spawn(grpc.in_current_span());\n\n        let graphql_handler = if let Some(graphql_addr) = self.graphql_socket_addr {\n            tracing::info!(\"Serving GraphQL on {}\", graphql_addr);\n\n            let graphql = GraphQLBuilder::default()\n                .store(\n                    self.store\n                        .take()\n                        .expect(\"Unable to build GraphQL Server, Store is missing\"),\n                )\n                .runtime(internal_runtime_command_sender.clone())\n                .serve_addr(Some(graphql_addr))\n                .build()\n                
.in_current_span();\n            spawn(graphql.await)\n        } else {\n            spawn(async move {\n                tracing::info!(\"Not serving GraphQL\");\n                Ok(())\n            })\n        };\n\n        let metrics_handler = if let Some(metrics_addr) = self.metrics_socket_addr {\n            tracing::info!(\"Serving metrics on {}\", metrics_addr);\n\n            let metrics_server = MetricsBuilder::default()\n                .serve_addr(Some(metrics_addr))\n                .build()\n                .in_current_span();\n            spawn(metrics_server.await)\n        } else {\n            spawn(async move {\n                tracing::info!(\"Not serving metrics\");\n                Ok(())\n            })\n        };\n\n        let runtime = Runtime {\n            sync_tasks: Default::default(),\n            running_sync_tasks: Default::default(),\n            broadcast_stream: self\n                .broadcast_stream\n                .expect(\"Unable to build Runtime, Broadcast Stream is missing\"),\n            storage: self\n                .storage\n                .take()\n                .expect(\"Unable to build Runtime, Storage is missing\"),\n            active_streams: HashMap::new(),\n            pending_streams: HashMap::new(),\n            subnet_subscriptions: HashMap::new(),\n            internal_runtime_command_receiver,\n            runtime_command_receiver,\n            health_reporter,\n            api_event_sender,\n            shutdown: shutdown_receiver,\n            streams: Default::default(),\n            transient_streams: HashMap::new(),\n        };\n        let runtime_handler = spawn(runtime.launch());\n\n        (\n            RuntimeClient {\n                command_sender,\n                tce_status,\n                shutdown_channel,\n            },\n            ReceiverStream::new(api_event_receiver),\n            RuntimeContext {\n                grpc_handler,\n                graphql_handler,\n                
metrics_handler,\n                runtime_handler,\n            },\n        )\n    }\n\n    pub fn set_grpc_socket_addr(mut self, socket: Option<SocketAddr>) -> Self {\n        self.grpc_socket_addr = socket;\n\n        self\n    }\n}\n\n#[derive(Debug)]\npub struct RuntimeContext {\n    grpc_handler: tokio::task::JoinHandle<Result<(), tonic::transport::Error>>,\n    graphql_handler: tokio::task::JoinHandle<Result<(), hyper::Error>>,\n    metrics_handler: tokio::task::JoinHandle<Result<(), hyper::Error>>,\n    runtime_handler: tokio::task::JoinHandle<()>,\n}\n\nimpl Drop for RuntimeContext {\n    fn drop(&mut self) {\n        tracing::warn!(\"Dropping RuntimeContext\");\n        self.grpc_handler.abort();\n        self.graphql_handler.abort();\n        self.metrics_handler.abort();\n        self.runtime_handler.abort();\n    }\n}\n"
  },
  {
    "path": "crates/topos-tce-api/src/runtime/client.rs",
    "content": "use std::collections::HashMap;\nuse std::sync::Arc;\n\nuse super::RuntimeCommand;\nuse futures::Future;\nuse tokio::sync::{mpsc, oneshot, RwLock};\nuse topos_core::api::grpc::checkpoints::TargetStreamPosition;\nuse topos_core::api::grpc::tce::v1::StatusResponse;\nuse topos_core::types::CertificateDelivered;\nuse topos_core::uci::SubnetId;\nuse tracing::error;\n\n#[derive(Clone, Debug)]\npub struct RuntimeClient {\n    pub(crate) command_sender: mpsc::Sender<RuntimeCommand>,\n    pub(crate) tce_status: Arc<RwLock<StatusResponse>>,\n    pub(crate) shutdown_channel: mpsc::Sender<oneshot::Sender<()>>,\n}\n\nimpl RuntimeClient {\n    pub fn dispatch_certificate(\n        &self,\n        certificate: CertificateDelivered,\n        positions: HashMap<SubnetId, TargetStreamPosition>,\n    ) -> impl Future<Output = ()> + 'static + Send {\n        let sender = self.command_sender.clone();\n\n        async move {\n            if let Err(error) = sender\n                .send(RuntimeCommand::DispatchCertificate {\n                    certificate,\n                    positions,\n                })\n                .await\n            {\n                error!(\"Can't dispatch certificate: {error:?}\");\n            }\n        }\n    }\n\n    pub async fn has_active_sample(&self) -> bool {\n        self.tce_status.read().await.has_active_sample\n    }\n\n    pub async fn set_active_sample(&self, value: bool) {\n        let mut status = self.tce_status.write().await;\n\n        status.has_active_sample = value;\n    }\n\n    pub async fn shutdown(&self) -> Result<(), Box<dyn std::error::Error>> {\n        let (sender, receiver) = oneshot::channel();\n        self.shutdown_channel.send(sender).await?;\n\n        Ok(receiver.await?)\n    }\n}\n"
  },
  {
    "path": "crates/topos-tce-api/src/runtime/commands.rs",
    "content": "use std::collections::HashMap;\nuse tokio::sync::{mpsc::Sender, oneshot};\nuse topos_core::api::grpc::checkpoints::TargetStreamPosition;\nuse topos_core::types::CertificateDelivered;\nuse topos_core::uci::{Certificate, SubnetId};\nuse topos_tce_storage::types::PendingResult;\nuse uuid::Uuid;\n\nuse crate::stream::{Stream, StreamCommand, TransientStream};\n\nuse super::error::RuntimeError;\n\n#[derive(Debug)]\npub enum RuntimeCommand {\n    /// Dispatch certificate to gRPC API Runtime in order to push it to listening open streams\n    DispatchCertificate {\n        certificate: CertificateDelivered,\n        positions: HashMap<SubnetId, TargetStreamPosition>,\n    },\n}\n\n#[derive(Debug)]\npub(crate) enum InternalRuntimeCommand {\n    /// When a new stream is open, this command is dispatch to manage the stream\n    NewStream {\n        stream: Stream,\n        command_sender: Sender<StreamCommand>,\n    },\n\n    /// Register a stream as subscriber for the given subnet_streams.\n    /// Commands or certificates pointing to one of the subnet will be forward using the given Sender\n    Register {\n        stream_id: Uuid,\n        #[allow(dead_code)]\n        target_subnet_stream_positions: HashMap<SubnetId, HashMap<SubnetId, TargetStreamPosition>>,\n        sender: oneshot::Sender<Result<(), RuntimeError>>,\n    },\n\n    /// Notify that a Stream has successfully handshake with the server\n    Handshaked { stream_id: Uuid },\n\n    /// Dispatch when a certificate has been submitted to the TCE.\n    /// This command will be used to trigger the DoubleEcho process.\n    CertificateSubmitted {\n        certificate: Box<Certificate>,\n        sender: oneshot::Sender<Result<PendingResult, RuntimeError>>,\n    },\n\n    /// Get source head certificate by source subnet id\n    GetSourceHead {\n        subnet_id: SubnetId,\n        sender: oneshot::Sender<Result<Option<(u64, Certificate)>, RuntimeError>>,\n    },\n\n    /// Ask for the creation of a new 
TransientStream\n    NewTransientStream {\n        sender: oneshot::Sender<Result<TransientStream, RuntimeError>>,\n    },\n}\n"
  },
  {
    "path": "crates/topos-tce-api/src/runtime/error.rs",
    "content": "use thiserror::Error;\nuse topos_core::uci::SubnetId;\nuse topos_tce_storage::errors::StorageError;\nuse uuid::Uuid;\n\n#[derive(Error, Debug)]\npub enum RuntimeError {\n    #[error(\"The pending stream {0} was not found\")]\n    PendingStreamNotFound(Uuid),\n\n    #[error(\"Unable to get source head certificate for subnet id {0}: {1}\")]\n    UnableToGetSourceHead(SubnetId, String),\n\n    #[error(\"Unknown subnet with subnet id {0}\")]\n    UnknownSubnet(SubnetId),\n\n    #[error(\"Unexpected store error: {0}\")]\n    Store(#[from] StorageError),\n\n    #[error(\"Communication error: {0}\")]\n    CommunicationError(String),\n}\n"
  },
  {
    "path": "crates/topos-tce-api/src/runtime/events.rs",
    "content": "use std::collections::HashMap;\nuse std::collections::HashSet;\nuse tokio::sync::oneshot;\nuse topos_core::uci::{Certificate, SubnetId};\nuse topos_tce_storage::types::PendingResult;\n\nuse super::error::RuntimeError;\n\npub enum RuntimeEvent {\n    CertificateSubmitted {\n        certificate: Box<Certificate>,\n        sender: oneshot::Sender<Result<PendingResult, RuntimeError>>,\n    },\n\n    GetSourceHead {\n        subnet_id: SubnetId,\n        sender: oneshot::Sender<Result<Option<(u64, Certificate)>, RuntimeError>>,\n    },\n\n    GetLastPendingCertificates {\n        subnet_ids: HashSet<SubnetId>,\n        #[allow(clippy::type_complexity)]\n        sender:\n            oneshot::Sender<Result<HashMap<SubnetId, Option<(Certificate, u64)>>, RuntimeError>>,\n    },\n}\n"
  },
  {
    "path": "crates/topos-tce-api/src/runtime/mod.rs",
    "content": "use futures::{stream::FuturesUnordered, FutureExt, StreamExt, TryFutureExt};\nuse std::future::{Future, IntoFuture};\nuse std::{\n    collections::{HashMap, HashSet},\n    pin::Pin,\n    sync::Arc,\n    time::Duration,\n};\nuse tokio::{\n    sync::mpsc::{self, Receiver, Sender},\n    sync::{broadcast, oneshot},\n};\nuse tokio_util::sync::CancellationToken;\nuse tonic_health::server::HealthReporter;\nuse topos_core::api::grpc::checkpoints::TargetStreamPosition;\nuse topos_core::api::grpc::tce::v1::api_service_server::ApiServiceServer;\nuse topos_core::types::CertificateDelivered;\nuse topos_core::uci::SubnetId;\nuse topos_tce_storage::{types::CertificateDeliveredWithPositions, StorageClient};\n\nuse tracing::{debug, error, info};\nuse uuid::Uuid;\n\nuse crate::{\n    constants::TRANSIENT_STREAM_CHANNEL_SIZE,\n    grpc::TceGrpcService,\n    stream::{StreamCommand, StreamError, StreamErrorKind, TransientStream},\n};\n\npub mod builder;\npub use builder::RuntimeContext;\nmod client;\nmod commands;\npub mod error;\nmod events;\n\nmod sync_task;\n#[cfg(test)]\nmod tests;\n\npub use client::RuntimeClient;\n\nuse self::builder::RuntimeBuilder;\npub(crate) use self::commands::InternalRuntimeCommand;\n\npub use self::commands::RuntimeCommand;\npub use self::events::RuntimeEvent;\n\nuse crate::runtime::sync_task::{RunningTasks, SyncTask};\n\npub(crate) type Streams =\n    FuturesUnordered<Pin<Box<dyn Future<Output = Result<Uuid, StreamError>> + Send>>>;\n\npub struct Runtime {\n    /// Map of sync tasks and their stream id, so we can cancel them when a new stream\n    /// with the same id is registered\n    pub(crate) sync_tasks: HashMap<Uuid, CancellationToken>,\n    /// Sync tasks that were registered for this node.\n    pub(crate) running_sync_tasks: RunningTasks,\n\n    pub(crate) broadcast_stream: broadcast::Receiver<CertificateDeliveredWithPositions>,\n\n    pub(crate) storage: StorageClient,\n    pub(crate) transient_streams: HashMap<Uuid, 
Sender<Arc<CertificateDelivered>>>,\n    /// Streams that are currently active (with a valid handshake)\n    pub(crate) active_streams: HashMap<Uuid, Sender<StreamCommand>>,\n    /// Streams that are currently in negotiation\n    pub(crate) pending_streams: HashMap<Uuid, Sender<StreamCommand>>,\n    /// Mapping between a subnet_id and streams that are subscribed to it\n    pub(crate) subnet_subscriptions: HashMap<SubnetId, HashSet<Uuid>>,\n    /// Receiver for Internal API command\n    pub(crate) internal_runtime_command_receiver: Receiver<InternalRuntimeCommand>,\n    /// Receiver for Outside API command\n    pub(crate) runtime_command_receiver: Receiver<RuntimeCommand>,\n    /// HealthCheck reporter for gRPC\n    pub(crate) health_reporter: HealthReporter,\n    /// Sender that forward Event to the rest of the system\n    pub(crate) api_event_sender: Sender<RuntimeEvent>,\n    /// Shutdown signal receiver\n    pub(crate) shutdown: mpsc::Receiver<oneshot::Sender<()>>,\n    /// Spawned stream that manage a gRPC stream\n    pub(crate) streams: Streams,\n}\n\nimpl Runtime {\n    pub fn builder() -> RuntimeBuilder {\n        RuntimeBuilder::default()\n    }\n\n    pub async fn launch(mut self) {\n        let mut health_update = tokio::time::interval(Duration::from_secs(1));\n        let shutdowned: Option<oneshot::Sender<()>> = loop {\n            tokio::select! 
{\n                shutdown = self.shutdown.recv() => {\n                    break shutdown;\n                },\n\n                _ = health_update.tick() => {\n                    self.health_reporter.set_serving::<ApiServiceServer<TceGrpcService>>().await;\n                }\n\n                Ok(certificate_delivered) = self.broadcast_stream.recv() => {\n                    let certificate = certificate_delivered.0;\n                    let certificate_id = certificate.certificate.id;\n                    let positions = certificate_delivered.1;\n                    let cmd = RuntimeCommand::DispatchCertificate {\n                        certificate,\n                        positions: positions\n                            .targets\n                            .into_iter()\n                            .map(|(subnet_id, certificate_target_stream_position)| {\n                                (\n                                    subnet_id,\n                                    TargetStreamPosition {\n                                        target_subnet_id:\n                                            certificate_target_stream_position.target_subnet_id,\n                                        source_subnet_id:\n                                            certificate_target_stream_position.source_subnet_id,\n                                        position: *certificate_target_stream_position.position,\n                                        certificate_id: Some(certificate_id),\n                                    },\n                                )\n                            })\n                        .collect::<HashMap<SubnetId, TargetStreamPosition>>()\n                    };\n\n                    self.handle_runtime_command(cmd).await;\n                }\n\n                Some(result) = self.streams.next() => {\n                    self.handle_stream_termination(result).await;\n                }\n\n                Some(internal_command) = 
self.internal_runtime_command_receiver.recv() => {\n                    self.handle_internal_command(internal_command).await;\n                }\n\n                Some(command) = self.runtime_command_receiver.recv() => {\n                    self.handle_runtime_command(command).await;\n                }\n\n                Some(result) = self.running_sync_tasks.next() => {\n                    debug!(\"SyncTask with StreamId: {:?} resulted in {:?}\", result.0, result.1);\n                }\n            }\n        };\n\n        if let Some(sender) = shutdowned {\n            info!(\"Shutting down the TCE API service...\");\n            _ = sender.send(());\n        }\n    }\n\n    async fn handle_stream_termination(&mut self, stream_result: Result<Uuid, StreamError>) {\n        match stream_result {\n            Ok(stream_id) => {\n                info!(\"Stream {stream_id} terminated gracefully\");\n\n                self.active_streams.remove(&stream_id);\n                self.pending_streams.remove(&stream_id);\n            }\n            Err(StreamError { stream_id, kind }) => match kind {\n                StreamErrorKind::HandshakeFailed(_)\n                | StreamErrorKind::InvalidCommand\n                | StreamErrorKind::MalformedTargetCheckpoint\n                | StreamErrorKind::Transport(_)\n                | StreamErrorKind::PreStartError\n                | StreamErrorKind::StreamClosed\n                | StreamErrorKind::Timeout => {\n                    error!(\"Stream {stream_id} error: {kind:?}\");\n\n                    self.active_streams.remove(&stream_id);\n                    self.pending_streams.remove(&stream_id);\n                }\n            },\n        }\n    }\n\n    async fn handle_runtime_command(&mut self, command: RuntimeCommand) {\n        match command {\n            RuntimeCommand::DispatchCertificate {\n                certificate,\n                mut positions,\n            } => {\n                info!(\n                    
\"Received DispatchCertificate for certificate cert_id: {:?}\",\n                    certificate.certificate.id\n                );\n                // Collect target subnets from certificate cross chain transaction list\n                let target_subnets = certificate\n                    .certificate\n                    .target_subnets\n                    .iter()\n                    .collect::<HashSet<_>>();\n                debug!(\n                    \"Dispatching certificate cert_id: {:?} to target subnets: {:?}\",\n                    &certificate.certificate.id, target_subnets\n                );\n\n                // Notify all the transient streams that a new certificate is available\n                // To avoid double allocation for each stream, we clone an Arc of the certificate.\n                // Each stream will convert the UCI certificate into a GraphQL one and send it to the transient stream.\n                let shared_certificate = Arc::new(certificate.clone());\n                for transient in self.transient_streams.values() {\n                    let sender = transient.clone();\n                    let shared_certificate = shared_certificate.clone();\n                    tokio::spawn(async move {\n                        _ = sender.send(shared_certificate).await;\n                    });\n                }\n\n                for target_subnet_id in target_subnets {\n                    let target_subnet_id = *target_subnet_id;\n                    let target_position = positions.remove(&target_subnet_id);\n                    if let Some(stream_list) = self.subnet_subscriptions.get(&target_subnet_id) {\n                        let uuids: Vec<&Uuid> = stream_list.iter().collect();\n                        for uuid in uuids {\n                            if let Some(sender) = self.active_streams.get(uuid) {\n                                let sender = sender.clone();\n                                let certificate = certificate.clone();\n 
                               info!(\"Sending certificate to {uuid}\");\n                                if let Some(target_position) = target_position.clone() {\n                                    if let Err(error) = sender\n                                        .send(StreamCommand::PushCertificate {\n                                            certificate,\n                                            positions: vec![target_position],\n                                        })\n                                        .await\n                                    {\n                                        error!(%error, \"Can't push certificate because the receiver is dropped\");\n                                    }\n                                } else {\n                                    error!(\n                                        \"Invalid target stream position for cert id {}, target \\\n                                         subnet id {target_subnet_id}, dispatch failed\",\n                                        &certificate.certificate.id\n                                    );\n                                }\n                            }\n                        }\n                    }\n                }\n            }\n        }\n    }\n\n    async fn handle_internal_command(&mut self, command: InternalRuntimeCommand) {\n        match command {\n            InternalRuntimeCommand::NewTransientStream { sender } => {\n                let stream_id = Uuid::new_v4();\n                info!(\"Opening a new transient stream with UUID {stream_id}\");\n\n                let (stream, receiver) = mpsc::channel(TRANSIENT_STREAM_CHANNEL_SIZE);\n                let (shutdown, shutdown_recv) = oneshot::channel();\n                self.transient_streams.insert(stream_id, stream);\n\n                self.streams.push(\n                    shutdown_recv\n                        .map_err(move |_| StreamError {\n                            stream_id,\n   
                         kind: StreamErrorKind::StreamClosed,\n                        })\n                        .boxed(),\n                );\n\n                if sender\n                    .send(Ok(TransientStream {\n                        stream_id,\n                        inner: receiver,\n                        notifier: Some(shutdown),\n                    }))\n                    .is_err()\n                {\n                    error!(\"Unable to send new TransientStream\");\n                    _ = self.transient_streams.remove(&stream_id);\n                }\n            }\n\n            InternalRuntimeCommand::NewStream {\n                stream,\n                command_sender,\n            } => {\n                let stream_id = stream.stream_id;\n                info!(\"Opening a new stream with UUID {stream_id}\");\n\n                self.pending_streams.insert(stream_id, command_sender);\n\n                self.streams.push(Box::pin(stream.run()));\n            }\n\n            InternalRuntimeCommand::Handshaked { stream_id } => {\n                if let Some(sender) = self.pending_streams.remove(&stream_id) {\n                    self.active_streams.insert(stream_id, sender);\n                    info!(\"Stream {stream_id} has successfully handshake\");\n                }\n            }\n\n            InternalRuntimeCommand::Register {\n                stream_id,\n                sender,\n                target_subnet_stream_positions,\n            } => {\n                info!(\"Stream {stream_id} is registered as subscriber\");\n\n                if let Some(cancel_token) = self.sync_tasks.remove(&stream_id) {\n                    // Cancel the previous task\n                    cancel_token.cancel();\n                }\n\n                let storage = self.storage.clone();\n                let notifier = self\n                    .active_streams\n                    .get(&stream_id)\n                    .or_else(|| 
self.pending_streams.get(&stream_id))\n                    .cloned();\n\n                if let Err(error) = sender.send(Ok(())) {\n                    error!(\n                        ?error,\n                        \"Failed to send response to the Stream, receiver is dropped\"\n                    );\n                }\n\n                if let Some(notifier) = notifier {\n                    // TODO: Rework to remove old subscriptions\n                    for target_subnet_id in target_subnet_stream_positions.keys() {\n                        self.subnet_subscriptions\n                            .entry(*target_subnet_id)\n                            .or_default()\n                            .insert(stream_id);\n                    }\n\n                    let cancel_token = CancellationToken::new();\n\n                    let cloned_cancel_token = cancel_token.clone();\n\n                    let task = SyncTask::new(\n                        stream_id,\n                        target_subnet_stream_positions,\n                        storage,\n                        notifier,\n                        cancel_token,\n                    );\n\n                    self.running_sync_tasks.push(task.into_future());\n\n                    self.sync_tasks.insert(stream_id, cloned_cancel_token);\n                }\n            }\n\n            InternalRuntimeCommand::CertificateSubmitted {\n                certificate,\n                sender,\n            } => {\n                async move {\n                    info!(\n                        \"A certificate has been submitted to the TCE {}\",\n                        certificate.id\n                    );\n                    if let Err(error) = self\n                        .api_event_sender\n                        .send(RuntimeEvent::CertificateSubmitted {\n                            certificate,\n                            sender,\n                        })\n                        .await\n                   
 {\n                        error!(\n                            %error,\n                            \"Can't send certificate submission to runtime, receiver is dropped\"\n                        );\n                    }\n                }\n                .await\n            }\n\n            InternalRuntimeCommand::GetSourceHead { subnet_id, sender } => {\n                info!(\"Source head certificate has been requested for subnet id: {subnet_id}\");\n\n                if let Err(error) = self\n                    .api_event_sender\n                    .send(RuntimeEvent::GetSourceHead { subnet_id, sender })\n                    .await\n                {\n                    error!(\n                        %error,\n                        \"Can't request source head certificate, receiver is dropped\"\n                    );\n                }\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "crates/topos-tce-api/src/runtime/sync_task.rs",
    "content": "use crate::stream::StreamCommand;\nuse futures::stream::FuturesUnordered;\nuse std::collections::hash_map::Entry;\nuse std::collections::HashMap;\nuse std::future::{Future, IntoFuture};\nuse std::pin::Pin;\nuse tokio::sync::mpsc::error::SendError;\nuse tokio::sync::mpsc::Sender;\nuse tokio_util::sync::CancellationToken;\nuse topos_core::api::grpc::checkpoints::TargetStreamPosition;\nuse topos_core::types::stream::CertificateTargetStreamPosition;\nuse topos_core::types::CertificateDelivered;\nuse topos_core::uci::SubnetId;\nuse topos_tce_storage::{FetchCertificatesFilter, FetchCertificatesPosition, StorageClient};\nuse tracing::{debug, error, info};\nuse uuid::Uuid;\n\ntype TargetSubnetStreamPositions = HashMap<SubnetId, HashMap<SubnetId, TargetStreamPosition>>;\npub(crate) type RunningTasks =\n    FuturesUnordered<Pin<Box<dyn Future<Output = (Uuid, SyncTaskStatus)> + Send>>>;\n\n/// Status of a [`SyncTask`]\n#[derive(Debug)]\npub(crate) enum SyncTaskStatus {\n    ///  The sync task is active and started running\n    Running,\n    /// The sync task failed and reported an error\n    #[allow(dead_code)]\n    Error(Box<SyncTaskError>),\n    /// The sync task exited gracefully and is done pushing certificates to the stream\n    Done,\n    /// The sync task was cancelled by a incoming stream with the same Uuid\n    Cancelled,\n}\n\n#[derive(Debug)]\npub(crate) enum SyncTaskError {\n    /// The [`SyncTask`] failed to send a certificate to the stream\n    SendingToStream {\n        #[allow(dead_code)]\n        error: Box<SendError<StreamCommand>>,\n    },\n    /// Invalid certificate position was being fetched\n    InvalidCertificatePosition,\n}\n\n/// When registering a stream, a [`SyncTask`] is started to fetch certificates from the storage\n/// and push them to the stream.\n///\n/// The [`SyncTask`] is used to fetch certificates from the storage and push them to the stream.\n/// It is created when a new stream is registered and is cancelled when a stream 
with the same Uuid\n/// is being started. It is using the [`StorageClient`] to fetch certificates from the storage and\n/// a [`Sender`] part of a channel to push certificates to the stream.\npub(crate) struct SyncTask {\n    /// The status of the [`SyncTask`]. Can be used to check if the task is still running\n    pub(crate) status: SyncTaskStatus,\n    /// The stream with which the [`SyncTask`] is connected and pushes certificates to\n    pub(crate) stream_id: Uuid,\n    /// A map of subnet and the subnet pair (target and source subnet id), its position and the\n    /// last certificate id delivered to the stream\n    pub(crate) target_subnet_stream_positions: TargetSubnetStreamPositions,\n    /// The connection to the database layer through a StorageClient\n    pub(crate) store: StorageClient,\n    /// The notifier is used to send certificates to the stream\n    pub(crate) notifier: Sender<StreamCommand>,\n    /// If a new stream is registered with the same Uuid, the sync task will be cancelled\n    pub(crate) cancel_token: CancellationToken,\n}\n\nimpl SyncTask {\n    /// Creating a new SyncTask which will fetch certificates from the storage and pushes them to the stream\n    pub(crate) fn new(\n        stream_id: Uuid,\n        target_subnet_stream_positions: TargetSubnetStreamPositions,\n        store: StorageClient,\n        notifier: Sender<StreamCommand>,\n        cancel_token: CancellationToken,\n    ) -> Self {\n        Self {\n            status: SyncTaskStatus::Running,\n            stream_id,\n            target_subnet_stream_positions,\n            store,\n            notifier,\n            cancel_token,\n        }\n    }\n}\n\nimpl IntoFuture for SyncTask {\n    type Output = (Uuid, SyncTaskStatus);\n\n    type IntoFuture = Pin<Box<dyn Future<Output = Self::Output> + Send + 'static>>;\n\n    fn into_future(mut self) -> Self::IntoFuture {\n        Box::pin(async move {\n            debug!(\"Sync task started for stream {}\", self.stream_id);\n        
    let mut collector: Vec<(CertificateDelivered, FetchCertificatesPosition)> = Vec::new();\n\n            for (target_subnet_id, source) in &mut self.target_subnet_stream_positions {\n                if self.cancel_token.is_cancelled() {\n                    self.status = SyncTaskStatus::Cancelled;\n                    return (self.stream_id, self.status);\n                }\n                let source_subnet_list = self\n                    .store\n                    .get_target_source_subnet_list(*target_subnet_id)\n                    .await;\n\n                debug!(\n                    \"Stream sync task detected {:?} as source list\",\n                    source_subnet_list\n                );\n                if let Ok(source_subnet_list) = source_subnet_list {\n                    for source_subnet_id in source_subnet_list {\n                        if let Entry::Vacant(entry) = source.entry(source_subnet_id) {\n                            entry.insert(TargetStreamPosition {\n                                target_subnet_id: *target_subnet_id,\n                                source_subnet_id,\n                                position: 0,\n                                certificate_id: None,\n                            });\n                        }\n                    }\n                }\n\n                for TargetStreamPosition {\n                    target_subnet_id,\n                    source_subnet_id,\n                    position,\n                    ..\n                } in source.values_mut()\n                {\n                    if self.cancel_token.is_cancelled() {\n                        self.status = SyncTaskStatus::Cancelled;\n                        return (self.stream_id, self.status);\n                    }\n                    if let Ok(certificates_with_positions) = self\n                        .store\n                        .fetch_certificates(FetchCertificatesFilter::Target {\n                            
target_stream_position: CertificateTargetStreamPosition {\n                                target_subnet_id: *target_subnet_id,\n                                source_subnet_id: *source_subnet_id,\n                                position: (*position).into(),\n                            },\n                            limit: 100,\n                        })\n                        .await\n                    {\n                        collector.extend(certificates_with_positions)\n                    }\n                }\n            }\n\n            for (certificate, position) in collector {\n                debug!(\n                    \"Stream sync task for {} is sending {}\",\n                    self.stream_id, certificate.certificate.id\n                );\n\n                if let FetchCertificatesPosition::Target(CertificateTargetStreamPosition {\n                    target_subnet_id,\n                    source_subnet_id,\n                    position,\n                }) = position\n                {\n                    if let Err(error) = self\n                        .notifier\n                        .send(StreamCommand::PushCertificate {\n                            positions: vec![TargetStreamPosition {\n                                target_subnet_id,\n                                source_subnet_id,\n                                position: *position,\n                                certificate_id: Some(certificate.certificate.id),\n                            }],\n                            certificate,\n                        })\n                        .await\n                    {\n                        error!(\"Error sending certificate to stream: {}\", error);\n                        self.status =\n                            SyncTaskStatus::Error(Box::new(SyncTaskError::SendingToStream {\n                                error: Box::new(error),\n                            }));\n                        return (self.stream_id, 
self.status);\n                    }\n                } else {\n                    error!(\"Invalid certificate position fetched\");\n                    self.status =\n                        SyncTaskStatus::Error(Box::from(SyncTaskError::InvalidCertificatePosition));\n                    return (self.stream_id, self.status);\n                }\n            }\n\n            info!(\"The sync task for stream {} is done\", self.stream_id);\n            self.status = SyncTaskStatus::Done;\n            (self.stream_id, self.status)\n        })\n    }\n}\n"
  },
  {
    "path": "crates/topos-tce-api/src/runtime/tests.rs",
    "content": "use test_log::test;\n\n#[test(tokio::test)]\n#[ignore = \"not yet implemented\"]\nasync fn handling_new_stream() {}\n\n#[test(tokio::test)]\n#[ignore = \"not yet implemented\"]\nasync fn stream_collision() {}\n\n#[test(tokio::test)]\n#[ignore = \"not yet implemented\"]\nasync fn handle_stream_timedout() {}\n\n#[test(tokio::test)]\n#[ignore = \"not yet implemented\"]\nasync fn handle_stream_handshaked() {}\n\n#[test(tokio::test)]\n#[ignore = \"not yet implemented\"]\nasync fn handle_stream_registration() {}\n\n#[test(tokio::test)]\n#[ignore = \"not yet implemented\"]\nasync fn handle_certificate_submission() {}\n\n#[test(tokio::test)]\n#[ignore = \"not yet implemented\"]\nasync fn handle_stream_error() {}\n\n#[test(tokio::test)]\n#[ignore = \"not yet implemented\"]\nasync fn handle_stream_closing() {}\n\n#[test(tokio::test)]\n#[ignore = \"not yet implemented\"]\nasync fn forcing_a_stream_to_close() {}\n"
  },
  {
    "path": "crates/topos-tce-api/src/stream/commands.rs",
    "content": "use topos_core::api::grpc::checkpoints::TargetStreamPosition;\nuse topos_core::types::CertificateDelivered;\n\n#[derive(Debug)]\npub enum StreamCommand {\n    PushCertificate {\n        certificate: CertificateDelivered,\n        positions: Vec<TargetStreamPosition>,\n    },\n}\n"
  },
  {
    "path": "crates/topos-tce-api/src/stream/errors.rs",
    "content": "use crate::runtime::{error::RuntimeError, InternalRuntimeCommand};\nuse thiserror::Error;\nuse tokio::sync::{mpsc::error::SendError, oneshot::error::RecvError};\nuse tonic::Code;\nuse uuid::Uuid;\n\n#[derive(Error, Debug)]\npub(crate) enum StreamErrorKind {\n    #[error(transparent)]\n    HandshakeFailed(#[from] HandshakeError),\n    #[error(\"Pre-start error\")]\n    PreStartError,\n    #[error(\"Stream is closed\")]\n    StreamClosed,\n    #[error(\"A timeout occurred\")]\n    Timeout,\n    #[error(\"The submitted command is invalid\")]\n    InvalidCommand,\n    #[error(\"Transport error: {0}\")]\n    Transport(Code),\n    #[error(\"The submitted TargetCheckpoint is ill-formed\")]\n    MalformedTargetCheckpoint,\n}\n\n#[derive(Debug)]\npub struct StreamError {\n    pub(crate) stream_id: Uuid,\n    pub(crate) kind: StreamErrorKind,\n}\n\nimpl StreamError {\n    pub(crate) fn new(stream_id: Uuid, kind: StreamErrorKind) -> Self {\n        Self { stream_id, kind }\n    }\n}\n\n#[derive(Error, Debug)]\npub(crate) enum HandshakeError {\n    #[error(transparent)]\n    Runtime(#[from] RuntimeError),\n\n    #[error(transparent)]\n    OneshotCommunicationChannel(#[from] RecvError),\n\n    #[error(transparent)]\n    InternalCommunicationChannel(#[from] Box<SendError<InternalRuntimeCommand>>),\n}\n\n#[cfg(test)]\nmod tests {\n    use test_log::test;\n    use tokio::sync::oneshot;\n\n    use super::*;\n\n    #[test(tokio::test)]\n    async fn handshake_error_expected() {\n        let uuid = Uuid::new_v4();\n        let runtime_error = RuntimeError::PendingStreamNotFound(uuid);\n\n        let handshake_error: HandshakeError = runtime_error.into();\n\n        assert_eq!(\n            format!(\"The pending stream {uuid} was not found\"),\n            handshake_error.to_string()\n        );\n\n        let (sender, receiver) = oneshot::channel::<Result<(), RuntimeError>>();\n\n        drop(sender);\n\n        let handshake_error: HandshakeError = 
receiver.await.unwrap_err().into();\n        assert_eq!(\"channel closed\", handshake_error.to_string());\n    }\n}\n"
  },
  {
    "path": "crates/topos-tce-api/src/stream/mod.rs",
    "content": "use futures::{stream::BoxStream, StreamExt, TryStreamExt};\nuse std::sync::Arc;\nuse std::{collections::HashMap, fmt::Debug, time::Duration};\nuse tokio::{\n    sync::{\n        mpsc::{self, Receiver, Sender},\n        oneshot,\n    },\n    time::timeout,\n};\nuse tonic::Status;\nuse topos_core::api::grpc::checkpoints::{TargetCheckpoint, TargetStreamPosition};\nuse topos_core::types::CertificateDelivered;\nuse topos_core::uci::SubnetId;\nuse tracing::{debug, error, info, trace, warn};\nuse uuid::Uuid;\n\npub mod commands;\npub mod errors;\n\n#[cfg(test)]\nmod tests;\n\nuse crate::{\n    grpc::messaging::{\n        CertificatePushed, InboundMessage, OpenStream, OutboundMessage, StreamOpened,\n    },\n    runtime::InternalRuntimeCommand,\n    RuntimeError,\n};\n\npub use self::commands::StreamCommand;\npub use self::errors::StreamError;\npub(crate) use self::errors::{HandshakeError, StreamErrorKind};\n\n/// [`TransientStream`] is a stream that live as long as the connection is open.\n/// A [`TransientStream`] will not receive any certificates that were delivered\n/// before the stream was ready to listen.\n///\n/// [`TransientStream`] implements [`futures::Stream`] and use a custom [`Drop`]\n/// implementation to notify the `runtime` when ended.\n#[derive(Debug)]\npub struct TransientStream {\n    pub(crate) inner: mpsc::Receiver<Arc<CertificateDelivered>>,\n    pub(crate) stream_id: Uuid,\n    pub(crate) notifier: Option<oneshot::Sender<Uuid>>,\n}\n\nimpl futures::Stream for TransientStream {\n    type Item = Arc<CertificateDelivered>;\n\n    fn poll_next(\n        mut self: std::pin::Pin<&mut Self>,\n        cx: &mut std::task::Context<'_>,\n    ) -> std::task::Poll<Option<Self::Item>> {\n        self.inner.poll_recv(cx)\n    }\n}\n\nimpl Drop for TransientStream {\n    fn drop(&mut self) {\n        if let Some(notifier) = self.notifier.take() {\n            trace!(\n                \"Dropping TransientStream {}, notifying runtime for cleanup\",\n   
             self.stream_id\n            );\n            _ = notifier.send(self.stream_id);\n        }\n    }\n}\n\npub struct Stream {\n    pub(crate) stream_id: Uuid,\n\n    /// Mapping for each target subnet to the set of position per source subnet\n    pub(crate) target_subnet_listeners: HashMap<SubnetId, HashMap<SubnetId, TargetStreamPosition>>,\n\n    pub(crate) command_receiver: Receiver<StreamCommand>,\n    pub(crate) internal_runtime_command_sender: Sender<InternalRuntimeCommand>,\n\n    /// gRPC outbound stream\n    pub(crate) outbound_stream: Sender<Result<(Option<Uuid>, OutboundMessage), Status>>,\n    /// gRPC inbound stream\n    pub(crate) inbound_stream:\n        BoxStream<'static, Result<(Option<Uuid>, InboundMessage), StreamError>>,\n}\n\nimpl Debug for Stream {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        f.debug_struct(\"Stream\")\n            .field(\"stream_id\", &self.stream_id)\n            .field(\"target_subnet_listeners\", &self.target_subnet_listeners)\n            .finish()\n    }\n}\n\nimpl Stream {\n    pub(crate) fn new(\n        stream_id: Uuid,\n        inbound_stream: BoxStream<'static, Result<(Option<Uuid>, InboundMessage), StreamError>>,\n        outbound_stream: Sender<Result<(Option<Uuid>, OutboundMessage), Status>>,\n        command_receiver: mpsc::Receiver<StreamCommand>,\n        internal_runtime_command_sender: Sender<InternalRuntimeCommand>,\n    ) -> Self {\n        Self {\n            stream_id,\n            target_subnet_listeners: HashMap::new(),\n            command_receiver,\n            outbound_stream,\n            inbound_stream,\n            internal_runtime_command_sender,\n        }\n    }\n\n    pub async fn run(mut self) -> Result<Uuid, StreamError> {\n        // Prestart is the phase that waits for a particular message to being able to process the\n        // handshake. 
For now we do not have authentication nor authorization.\n        let (request_id, checkpoint) = self.pre_start().await?;\n\n        // The handshake is preparing the stream to broadcast certificates to the client.\n        // Notifying the manager about the subscriptions and defining everything related to\n        // the stream management.\n        self.handshake(checkpoint)\n            .await\n            .map_err(|error| StreamError::new(self.stream_id, StreamErrorKind::from(error)))?;\n\n        if let Err(error) = self\n            .outbound_stream\n            .send(Ok((\n                request_id,\n                OutboundMessage::StreamOpened(StreamOpened {\n                    subnet_ids: self.target_subnet_listeners.keys().copied().collect(),\n                }),\n            )))\n            .await\n        {\n            error!(%error, \"Handshake failed with stream\");\n\n            return Err(StreamError::new(\n                self.stream_id,\n                StreamErrorKind::StreamClosed,\n            ));\n        }\n\n        loop {\n            tokio::select! {\n                Some(command) = self.command_receiver.recv() => {\n                    if self.handle_command(command).await? 
{\n                        break\n                    }\n                }\n\n                // We currently open the stream, but no other message from the client is getting processed.\n                // We are using this open connection to communicate `delivered_certificates` to the client.\n                Some(stream_packet) = self.inbound_stream.next() => {\n                    match stream_packet {\n                        Ok((_request_id, _message)) => {\n                            trace!(\"Received message from stream_id: {:?}\", self.stream_id);\n                        }\n                        Err(error) => {\n                            match error.kind {\n                                StreamErrorKind::StreamClosed => {\n                                    warn!(\"Stream {} closed\", self.stream_id);\n                                    return Err(StreamError::new(self.stream_id, StreamErrorKind::StreamClosed));\n                                }\n                                _ => {\n                                    // We are not handling specific errors for now.\n                                    // If the sequencer is closing the connection, we are receiving a\n                                    // StreamErrorKind::TransportError.\n                                    error!( \"Stream error: {:?}\", error);\n                                    return Err(StreamError::new(self.stream_id, error.kind));\n\n                                }\n\n                            }\n                        }\n                    }\n\n                }\n\n                else => break,\n            }\n        }\n\n        Ok(self.stream_id)\n    }\n}\n\nimpl Stream {\n    async fn handle_command(&mut self, command: StreamCommand) -> Result<bool, StreamError> {\n        match command {\n            StreamCommand::PushCertificate {\n                certificate,\n                positions,\n            } => {\n                let certificate_id = 
certificate.certificate.id;\n                if let Err(error) = self\n                    .outbound_stream\n                    .send(Ok((\n                        None,\n                        OutboundMessage::CertificatePushed(Box::new(CertificatePushed {\n                            certificate,\n                            positions,\n                        })),\n                    )))\n                    .await\n                {\n                    error!(%error, \"Can't forward WatchCertificatesResponse to stream, channel seems dropped certificate {certificate_id}\");\n\n                    return Err(StreamError::new(\n                        self.stream_id,\n                        StreamErrorKind::StreamClosed,\n                    ));\n                } else {\n                    info!(\n                        \"Certificate {} sent to gRPC stream {}\",\n                        certificate_id, self.stream_id\n                    );\n                }\n            }\n        }\n\n        Ok(false)\n    }\n\n    async fn pre_start(&mut self) -> Result<(Option<Uuid>, TargetCheckpoint), StreamError> {\n        let waiting_for_open_stream = async {\n            if let Ok(Some((\n                request_id,\n                InboundMessage::OpenStream(OpenStream {\n                    target_checkpoint, ..\n                }),\n            ))) = self.inbound_stream.try_next().await\n            {\n                Ok((request_id, target_checkpoint))\n            } else {\n                Err(())\n            }\n        };\n\n        match timeout(Duration::from_millis(100), waiting_for_open_stream).await {\n            Ok(Ok(checkpoint)) => {\n                info!(\n                    \"Received an OpenStream command for the stream {}\",\n                    self.stream_id\n                );\n\n                Ok(checkpoint)\n            }\n            Ok(Err(_)) => {\n                if let Err(error) = self\n                    .outbound_stream\n     
               .send(Err(Status::invalid_argument(\"No OpenStream provided\")))\n                    .await\n                {\n                    warn!(%error, \"Can't notify stream of invalid argument during pre_start\");\n                    Err(StreamError::new(\n                        self.stream_id,\n                        StreamErrorKind::StreamClosed,\n                    ))\n                } else {\n                    Err(StreamError::new(\n                        self.stream_id,\n                        StreamErrorKind::PreStartError,\n                    ))\n                }\n            }\n            _ => Err(StreamError::new(self.stream_id, StreamErrorKind::Timeout)),\n        }\n    }\n\n    async fn handshake(&mut self, checkpoint: TargetCheckpoint) -> Result<(), HandshakeError> {\n        _ = self.handle_checkpoint(checkpoint);\n        let (sender, receiver) = oneshot::channel::<Result<(), RuntimeError>>();\n\n        self.internal_runtime_command_sender\n            .send(InternalRuntimeCommand::Register {\n                stream_id: self.stream_id,\n                target_subnet_stream_positions: self.target_subnet_listeners.clone(),\n                sender,\n            })\n            .await\n            .map_err(Box::new)?;\n\n        receiver.await??;\n\n        self.internal_runtime_command_sender\n            .send(InternalRuntimeCommand::Handshaked {\n                stream_id: self.stream_id,\n            })\n            .await\n            .map_err(Box::new)?;\n\n        Ok(())\n    }\n\n    fn handle_checkpoint(&mut self, checkpoint: TargetCheckpoint) -> Result<(), StreamError> {\n        self.target_subnet_listeners.clear();\n\n        for target in checkpoint.target_subnet_ids {\n            self.target_subnet_listeners\n                .insert(target, Default::default());\n        }\n\n        for position in checkpoint.positions {\n            let target = position.target_subnet_id;\n            if let Some(entry) = 
self.target_subnet_listeners.get_mut(&target) {\n                let source = position.source_subnet_id;\n                if entry.insert(source, position).is_some() {\n                    debug!(\n                        \"Stream {} replaced its position for target {:?} -> {:?}\",\n                        self.stream_id, target, source\n                    );\n                }\n            } else {\n                return Err(StreamError::new(\n                    self.stream_id,\n                    StreamErrorKind::MalformedTargetCheckpoint,\n                ));\n            }\n        }\n\n        Ok(())\n    }\n}\n"
  },
  {
    "path": "crates/topos-tce-api/src/stream/tests/utils.rs",
    "content": "use std::collections::HashMap;\n\nuse futures::{stream::BoxStream, StreamExt};\nuse hyper::body::Sender;\nuse tokio::sync::mpsc;\nuse tonic::{\n    codec::{Codec, ProstCodec},\n    transport::Body,\n    Status, Streaming,\n};\nuse topos_core::api::grpc::tce::v1::{WatchCertificatesRequest, WatchCertificatesResponse};\nuse uuid::Uuid;\n\nuse crate::{\n    grpc::{\n        messaging::{InboundMessage, OutboundMessage},\n        TceGrpcService,\n    },\n    runtime::InternalRuntimeCommand,\n    stream::{Stream, StreamCommand, StreamError},\n};\n\ntype CreateStreamResult = (\n    Sender,\n    BoxStream<'static, Result<(Option<Uuid>, InboundMessage), StreamError>>,\n);\n\npub fn create_stream(stream_id: Uuid) -> CreateStreamResult {\n    let (tx, body) = Body::channel();\n    let mut codec = ProstCodec::<WatchCertificatesResponse, WatchCertificatesRequest>::default();\n    let stream = Streaming::new_request(codec.decoder(), body, None, None)\n        .map(move |message| TceGrpcService::parse_stream(message, stream_id))\n        .boxed();\n\n    (tx, stream)\n}\n\npub struct StreamBuilder {\n    outbound_stream_channel_size: usize,\n    runtime_channel_size: usize,\n    stream_channel_size: usize,\n    stream_id: Uuid,\n}\n\nimpl Default for StreamBuilder {\n    fn default() -> Self {\n        Self {\n            outbound_stream_channel_size: 10,\n            runtime_channel_size: 10,\n            stream_channel_size: 10,\n            stream_id: Uuid::new_v4(),\n        }\n    }\n}\n\nimpl StreamBuilder {\n    #[allow(dead_code)]\n    pub fn outbound_stream_channel_size(mut self, value: usize) -> Self {\n        self.outbound_stream_channel_size = value;\n\n        self\n    }\n\n    #[allow(dead_code)]\n    pub fn runtime_channel_size(mut self, value: usize) -> Self {\n        self.runtime_channel_size = value;\n\n        self\n    }\n\n    #[allow(dead_code)]\n    pub fn stream_channel_size(mut self, value: usize) -> Self {\n        
self.stream_channel_size = value;\n\n        self\n    }\n\n    #[allow(dead_code)]\n    pub fn stream_id(mut self, value: Uuid) -> Self {\n        self.stream_id = value;\n\n        self\n    }\n\n    pub fn build(self) -> (Sender, Stream, StreamContext) {\n        let stream_id = Uuid::new_v4();\n        let (tx, stream) = create_stream(stream_id);\n        let (sender, stream_receiver) = mpsc::channel(self.outbound_stream_channel_size);\n        let (command_sender, command_receiver) = mpsc::channel(self.stream_channel_size);\n        let (internal_runtime_command_sender, runtime_receiver) =\n            mpsc::channel(self.runtime_channel_size);\n\n        let testable_stream = Stream {\n            stream_id,\n            target_subnet_listeners: HashMap::new(),\n            outbound_stream: sender,\n            inbound_stream: stream,\n            internal_runtime_command_sender,\n            command_receiver,\n        };\n\n        (\n            tx,\n            testable_stream,\n            StreamContext {\n                stream_receiver,\n                command_sender,\n                runtime_receiver,\n                stream_id,\n            },\n        )\n    }\n}\n\npub struct StreamContext {\n    pub(crate) stream_receiver: mpsc::Receiver<Result<(Option<Uuid>, OutboundMessage), Status>>,\n    #[allow(dead_code)]\n    pub(crate) command_sender: mpsc::Sender<StreamCommand>,\n    pub(crate) runtime_receiver: mpsc::Receiver<InternalRuntimeCommand>,\n    pub(crate) stream_id: Uuid,\n}\n"
  },
  {
    "path": "crates/topos-tce-api/src/stream/tests.rs",
    "content": "use rstest::*;\nuse std::time::Duration;\nuse tokio::sync::{mpsc, oneshot};\nuse tokio_stream::StreamExt;\nuse topos_core::uci::SUBNET_ID_LENGTH;\nuse topos_test_sdk::certificates::create_certificate_chain;\nuse topos_test_sdk::constants::{SOURCE_SUBNET_ID_2, TARGET_SUBNET_ID_1};\nuse uuid::Uuid;\n\nuse self::utils::StreamBuilder;\nuse crate::grpc::messaging::{OutboundMessage, StreamOpened};\nuse crate::runtime::InternalRuntimeCommand;\nuse crate::stream::{StreamError, StreamErrorKind, TransientStream};\nuse crate::tests::encode;\nuse crate::wait_for_command;\nuse test_log::test;\nuse tokio::spawn;\nuse topos_core::api::grpc::shared::v1::checkpoints::TargetCheckpoint;\nuse topos_core::api::grpc::shared::v1::positions::TargetStreamPosition;\nuse topos_core::api::grpc::tce::v1::watch_certificates_request::OpenStream as GrpcOpenStream;\nuse topos_core::api::grpc::tce::v1::WatchCertificatesRequest;\n\nmod utils;\n\n#[rstest]\n#[timeout(Duration::from_millis(100))]\n#[test(tokio::test)]\npub async fn sending_no_message() -> Result<(), Box<dyn std::error::Error>> {\n    let (_, stream, mut context) = StreamBuilder::default().build();\n\n    let join = spawn(stream.run());\n\n    wait_for_command!(\n        context.stream_receiver,\n        matches: Err(status) if status.message() == \"No OpenStream provided\"\n    );\n\n    let result = join.await?;\n\n    assert!(\n        matches!(result, Err(StreamError { stream_id, kind: StreamErrorKind::PreStartError}) if stream_id == context.stream_id),\n        \"Doesn't match {result:?}\",\n    );\n\n    Ok(())\n}\n\n#[rstest]\n#[timeout(Duration::from_millis(100))]\n#[test(tokio::test)]\npub async fn sending_open_stream_message() -> Result<(), Box<dyn std::error::Error>> {\n    let (mut tx, stream, mut context) = StreamBuilder::default().build();\n\n    let join = spawn(stream.run());\n\n    let msg: WatchCertificatesRequest = GrpcOpenStream {\n        target_checkpoint: Some(TargetCheckpoint {\n            
target_subnet_ids: vec![TARGET_SUBNET_ID_1.into()],\n            positions: Vec::new(),\n        }),\n        source_checkpoint: None,\n    }\n    .into();\n\n    _ = tx.send_data(encode(&msg)?).await;\n\n    let expected_stream_id = context.stream_id;\n\n    wait_for_command!(\n        context.runtime_receiver,\n        matches: InternalRuntimeCommand::Register { stream_id, .. } if stream_id == expected_stream_id\n    );\n\n    join.abort();\n    Ok(())\n}\n\n#[rstest]\n#[timeout(Duration::from_millis(100))]\n#[test(tokio::test)]\nasync fn subscribing_to_one_target_with_position() -> Result<(), Box<dyn std::error::Error>> {\n    let (mut tx, stream, mut context) = StreamBuilder::default().build();\n\n    let join = spawn(stream.run());\n\n    let msg: WatchCertificatesRequest = GrpcOpenStream {\n        target_checkpoint: Some(TargetCheckpoint {\n            target_subnet_ids: vec![TARGET_SUBNET_ID_1.into()],\n            positions: vec![TargetStreamPosition {\n                source_subnet_id: Some(SOURCE_SUBNET_ID_2.into()),\n                target_subnet_id: Some(TARGET_SUBNET_ID_1.into()),\n                position: 1,\n                certificate_id: None,\n            }],\n        }),\n        source_checkpoint: None,\n    }\n    .into();\n\n    _ = tx.send_data(encode(&msg)?).await;\n\n    let expected_stream_id = context.stream_id;\n\n    wait_for_command!(\n        context.runtime_receiver,\n        matches: InternalRuntimeCommand::Register { stream_id, .. 
} if stream_id == expected_stream_id\n    );\n\n    join.abort();\n\n    Ok(())\n}\n\n#[rstest]\n#[timeout(Duration::from_millis(100))]\n#[test(tokio::test)]\nasync fn receive_expected_certificate_from_zero() -> Result<(), Box<dyn std::error::Error>> {\n    let (mut tx, stream, mut context) = StreamBuilder::default().build();\n\n    let expected_certificates =\n        create_certificate_chain(SOURCE_SUBNET_ID_2, &[TARGET_SUBNET_ID_1], 2);\n\n    let join = spawn(stream.run());\n\n    let msg: WatchCertificatesRequest = GrpcOpenStream {\n        target_checkpoint: Some(TargetCheckpoint {\n            target_subnet_ids: vec![TARGET_SUBNET_ID_1.into()],\n            positions: vec![],\n        }),\n        source_checkpoint: None,\n    }\n    .into();\n\n    _ = tx.send_data(encode(&msg)?).await;\n\n    let expected_stream_id = context.stream_id;\n\n    wait_for_command!(\n        context.runtime_receiver,\n        matches: InternalRuntimeCommand::Register { stream_id, sender, .. } if stream_id == expected_stream_id => {\n            sender.send(Ok(()))\n        }\n    );\n\n    let msg = context.stream_receiver.recv().await;\n    assert!(\n        matches!(\n            msg,\n            Some(Ok((_, OutboundMessage::StreamOpened(StreamOpened { ref subnet_ids })))) if subnet_ids == &[TARGET_SUBNET_ID_1],\n        ),\n        \"Expected StreamOpened, received: {msg:?}\"\n    );\n\n    for (index, expected_certificate) in expected_certificates.iter().enumerate() {\n        context\n            .command_sender\n            .send(crate::stream::StreamCommand::PushCertificate {\n                certificate: expected_certificate.clone(),\n                positions: vec![topos_core::api::grpc::checkpoints::TargetStreamPosition {\n                    position: index as u64,\n                    certificate_id: Some(expected_certificate.certificate.id),\n                    target_subnet_id: [1u8; SUBNET_ID_LENGTH].into(),\n                    source_subnet_id: 
expected_certificate.certificate.source_subnet_id,\n                }],\n            })\n            .await\n            .expect(\"Unable to send certificate during test\");\n    }\n\n    for (expected_position, expected_certificate) in expected_certificates.into_iter().enumerate() {\n        assert!(\n            matches!(\n                context.stream_receiver.recv().await,\n                Some(Ok((_, OutboundMessage::CertificatePushed(certificate_pushed)))) if certificate_pushed.certificate == expected_certificate\n                && certificate_pushed.positions[0].position == expected_position as u64,\n            ),\n            \"Expected CertificatePushed with {}, received: {:?}\",\n            expected_certificate.certificate.id,\n            msg\n        );\n    }\n\n    join.abort();\n    Ok(())\n}\n\n#[test(tokio::test)]\n#[ignore = \"not yet implemented\"]\nasync fn pausing_all_subscription() {}\n\n#[test(tokio::test)]\n#[ignore = \"not yet implemented\"]\nasync fn pausing_one_subscription() {}\n\n#[test(tokio::test)]\n#[ignore = \"not yet implemented\"]\nasync fn resuming_one_subscription() {}\n\n#[test(tokio::test)]\n#[ignore = \"not yet implemented\"]\nasync fn resuming_all_subscription() {}\n\n#[rstest]\n#[test(tokio::test)]\nasync fn closing_client_stream() -> Result<(), Box<dyn std::error::Error>> {\n    let (mut tx, stream, mut context) = StreamBuilder::default().build();\n\n    let join = spawn(stream.run());\n\n    let msg: WatchCertificatesRequest = GrpcOpenStream {\n        target_checkpoint: Some(TargetCheckpoint {\n            target_subnet_ids: vec![TARGET_SUBNET_ID_1.into()],\n            positions: vec![],\n        }),\n        source_checkpoint: None,\n    }\n    .into();\n\n    _ = tx.send_data(encode(&msg)?).await;\n\n    let expected_stream_id = context.stream_id;\n\n    wait_for_command!(\n        context.runtime_receiver,\n        matches: InternalRuntimeCommand::Register { stream_id, sender, .. 
} if stream_id == expected_stream_id => {\n            sender.send(Ok(()))\n        }\n    );\n\n    let msg = context.stream_receiver.recv().await;\n\n    assert!(\n        matches!(\n            msg,\n            Some(Ok((_, OutboundMessage::StreamOpened(StreamOpened { ref subnet_ids })))) if subnet_ids == &[TARGET_SUBNET_ID_1],\n        ),\n        \"Expected StreamOpened, received: {msg:?}\"\n    );\n\n    tx.abort();\n\n    let result = join.await?;\n\n    assert!(\n        matches!(result, Err(StreamError { stream_id, kind: StreamErrorKind::Transport(_)}) if stream_id == context.stream_id),\n        \"Doesn't match {result:?}\",\n    );\n\n    Ok(())\n}\n\n#[test(tokio::test)]\n#[ignore = \"not yet implemented\"]\nasync fn closing_server_stream() {}\n\n#[test(tokio::test)]\nasync fn opening_transient_stream() {\n    let (_sender, receiver) = mpsc::channel(1);\n    let (notifier, check) = oneshot::channel();\n    let id = Uuid::new_v4();\n\n    let stream = TransientStream {\n        inner: receiver,\n        stream_id: id,\n        notifier: Some(notifier),\n    };\n\n    tokio::spawn(async move {\n        drop(stream);\n    });\n\n    let res = check.await;\n\n    assert_eq!(res.unwrap(), id);\n}\n\n#[test(tokio::test)]\nasync fn opening_transient_stream_drop_sender() {\n    let (sender, receiver) = mpsc::channel(1);\n    let (notifier, check) = oneshot::channel();\n    let id = Uuid::new_v4();\n\n    let mut stream = TransientStream {\n        inner: receiver,\n        stream_id: id,\n        notifier: Some(notifier),\n    };\n\n    let handle = tokio::spawn(async move { while stream.next().await.is_some() {} });\n\n    tokio::time::sleep(Duration::from_millis(10)).await;\n    drop(sender);\n\n    let res = check.await;\n\n    assert_eq!(res.unwrap(), id);\n    assert!(handle.is_finished());\n}\n"
  },
  {
    "path": "crates/topos-tce-api/src/tests.rs",
    "content": "use bytes::{BufMut, Bytes, BytesMut};\nuse prost::Message;\n\n#[macro_export]\nmacro_rules! wait_for_command {\n    ($node:expr, matches: $(|)? $( $pattern:pat_param )|+ $( if $guard: expr )? $(=> $input_block:block)? $(,)?) => {\n        let assertion = async {\n            while let Some(command) = $node.recv().await {\n                match command {\n                    $( $pattern )|+ $( if $guard )? => {\n                        _ = {$($input_block)?};\n\n                        break;\n                    }\n                    _ => {}\n                }\n            }\n        };\n\n        if let Err(_) = tokio::time::timeout(std::time::Duration::from_millis(100), assertion).await\n        {\n            panic!(\"Timeout waiting for command\");\n        }\n    };\n}\n// Utility to encode our proto into GRPC stream format.\npub fn encode<T: Message>(proto: &T) -> Result<Bytes, Box<dyn std::error::Error>> {\n    let mut buf = BytesMut::new();\n    // See below comment on spec.\n    use std::mem::size_of;\n    const PREFIX_BYTES: usize = size_of::<u8>() + size_of::<u32>();\n    for _ in 0..PREFIX_BYTES {\n        // Advance our buffer first.\n        // We will backfill it once we know the size of the message.\n        buf.put_u8(0);\n    }\n    proto.encode(&mut buf)?;\n    let len = buf.len() - PREFIX_BYTES;\n    {\n        let mut buf = &mut buf[0..PREFIX_BYTES];\n        // See: https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#:~:text=Compressed-Flag\n        // for more details on spec.\n        // Compressed-Flag -> 0 / 1 # encoded as 1 byte unsigned integer.\n        buf.put_u8(0);\n        // Message-Length -> {length of Message} # encoded as 4 byte unsigned integer (big endian).\n        buf.put_u32(len as u32);\n        // Message -> *{binary octet}.\n    }\n\n    Ok(buf.freeze())\n}\n"
  },
  {
    "path": "crates/topos-tce-api/tests/grpc/certificate_precedence.rs",
    "content": "use base64ct::{Base64, Encoding};\nuse rstest::rstest;\nuse std::sync::Arc;\nuse test_log::test;\nuse topos_core::api::grpc::tce::v1::{GetLastPendingCertificatesRequest, LastPendingCertificate};\nuse topos_core::uci::Certificate;\nuse topos_test_sdk::{\n    certificates::create_certificate_chain,\n    constants::{SOURCE_SUBNET_ID_1, TARGET_SUBNET_ID_1},\n    storage::{create_fullnode_store, create_validator_store, storage_client},\n    tce::public_api::{broadcast_stream, create_public_api},\n};\n\nuse topos_tce_storage::validator::ValidatorStore;\n\n#[rstest]\n#[test(tokio::test)]\nasync fn fetch_latest_pending_certificates() {\n    let fullnode_store = create_fullnode_store(&[]).await;\n    let validator_store: Arc<ValidatorStore> =\n        create_validator_store(&[], futures::future::ready(fullnode_store.clone())).await;\n\n    let (api_context, _) = create_public_api(\n        storage_client(&[]),\n        broadcast_stream(),\n        futures::future::ready(validator_store.clone()),\n    )\n    .await;\n    let mut client = api_context.api_client;\n    let certificates = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 2);\n\n    let expected = certificates[1].certificate.clone();\n\n    assert!(validator_store\n        .insert_pending_certificate(&certificates[1].certificate)\n        .await\n        .unwrap()\n        .is_none());\n\n    assert!(validator_store\n        .insert_pending_certificate(&certificates[0].certificate)\n        .await\n        .unwrap()\n        .is_some());\n\n    let mut res = client\n        .get_last_pending_certificates(GetLastPendingCertificatesRequest {\n            subnet_ids: vec![SOURCE_SUBNET_ID_1.into()],\n        })\n        .await\n        .unwrap()\n        .into_inner();\n\n    let res: LastPendingCertificate = res\n        .last_pending_certificate\n        .remove(&Base64::encode_string(SOURCE_SUBNET_ID_1.as_array()))\n        .unwrap();\n\n    let res: Certificate = 
res.value.unwrap().try_into().unwrap();\n\n    assert_eq!(res, expected);\n}\n\n#[rstest]\n#[test(tokio::test)]\nasync fn fetch_latest_pending_certificates_with_conflicts() {\n    let fullnode_store = create_fullnode_store(&[]).await;\n    let validator_store: Arc<ValidatorStore> =\n        create_validator_store(&[], futures::future::ready(fullnode_store.clone())).await;\n\n    let (api_context, _) = create_public_api(\n        storage_client(&[]),\n        broadcast_stream(),\n        futures::future::ready(validator_store.clone()),\n    )\n    .await;\n    let mut client = api_context.api_client;\n    let mut certificates = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 3);\n\n    certificates[2].certificate.prev_id = certificates[1].certificate.prev_id;\n\n    let expected = certificates[2].certificate.clone();\n\n    for certificate in certificates.iter().skip(1) {\n        assert!(validator_store\n            .insert_pending_certificate(&certificate.certificate)\n            .await\n            .unwrap()\n            .is_none());\n    }\n\n    assert!(validator_store\n        .insert_pending_certificate(&certificates[0].certificate)\n        .await\n        .unwrap()\n        .is_some());\n\n    let mut res = client\n        .get_last_pending_certificates(GetLastPendingCertificatesRequest {\n            subnet_ids: vec![SOURCE_SUBNET_ID_1.into()],\n        })\n        .await\n        .unwrap()\n        .into_inner();\n\n    let res: LastPendingCertificate = res\n        .last_pending_certificate\n        .remove(&Base64::encode_string(SOURCE_SUBNET_ID_1.as_array()))\n        .unwrap();\n\n    let res: Certificate = res.value.unwrap().try_into().unwrap();\n\n    assert_eq!(res, expected);\n}\n"
  },
  {
    "path": "crates/topos-tce-api/tests/grpc/mod.rs",
    "content": "mod certificate_precedence;\n"
  },
  {
    "path": "crates/topos-tce-api/tests/runtime.rs",
    "content": "use futures::Stream;\nuse rstest::rstest;\nuse serde::Deserialize;\nuse std::collections::HashMap;\nuse std::sync::Arc;\nuse std::time::Duration;\nuse test_log::test;\nuse tokio::sync::{broadcast, mpsc};\nuse tokio::{spawn, sync::oneshot};\nuse tokio_stream::StreamExt;\nuse tonic::transport::channel;\nuse tonic::transport::Uri;\nuse topos_core::api::graphql::certificate::Certificate as GraphQLCertificate;\nuse topos_core::api::grpc::shared::v1::checkpoints::TargetCheckpoint;\nuse topos_core::api::grpc::shared::v1::positions::TargetStreamPosition;\nuse topos_core::types::stream::Position;\nuse topos_core::types::CertificateDelivered;\nuse topos_core::uci::CertificateId;\nuse topos_core::{\n    api::grpc::tce::v1::{\n        api_service_client::ApiServiceClient,\n        watch_certificates_request::OpenStream,\n        watch_certificates_response::{CertificatePushed, Event},\n    },\n    uci::Certificate,\n};\nuse topos_metrics::{STORAGE_PENDING_POOL_COUNT, STORAGE_PRECEDENCE_POOL_COUNT};\nuse topos_tce_api::{Runtime, RuntimeEvent};\nuse topos_tce_storage::types::CertificateDeliveredWithPositions;\nuse topos_tce_storage::validator::ValidatorStore;\nuse topos_tce_storage::StorageClient;\nuse topos_test_sdk::certificates::{\n    create_certificate, create_certificate_at_position, create_certificate_chain,\n};\nuse topos_test_sdk::constants::*;\nuse topos_test_sdk::networking::get_available_addr;\nuse topos_test_sdk::storage::{create_fullnode_store, create_validator_store, storage_client};\nuse topos_test_sdk::tce::public_api::{broadcast_stream, create_public_api, PublicApiContext};\n\nmod grpc;\n\n#[rstest]\n#[timeout(Duration::from_secs(4))]\n#[test(tokio::test)]\nasync fn runtime_can_dispatch_a_cert(\n    #[future] create_public_api: (PublicApiContext, impl Stream<Item = RuntimeEvent>),\n) {\n    let (mut api_context, _) = create_public_api.await;\n    let mut client = api_context.api_client;\n    let (tx, rx) = oneshot::channel::<Certificate>();\n\n  
  // This block represent a subnet A\n    spawn(async move {\n        let in_stream = async_stream::stream! {\n            yield OpenStream {\n                target_checkpoint: Some(TargetCheckpoint {\n                    target_subnet_ids: vec![TARGET_SUBNET_ID_1.into()],\n                    positions: Vec::new()\n                }),\n                source_checkpoint: None\n            }.into()\n        };\n\n        let response = client.watch_certificates(in_stream).await.unwrap();\n\n        let mut resp_stream = response.into_inner();\n\n        let mut tx = Some(tx);\n        while let Some(received) = resp_stream.next().await {\n            let received = received.unwrap();\n            if let Some(Event::CertificatePushed(CertificatePushed {\n                certificate: Some(certificate),\n                ..\n            })) = received.event\n            {\n                if let Some(tx) = tx.take() {\n                    _ = tx.send(certificate.try_into().unwrap());\n                } else {\n                    panic!(\"Double certificate sent\");\n                }\n            }\n        }\n    });\n\n    // Wait for client to be ready\n    tokio::time::sleep(Duration::from_millis(10)).await;\n\n    let cert = create_certificate_at_position(\n        Position::ZERO,\n        create_certificate(\n            SOURCE_SUBNET_ID_1,\n            &[TARGET_SUBNET_ID_1],\n            Some(PREV_CERTIFICATE_ID),\n        ),\n    );\n\n    let mut target_positions = std::collections::HashMap::new();\n    target_positions.insert(\n        TARGET_SUBNET_ID_1,\n        topos_core::api::grpc::checkpoints::TargetStreamPosition {\n            position: 0,\n            source_subnet_id: SOURCE_SUBNET_ID_1,\n            target_subnet_id: TARGET_SUBNET_ID_1,\n            certificate_id: Some(cert.certificate.id),\n        },\n    );\n\n    // Send a dispatch command that will be push to the subnet A\n    api_context\n        .client\n        
.dispatch_certificate(cert.clone(), target_positions)\n        .await;\n\n    let certificate_received = rx.await.unwrap();\n    assert_eq!(cert.certificate, certificate_received);\n    drop(api_context.api_context.take());\n}\n\n#[rstest]\n#[timeout(Duration::from_secs(4))]\n#[test(tokio::test)]\nasync fn can_catchup_with_old_certs(\n    #[with(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 15)]\n    #[from(create_certificate_chain)]\n    certificates: Vec<CertificateDelivered>,\n) {\n    let storage_client = storage_client::partial_1(&certificates[..]);\n    let (mut api_context, _) = create_public_api::partial_1(storage_client).await;\n\n    let mut client = api_context.api_client;\n\n    let (tx, mut rx) = mpsc::channel::<Certificate>(16);\n\n    // This block represent a subnet A\n    spawn(async move {\n        let in_stream = async_stream::stream! {\n            yield OpenStream {\n                target_checkpoint: Some(TargetCheckpoint {\n                    target_subnet_ids: vec![TARGET_SUBNET_ID_1.into()],\n                    positions: Vec::new()\n                }),\n                source_checkpoint: None\n            }.into()\n        };\n\n        let response = client.watch_certificates(in_stream).await.unwrap();\n\n        let mut resp_stream = response.into_inner();\n\n        while let Some(received) = resp_stream.next().await {\n            let received = received.unwrap();\n            if let Some(Event::CertificatePushed(CertificatePushed {\n                certificate: Some(certificate),\n                ..\n            })) = received.event\n            {\n                _ = tx.send(certificate.try_into().unwrap()).await;\n            }\n        }\n    });\n\n    // Wait for client to be ready\n    tokio::time::sleep(Duration::from_millis(100)).await;\n\n    let last = certificates.last().map(|c| c.certificate.id).unwrap();\n    let cert = create_certificate_at_position(\n        certificates.len().try_into().unwrap(),\n        
create_certificate(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], Some(last)),\n    );\n\n    let mut target_positions = std::collections::HashMap::new();\n    target_positions.insert(\n        TARGET_SUBNET_ID_1,\n        topos_core::api::grpc::checkpoints::TargetStreamPosition {\n            position: certificates.len() as u64,\n            source_subnet_id: SOURCE_SUBNET_ID_1,\n            target_subnet_id: TARGET_SUBNET_ID_1,\n            certificate_id: Some(cert.certificate.id),\n        },\n    );\n\n    // Send a dispatch command that will be push to the subnet A\n    api_context\n        .client\n        .dispatch_certificate(cert.clone(), target_positions)\n        .await;\n\n    for (index, certificate) in certificates.iter().enumerate() {\n        let certificate_received = rx\n            .recv()\n            .await\n            .unwrap_or_else(|| panic!(\"Didn't received index {index}\"));\n        assert_eq!(\n            certificate.certificate, certificate_received,\n            \"Certificate at index {index} not received\"\n        );\n    }\n\n    let certificate_received = rx.recv().await.unwrap();\n    assert_eq!(cert.certificate, certificate_received);\n    drop(api_context.api_context.take());\n}\n\n#[rstest]\n#[timeout(Duration::from_secs(4))]\n#[test(tokio::test)]\nasync fn can_catchup_with_old_certs_with_position(\n    broadcast_stream: broadcast::Receiver<CertificateDeliveredWithPositions>,\n) {\n    let (tx, mut rx) = mpsc::channel::<Certificate>(16);\n\n    let addr = get_available_addr();\n    let graphql_addr = get_available_addr();\n    let metrics_addr = get_available_addr();\n\n    // launch data store\n    let certificates = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 15);\n\n    let fullnode_store = create_fullnode_store::default().await;\n    let store = create_validator_store(\n        &certificates[..],\n        futures::future::ready(fullnode_store.clone()),\n    )\n    .await;\n\n    let storage_client = 
StorageClient::new(store.clone());\n\n    let (runtime_client, _launcher, _ctx) = Runtime::builder()\n        .with_broadcast_stream(broadcast_stream)\n        .storage(storage_client)\n        .store(store)\n        .serve_grpc_addr(addr)\n        .serve_graphql_addr(graphql_addr)\n        .serve_metrics_addr(metrics_addr)\n        .build_and_launch()\n        .await;\n\n    // Wait for server to boot\n    tokio::time::sleep(Duration::from_millis(100)).await;\n\n    let uri = Uri::builder()\n        .path_and_query(\"/\")\n        .authority(addr.to_string())\n        .scheme(\"http\")\n        .build()\n        .unwrap();\n\n    // This block represent a subnet A\n    spawn(async move {\n        let channel = channel::Channel::builder(uri).connect_lazy();\n        let mut client = ApiServiceClient::new(channel);\n        let in_stream = async_stream::stream! {\n            yield OpenStream {\n                target_checkpoint: Some(TargetCheckpoint {\n                    target_subnet_ids: vec![TARGET_SUBNET_ID_1.into()],\n                    positions: vec![\n                        TargetStreamPosition {\n                            certificate_id: None,\n                            position: 5,\n                            source_subnet_id: Some(SOURCE_SUBNET_ID_1.into()),\n                            target_subnet_id: Some(TARGET_SUBNET_ID_1.into())\n                        }\n                    ]\n                }),\n                source_checkpoint: None\n            }.into()\n        };\n\n        let response = client.watch_certificates(in_stream).await.unwrap();\n\n        let mut resp_stream = response.into_inner();\n\n        while let Some(received) = resp_stream.next().await {\n            let received = received.unwrap();\n            if let Some(Event::CertificatePushed(CertificatePushed {\n                certificate: Some(certificate),\n                ..\n            })) = received.event\n            {\n                _ = 
tx.send(certificate.try_into().unwrap()).await;\n            }\n        }\n    });\n\n    // Wait for client to be ready\n    tokio::time::sleep(Duration::from_millis(100)).await;\n\n    let last = certificates.last().map(|c| c.certificate.id).unwrap();\n    let cert = create_certificate_at_position(\n        certificates.len().try_into().unwrap(),\n        create_certificate(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], Some(last)),\n    );\n\n    let mut target_positions = std::collections::HashMap::new();\n    target_positions.insert(\n        TARGET_SUBNET_ID_1,\n        topos_core::api::grpc::checkpoints::TargetStreamPosition {\n            position: certificates.len() as u64,\n            source_subnet_id: SOURCE_SUBNET_ID_1,\n            target_subnet_id: TARGET_SUBNET_ID_1,\n            certificate_id: Some(cert.certificate.id),\n        },\n    );\n\n    // Send a dispatch command that will be push to the subnet A\n    runtime_client\n        .dispatch_certificate(cert.clone(), target_positions)\n        .await;\n\n    for (index, certificate) in certificates.iter().skip(5).enumerate() {\n        let certificate_received = rx\n            .recv()\n            .await\n            .unwrap_or_else(|| panic!(\"Didn't received index {index}\"));\n        assert_eq!(\n            certificate.certificate, certificate_received,\n            \"Certificate at index {index} not received\"\n        );\n    }\n\n    let certificate_received = rx.recv().await.unwrap();\n    assert_eq!(cert.certificate, certificate_received);\n}\n\n#[test(tokio::test)]\n#[ignore = \"not yet implemented\"]\nasync fn can_listen_for_multiple_subnet_id() {}\n\n#[rstest]\n#[timeout(Duration::from_secs(4))]\n#[test(tokio::test)]\nasync fn boots_healthy_graphql_server(\n    broadcast_stream: broadcast::Receiver<CertificateDeliveredWithPositions>,\n) {\n    let addr = get_available_addr();\n    let graphql_addr = get_available_addr();\n    let metrics_addr = get_available_addr();\n\n    // launch 
data store\n    let certificates = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 15);\n\n    let fullnode_store = create_fullnode_store::default().await;\n    let store = create_validator_store(\n        &certificates[..],\n        futures::future::ready(fullnode_store.clone()),\n    )\n    .await;\n    let storage_client = StorageClient::new(store.clone());\n    let (_runtime_client, _launcher, _ctx) = Runtime::builder()\n        .with_broadcast_stream(broadcast_stream)\n        .storage(storage_client)\n        .store(store)\n        .serve_grpc_addr(addr)\n        .serve_graphql_addr(graphql_addr)\n        .serve_metrics_addr(metrics_addr)\n        .build_and_launch()\n        .await;\n\n    // Wait for server to boot\n    tokio::time::sleep(Duration::from_millis(100)).await;\n\n    let res = reqwest::get(format!(\"http://{}/health\", graphql_addr))\n        .await\n        .unwrap()\n        .text()\n        .await\n        .unwrap();\n\n    assert_eq!(res, \"{\\\"healthy\\\":true}\");\n}\n\n#[rstest]\n#[timeout(Duration::from_secs(4))]\n#[test(tokio::test)]\nasync fn graphql_server_enables_cors(\n    broadcast_stream: broadcast::Receiver<CertificateDeliveredWithPositions>,\n) {\n    let addr = get_available_addr();\n    let graphql_addr = get_available_addr();\n    let metrics_addr = get_available_addr();\n\n    // launch data store\n    let certificates = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 15);\n\n    let fullnode_store = create_fullnode_store::default().await;\n    let store = create_validator_store(\n        &certificates[..],\n        futures::future::ready(fullnode_store.clone()),\n    )\n    .await;\n\n    let storage_client = StorageClient::new(store.clone());\n\n    let (_runtime_client, _launcher, _ctx) = Runtime::builder()\n        .with_broadcast_stream(broadcast_stream)\n        .storage(storage_client)\n        .store(store)\n        .serve_grpc_addr(addr)\n        
.serve_graphql_addr(graphql_addr)\n        .serve_metrics_addr(metrics_addr)\n        .build_and_launch()\n        .await;\n\n    // Wait for server to boot\n    tokio::time::sleep(Duration::from_millis(100)).await;\n\n    let mut headers = reqwest::header::HeaderMap::new();\n    headers.insert(\"Origin\", \"http://example.com\".parse().unwrap());\n    headers.insert(\"Access-Control-Request-Method\", \"POST\".parse().unwrap());\n    headers.insert(\n        \"Access-Control-Request-Headers\",\n        \"X-Requested-With\".parse().unwrap(),\n    );\n\n    let client = reqwest::Client::new();\n\n    let res = client\n        .request(\n            \"OPTIONS\".parse().unwrap(),\n            format!(\"http://{}/health\", graphql_addr),\n        )\n        .headers(headers)\n        .send()\n        .await\n        .unwrap();\n\n    let headers = res.headers();\n\n    let ac_allow_origin = headers.get(\"Access-Control-Allow-Origin\");\n    assert_eq!(ac_allow_origin.unwrap().to_str().unwrap(), \"*\");\n\n    let ac_allow_methods = headers.get(\"Access-Control-Allow-Methods\");\n    assert_eq!(ac_allow_methods.unwrap().to_str().unwrap(), \"GET,POST\");\n\n    let ac_allow_headers = headers.get(\"Access-Control-Allow-Headers\");\n    assert_eq!(ac_allow_headers.unwrap().to_str().unwrap(), \"content-type\");\n}\n\n#[rstest]\n#[timeout(Duration::from_secs(4))]\n#[test(tokio::test)]\nasync fn can_query_graphql_endpoint_for_certificates(\n    broadcast_stream: broadcast::Receiver<CertificateDeliveredWithPositions>,\n) {\n    let (tx, mut rx) = mpsc::channel::<Certificate>(16);\n\n    let addr = get_available_addr();\n    let graphql_addr = get_available_addr();\n    let metrics_addr = get_available_addr();\n\n    // launch data store\n    let certificates = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 15);\n\n    let fullnode_store = create_fullnode_store::default().await;\n\n    let store = create_validator_store(\n        &certificates[..],\n        
futures::future::ready(fullnode_store.clone()),\n    )\n    .await;\n\n    let storage_client = StorageClient::new(store.clone());\n\n    let (runtime_client, _launcher, _ctx) = Runtime::builder()\n        .with_broadcast_stream(broadcast_stream)\n        .storage(storage_client)\n        .store(store)\n        .serve_grpc_addr(addr)\n        .serve_graphql_addr(graphql_addr)\n        .serve_metrics_addr(metrics_addr)\n        .build_and_launch()\n        .await;\n\n    // Wait for server to boot\n    tokio::time::sleep(Duration::from_millis(100)).await;\n\n    let uri = Uri::builder()\n        .path_and_query(\"/\")\n        .authority(addr.to_string())\n        .scheme(\"http\")\n        .build()\n        .unwrap();\n\n    // This block represent a subnet A\n    spawn(async move {\n        let channel = channel::Channel::builder(uri).connect_lazy();\n        let mut client = ApiServiceClient::new(channel);\n        let in_stream = async_stream::stream! {\n            yield OpenStream {\n                target_checkpoint: Some(TargetCheckpoint {\n                    target_subnet_ids: vec![TARGET_SUBNET_ID_1.into()],\n                    positions: vec![\n                        TargetStreamPosition {\n                            certificate_id: None,\n                            position: 5,\n                            source_subnet_id: Some(SOURCE_SUBNET_ID_1.into()),\n                            target_subnet_id: Some(TARGET_SUBNET_ID_1.into())\n                        }\n                    ]\n                }),\n                source_checkpoint: None\n            }.into()\n        };\n\n        let response = client.watch_certificates(in_stream).await.unwrap();\n\n        let mut resp_stream = response.into_inner();\n\n        while let Some(received) = resp_stream.next().await {\n            let received = received.unwrap();\n            if let Some(Event::CertificatePushed(CertificatePushed {\n                certificate: Some(certificate),\n             
   ..\n            })) = received.event\n            {\n                _ = tx.send(certificate.try_into().unwrap()).await;\n            }\n        }\n    });\n\n    // Wait for client to be ready\n    tokio::time::sleep(Duration::from_millis(100)).await;\n\n    let last = certificates.last().map(|c| c.certificate.id).unwrap();\n    let cert = create_certificate_at_position(\n        certificates.len().try_into().unwrap(),\n        create_certificate(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], Some(last)),\n    );\n\n    let mut target_positions = std::collections::HashMap::new();\n    target_positions.insert(\n        TARGET_SUBNET_ID_1,\n        topos_core::api::grpc::checkpoints::TargetStreamPosition {\n            position: certificates.len() as u64,\n            source_subnet_id: SOURCE_SUBNET_ID_1,\n            target_subnet_id: TARGET_SUBNET_ID_1,\n            certificate_id: Some(cert.certificate.id),\n        },\n    );\n\n    // Send a dispatch command that will be push to the subnet A\n    runtime_client\n        .dispatch_certificate(cert.clone(), target_positions)\n        .await;\n\n    for (index, certificate) in certificates.iter().skip(5).enumerate() {\n        let certificate_received = rx\n            .recv()\n            .await\n            .unwrap_or_else(|| panic!(\"Didn't received index {index}\"));\n        assert_eq!(\n            certificate.certificate, certificate_received,\n            \"Certificate at index {index} not received\"\n        );\n    }\n\n    let _ = rx.recv().await.unwrap();\n\n    let query = format!(\n        r#\"\n        query {{\n            certificates(\n                fromSourceCheckpoint: {{\n                    sourceSubnetIds: [\n                        \"{SOURCE_SUBNET_ID_1}\"\n                    ],\n                    positions: [\n                        {{\n                            sourceSubnetId:\"{SOURCE_SUBNET_ID_1}\",\n                            position: 0,\n                        }}\n           
         ]\n                }},\n                first: 10\n            ) {{\n                id\n                prevId\n                proof\n                signature\n                sourceSubnetId\n                stateRoot\n                targetSubnets\n                txRootHash\n                receiptsRootHash\n                verifier\n                positions {{\n                  source {{\n                    sourceSubnetId\n                    position\n                    certificateId\n                  }}\n                }}\n            }}\n        }}\n        \"#\n    );\n\n    #[derive(Deserialize)]\n    struct Response {\n        data: CertificatesResponse,\n    }\n\n    #[derive(Deserialize, Debug)]\n    struct CertificatesResponse {\n        certificates: Vec<GraphQLCertificate>,\n    }\n\n    let client = reqwest::Client::new();\n\n    let response = client\n        .post(format!(\"http://{}\", graphql_addr))\n        .json(&serde_json::json!({\n            \"query\": query,\n        }))\n        .send()\n        .await\n        .unwrap()\n        .json::<Response>()\n        .await\n        .unwrap();\n\n    let graphql_certificate: GraphQLCertificate = cert.as_ref().into();\n\n    assert_eq!(response.data.certificates.len(), 10);\n    assert_eq!(\n        response.data.certificates[0].source_subnet_id,\n        graphql_certificate.source_subnet_id\n    );\n}\n\n#[rstest]\n#[timeout(Duration::from_secs(4))]\n#[test(tokio::test)]\nasync fn check_storage_pool_stats(\n    broadcast_stream: broadcast::Receiver<CertificateDeliveredWithPositions>,\n) {\n    let addr = get_available_addr();\n    let graphql_addr = get_available_addr();\n    let metrics_addr = get_available_addr();\n\n    let fullnode_store = create_fullnode_store::default().await;\n\n    let store = create_validator_store(&[], futures::future::ready(fullnode_store.clone())).await;\n    STORAGE_PENDING_POOL_COUNT.set(10);\n    STORAGE_PRECEDENCE_POOL_COUNT.set(200);\n\n    let 
storage_client = StorageClient::new(store.clone());\n\n    let (_runtime_client, _launcher, _ctx) = Runtime::builder()\n        .with_broadcast_stream(broadcast_stream)\n        .storage(storage_client)\n        .store(store)\n        .serve_grpc_addr(addr)\n        .serve_graphql_addr(graphql_addr)\n        .serve_metrics_addr(metrics_addr)\n        .build_and_launch()\n        .await;\n\n    // Wait for server to boot\n    tokio::time::sleep(Duration::from_millis(100)).await;\n\n    let query = \"query {getStoragePoolStats}\";\n\n    #[derive(Debug, Deserialize)]\n    struct Response {\n        // data: HashMap<String, serde_json::Value>,\n        data: Stats,\n    }\n\n    #[derive(Debug, Deserialize)]\n    #[serde(rename_all = \"camelCase\")]\n    struct Stats {\n        get_storage_pool_stats: PoolStats,\n    }\n\n    #[derive(Debug, Deserialize)]\n    struct PoolStats {\n        metrics_pending_pool: u64,\n        metrics_precedence_pool: u64,\n    }\n\n    let client = reqwest::Client::new();\n\n    let response = client\n        .post(format!(\"http://{}\", graphql_addr))\n        .json(&serde_json::json!({\n            \"query\": query,\n        }))\n        .send()\n        .await\n        .unwrap()\n        .json::<Response>()\n        .await\n        .unwrap();\n\n    assert_eq!(\n        response.data.get_storage_pool_stats.metrics_pending_pool,\n        10\n    );\n    assert_eq!(\n        response.data.get_storage_pool_stats.metrics_precedence_pool,\n        200\n    );\n}\n\n#[rstest]\n#[timeout(Duration::from_secs(4))]\n#[test(tokio::test)]\nasync fn get_pending_pool(\n    broadcast_stream: broadcast::Receiver<CertificateDeliveredWithPositions>,\n) {\n    let addr = get_available_addr();\n    let graphql_addr = get_available_addr();\n    let metrics_addr = get_available_addr();\n\n    // launch data store\n    let certificates = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 15);\n\n    let fullnode_store = 
create_fullnode_store::default().await;\n\n    let store: Arc<ValidatorStore> =\n        create_validator_store(&[], futures::future::ready(fullnode_store.clone())).await;\n\n    for certificate in &certificates {\n        _ = store\n            .insert_pending_certificate(&certificate.certificate)\n            .await;\n    }\n\n    let storage_client = StorageClient::new(store.clone());\n\n    let (_runtime_client, _launcher, _ctx) = Runtime::builder()\n        .with_broadcast_stream(broadcast_stream)\n        .storage(storage_client)\n        .store(store)\n        .serve_grpc_addr(addr)\n        .serve_graphql_addr(graphql_addr)\n        .serve_metrics_addr(metrics_addr)\n        .build_and_launch()\n        .await;\n\n    // Wait for server to boot\n    tokio::time::sleep(Duration::from_millis(100)).await;\n\n    let query = \"query { getPendingPool }\".to_string();\n\n    #[derive(Debug, Deserialize)]\n    struct Response {\n        data: PendingPool,\n    }\n\n    #[derive(Debug, Deserialize)]\n    #[serde(rename_all = \"camelCase\")]\n    struct PendingPool {\n        #[serde(rename = \"getPendingPool\")]\n        pool: HashMap<u64, String>,\n    }\n\n    let client = reqwest::Client::new();\n\n    let mut response = client\n        .post(format!(\"http://{}\", graphql_addr))\n        .json(&serde_json::json!({\n            \"query\": query,\n        }))\n        .send()\n        .await\n        .unwrap()\n        .json::<Response>()\n        .await\n        .unwrap();\n\n    assert_eq!(response.data.pool.len(), 1);\n    let first: CertificateId = response\n        .data\n        .pool\n        .remove(&1)\n        .unwrap()\n        .as_bytes()\n        .try_into()\n        .unwrap();\n\n    assert_eq!(first, certificates[0].certificate.id);\n}\n\n#[rstest]\n#[timeout(Duration::from_secs(4))]\n#[test(tokio::test)]\nasync fn check_precedence(\n    broadcast_stream: broadcast::Receiver<CertificateDeliveredWithPositions>,\n) {\n    let addr = 
get_available_addr();\n    let graphql_addr = get_available_addr();\n    let metrics_addr = get_available_addr();\n\n    // launch data store\n    let certificates = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 15);\n\n    let fullnode_store = create_fullnode_store::default().await;\n\n    let store: Arc<ValidatorStore> =\n        create_validator_store(&[], futures::future::ready(fullnode_store.clone())).await;\n\n    for certificate in &certificates {\n        _ = store\n            .insert_pending_certificate(&certificate.certificate)\n            .await;\n    }\n\n    let storage_client = StorageClient::new(store.clone());\n\n    let (_runtime_client, _launcher, _ctx) = Runtime::builder()\n        .with_broadcast_stream(broadcast_stream)\n        .storage(storage_client)\n        .store(store)\n        .serve_grpc_addr(addr)\n        .serve_graphql_addr(graphql_addr)\n        .serve_metrics_addr(metrics_addr)\n        .build_and_launch()\n        .await;\n\n    // Wait for server to boot\n    tokio::time::sleep(Duration::from_millis(100)).await;\n\n    let certificate_one = certificates[0].certificate.id;\n\n    let query = format!(\n        r#\"\n        query {{ checkPrecedence(certificateId: \"{}\") {{ id }} }}\n        \"#,\n        certificate_one\n    );\n\n    #[derive(Debug, Deserialize)]\n    struct Response {\n        data: CheckPrecedenceResponse,\n    }\n\n    #[derive(Debug, Deserialize)]\n    #[serde(rename_all = \"camelCase\")]\n    struct CheckPrecedenceResponse {\n        check_precedence: CheckPrecedence,\n    }\n\n    #[derive(Debug, Deserialize)]\n    #[serde(rename_all = \"camelCase\")]\n    struct CheckPrecedence {\n        id: String,\n    }\n\n    let client = reqwest::Client::new();\n\n    let response = client\n        .post(format!(\"http://{}\", graphql_addr))\n        .json(&serde_json::json!({\n            \"query\": query,\n        }))\n        .send()\n        .await\n        .unwrap()\n        
.json::<Response>()\n        .await\n        .unwrap();\n\n    assert_eq!(\n        TryInto::<CertificateId>::try_into(response.data.check_precedence.id.as_bytes()).unwrap(),\n        certificates[1].certificate.id\n    );\n}\n"
  },
  {
    "path": "crates/topos-tce-broadcast/Cargo.toml",
    "content": "[package]\nname = \"topos-tce-broadcast\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n[lints]\nworkspace = true\n\n[dependencies]\nbyteorder.workspace = true\nfutures.workspace = true\nlazy_static.workspace = true\nrand.workspace = true\nrand_core.workspace = true\nserde.workspace = true\nthiserror.workspace = true\ntokio = { workspace = true, features = [\"full\"] }\ntokio-stream = { workspace = true, features = [\"sync\"] }\ntokio-util.workspace = true\ntracing-subscriber = { workspace = true, features = [\"env-filter\", \"fmt\"] }\ntracing.workspace = true\ntopos-core = { workspace = true, features = [\"uci\"] }\ntopos-config = { path = \"../topos-config/\" }\ntopos-metrics = { path = \"../topos-metrics/\" }\ntopos-tce-storage = { path = \"../topos-tce-storage/\" }\ntopos-crypto = { path = \"../topos-crypto\" }\n\n[dev-dependencies]\ncriterion = { version = \"0.5.1\", features = [\"async_futures\", \"async_tokio\"] }\nrstest = { workspace = true, features = [\"async-timeout\"] }\ntest-log.workspace = true\nenv_logger.workspace = true\nrand.workspace = true\nhex.workspace = true\ntopos-test-sdk = { path = \"../topos-test-sdk/\" }\n\n[[bench]]\nname = \"double_echo\"\npath = \"benches/double_echo.rs\"\nharness = false\n"
  },
  {
    "path": "crates/topos-tce-broadcast/README.md",
    "content": "# topos-tce-broadcast\n\nImplementation of Topos Reliable Broadcast to be used in the Transmission Control Engine (TCE)\n\nThis crate is designed to be used as a library in the TCE implementation.\nIt covers the Reliable Broadcast part of the TCE, which is the core of the TCE.\nIt doesn't handle how messages are sent or received, nor how the certificates are stored.\nIt is designed to be used with any transport and storage implementation, relying on the\n`ProtocolEvents` and `DoubleEchoCommand` to communicate with the transport and storage.\n\nThe reliable broadcast allows a set of validators to agree on a set of messages in order to\nreach agreement about the delivery of a certificate.\n\nEach certificates need to be broadcast to the network, and each validator needs to\nreceive a threshold of messages from the other validators.\nThe thresholds are defined by the `ReliableBroadcastParams` and correspond to the minimum number of\nvalidators who need to agree on one certificate in order to consider it delivered.\n\nThis crate is responsible for validating and driving the broadcast of every certificates.\n\n### Input\n\nThe input of the broadcast is a certificate to be broadcast. It can be received from\nthe transport layer, or from the storage layer (from the pending tables).\n\nThe transport layer can be anything from p2p network to API calls.\n\nOther inputs are the messages received from the transport layer, coming from other validators.\nThey're `Echo` and `Ready` signed messages.\n\n### Output\n\nThe outcome of the broadcast is either a certificate delivered or a failure on the delivery.\n\nThe implementation is based on the paper: [Topos: A Secure, Trustless, and Decentralized Interoperability Protocol](https://arxiv.org/pdf/2206.03481.pdf)\n\n"
  },
  {
    "path": "crates/topos-tce-broadcast/benches/double_echo.rs",
    "content": "use criterion::async_executor::FuturesExecutor;\nuse criterion::{criterion_group, criterion_main, Criterion};\nuse topos_test_sdk::storage::create_validator_store;\nmod task_manager;\n\npub fn criterion_benchmark(c: &mut Criterion) {\n    let certificates = 10_000;\n    let runtime = tokio::runtime::Builder::new_current_thread()\n        .build()\n        .unwrap();\n\n    let store = runtime.block_on(async { create_validator_store::partial_1(&[]).await });\n\n    c.bench_function(\"double_echo\", |b| {\n        b.to_async(FuturesExecutor).iter(|| async {\n            runtime.block_on(async {\n                task_manager::processing_double_echo(certificates, store.clone()).await\n            })\n        })\n    });\n}\n\ncriterion_group!(benches, criterion_benchmark);\ncriterion_main!(benches);\n"
  },
  {
    "path": "crates/topos-tce-broadcast/benches/task_manager.rs",
    "content": "use std::collections::HashSet;\nuse std::str::FromStr;\nuse std::sync::Arc;\nuse tokio::sync::{broadcast, mpsc, oneshot};\nuse topos_config::tce::broadcast::ReliableBroadcastParams;\nuse topos_core::types::ValidatorId;\nuse topos_crypto::messages::MessageSigner;\nuse topos_tce_broadcast::double_echo::DoubleEcho;\nuse topos_tce_storage::validator::ValidatorStore;\nuse topos_test_sdk::certificates::create_certificate_chain;\nuse topos_test_sdk::constants::{SOURCE_SUBNET_ID_1, TARGET_SUBNET_ID_1};\n\nconst CHANNEL_SIZE: usize = 256_000;\nconst PRIVATE_KEY: &str = \"d6f8d1fe6d0f3606ccb15ef383910f10d83ca77bf3d73007f12fef023dabaab9\";\n\nstruct TceParams {\n    nb_peers: usize,\n    broadcast_params: ReliableBroadcastParams,\n}\n\npub async fn processing_double_echo(n: u64, validator_store: Arc<ValidatorStore>) {\n    let (_cmd_sender, cmd_receiver) = mpsc::channel(CHANNEL_SIZE);\n    let (event_sender, _event_receiver) = mpsc::channel(CHANNEL_SIZE);\n    let (broadcast_sender, mut broadcast_receiver) = broadcast::channel(CHANNEL_SIZE);\n    let (_double_echo_shutdown_sender, double_echo_shutdown_receiver) =\n        mpsc::channel::<oneshot::Sender<()>>(1);\n    let (task_manager_message_sender, task_manager_message_receiver) = mpsc::channel(CHANNEL_SIZE);\n\n    let params = TceParams {\n        nb_peers: 10,\n        broadcast_params: ReliableBroadcastParams {\n            echo_threshold: 8,\n            ready_threshold: 5,\n            delivery_threshold: 8,\n        },\n    };\n\n    let message_signer: Arc<MessageSigner> =\n        Arc::new(MessageSigner::from_str(PRIVATE_KEY).unwrap());\n    let mut validators = HashSet::new();\n    let validator_id = ValidatorId::from(message_signer.public_address);\n    validators.insert(validator_id);\n\n    for i in 1..params.nb_peers {\n        validators.insert(ValidatorId::from(\n            MessageSigner::new(&[i as u8; 32]).unwrap().public_address,\n        ));\n    }\n\n    let mut double_echo = 
DoubleEcho::new(\n        params.broadcast_params,\n        validator_id,\n        message_signer.clone(),\n        validators.clone(),\n        task_manager_message_sender.clone(),\n        cmd_receiver,\n        event_sender,\n        double_echo_shutdown_receiver,\n        validator_store.clone(),\n        broadcast_sender,\n    );\n\n    double_echo.spawn_task_manager(task_manager_message_receiver);\n\n    let certificates =\n        create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], n as usize);\n\n    let double_echo_selected_echo = double_echo\n        .subscriptions\n        .echo\n        .iter()\n        .take(double_echo.params.echo_threshold)\n        .cloned()\n        .collect::<Vec<_>>();\n\n    let double_echo_selected_ready = double_echo\n        .subscriptions\n        .ready\n        .iter()\n        .take(double_echo.params.delivery_threshold)\n        .cloned()\n        .collect::<Vec<_>>();\n\n    for cert in &certificates {\n        _ = validator_store\n            .insert_pending_certificate(&cert.certificate)\n            .await\n            .unwrap();\n    }\n\n    for cert in &certificates {\n        let mut payload = Vec::new();\n        payload.extend_from_slice(cert.certificate.id.as_array());\n        payload.extend_from_slice(validator_id.as_bytes());\n\n        for _ in &double_echo_selected_echo {\n            let signature = message_signer.sign_message(&payload).unwrap();\n\n            double_echo\n                .handle_echo(cert.certificate.id, validator_id, signature)\n                .await;\n        }\n\n        for _ in &double_echo_selected_ready {\n            let signature = message_signer.sign_message(&payload).unwrap();\n\n            double_echo\n                .handle_ready(cert.certificate.id, validator_id, signature)\n                .await;\n        }\n    }\n\n    let mut count = 0;\n\n    while let Ok(_event) = broadcast_receiver.recv().await {\n        count += 1;\n\n        if count == n {\n 
           break;\n        }\n    }\n}\n"
  },
  {
    "path": "crates/topos-tce-broadcast/src/constant.rs",
    "content": "use lazy_static::lazy_static;\n\nlazy_static! {\n    /// Size of the double echo command channel\n    pub static ref COMMAND_CHANNEL_SIZE: usize =\n        std::env::var(\"TOPOS_DOUBLE_ECHO_COMMAND_CHANNEL_SIZE\")\n            .ok()\n            .and_then(|s| s.parse().ok())\n            .unwrap_or(2048);\n    /// Size of the channel between double echo and the task manager\n    pub static ref BROADCAST_TASK_MANAGER_CHANNEL_SIZE: usize =\n        std::env::var(\"TOPOS_BROADCAST_TASK_MANAGER_CHANNEL_SIZE\")\n            .ok()\n            .and_then(|s| s.parse().ok())\n            .unwrap_or(20_480);\n    /// Size of the channel to send protocol events from the double echo\n    pub static ref PROTOCOL_CHANNEL_SIZE: usize =\n        std::env::var(\"TOPOS_PROTOCOL_CHANNEL_SIZE\")\n            .ok()\n            .and_then(|s| s.parse().ok())\n            .unwrap_or(2048);\n    /// Capacity alert threshold for the double echo command channel\n    pub static ref COMMAND_CHANNEL_CAPACITY: usize = COMMAND_CHANNEL_SIZE\n        .checked_mul(10)\n        .map(|v| {\n            let r: usize = v.checked_div(100).unwrap_or(*COMMAND_CHANNEL_SIZE);\n            r\n        })\n        .unwrap_or(*COMMAND_CHANNEL_SIZE);\n    ///\n    pub static ref PENDING_LIMIT_PER_REQUEST_TO_STORAGE: usize =\n        std::env::var(\"TOPOS_PENDING_LIMIT_PER_REQUEST_TO_STORAGE\")\n        .ok()\n        .and_then(|s| s.parse().ok())\n        .unwrap_or(1000);\n}\n"
  },
  {
    "path": "crates/topos-tce-broadcast/src/double_echo/broadcast_state/status.rs",
    "content": "use std::fmt::Display;\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq)]\npub enum Status {\n    Pending,\n    EchoSent,\n    ReadySent,\n    DeliveredWithReadySent,\n    Delivered,\n}\n\nimpl Display for Status {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        match self {\n            Self::Pending => write!(f, \"Pending\"),\n            Self::EchoSent => write!(f, \"EchoSent\"),\n            Self::ReadySent => write!(f, \"ReadySent\"),\n            Self::DeliveredWithReadySent => write!(f, \"DeliveredWithReadySent\"),\n            Self::Delivered => write!(f, \"Delivered\"),\n        }\n    }\n}\n\nimpl Status {\n    pub(crate) fn is_ready_sent(&self) -> bool {\n        matches!(self, Self::ReadySent) || matches!(self, Self::DeliveredWithReadySent)\n    }\n\n    pub(crate) fn is_delivered(&self) -> bool {\n        matches!(self, Self::Delivered) || matches!(self, Self::DeliveredWithReadySent)\n    }\n\n    pub(crate) fn ready_sent(self) -> Self {\n        match self {\n            Self::EchoSent => Self::ReadySent,\n            Self::Delivered => Self::DeliveredWithReadySent,\n            _ => self,\n        }\n    }\n\n    pub(crate) fn delivered(self) -> Self {\n        match self {\n            Self::ReadySent => Self::DeliveredWithReadySent,\n            _ => Self::Delivered,\n        }\n    }\n}\n"
  },
  {
    "path": "crates/topos-tce-broadcast/src/double_echo/broadcast_state.rs",
    "content": "use crate::event::ProtocolEvents;\nuse crate::sampler::SubscriptionsView;\nuse std::sync::Arc;\nuse std::{collections::HashSet, time};\nuse tokio::sync::mpsc;\nuse topos_core::{\n    types::{\n        stream::{CertificateSourceStreamPosition, Position},\n        CertificateDelivered, ProofOfDelivery, Ready, ValidatorId,\n    },\n    uci::Certificate,\n};\nuse topos_crypto::messages::MessageSigner;\nuse topos_metrics::DOUBLE_ECHO_BROADCAST_FINISHED_TOTAL;\nuse tracing::{debug, error, info, trace};\nmod status;\n\npub use status::Status;\n\n#[derive(Debug)]\npub struct BroadcastState {\n    subscriptions_view: SubscriptionsView,\n    status: Status,\n    pub(crate) certificate: Certificate,\n    validator_id: ValidatorId,\n    echo_threshold: usize,\n    ready_threshold: usize,\n    delivery_threshold: usize,\n    message_signer: Arc<MessageSigner>,\n    event_sender: mpsc::Sender<ProtocolEvents>,\n    delivery_time: time::Instant,\n    readies: HashSet<Ready>,\n    pub(crate) expected_position: Option<Position>,\n}\n\nimpl BroadcastState {\n    #[allow(clippy::too_many_arguments)]\n    pub fn new(\n        certificate: Certificate,\n        validator_id: ValidatorId,\n        echo_threshold: usize,\n        ready_threshold: usize,\n        delivery_threshold: usize,\n        event_sender: mpsc::Sender<ProtocolEvents>,\n        subscriptions_view: SubscriptionsView,\n        need_gossip: bool,\n        message_signer: Arc<MessageSigner>,\n    ) -> Self {\n        let mut state = Self {\n            subscriptions_view,\n            status: Status::Pending,\n            certificate,\n            validator_id,\n            echo_threshold,\n            ready_threshold,\n            delivery_threshold,\n            message_signer,\n            event_sender,\n            delivery_time: time::Instant::now(),\n            readies: HashSet::new(),\n            expected_position: None,\n        };\n\n        _ = 
state.event_sender.try_send(ProtocolEvents::Broadcast {\n            certificate_id: state.certificate.id,\n        });\n\n        if need_gossip {\n            debug!(\n                \"📣 Gossiping the Certificate {} from the source subnet {}\",\n                &state.certificate.id, &state.certificate.source_subnet_id\n            );\n            let _ = state.event_sender.try_send(ProtocolEvents::Gossip {\n                cert: state.certificate.clone(),\n            });\n        }\n\n        state.update_status();\n\n        state\n    }\n\n    pub fn into_delivered(&self) -> CertificateDelivered {\n        CertificateDelivered {\n            certificate: self.certificate.clone(),\n            proof_of_delivery: ProofOfDelivery {\n                certificate_id: self.certificate.id,\n                delivery_position: CertificateSourceStreamPosition {\n                    subnet_id: self.certificate.source_subnet_id,\n                    // FIXME: Should never fails but need to find how to remove the unwrap\n                    position: self\n                        .expected_position\n                        .expect(\"Expected position is not set, this is a bug\"),\n                },\n                readies: self\n                    .readies\n                    .iter()\n                    .cloned()\n                    .map(|r| (r, \"signature\".to_string()))\n                    .collect(),\n                threshold: self.delivery_threshold as u64,\n            },\n        }\n    }\n\n    pub fn apply_echo(&mut self, validator_id: ValidatorId) -> Option<Status> {\n        if self.subscriptions_view.echo.remove(&validator_id) {\n            self.update_status()\n        } else {\n            None\n        }\n    }\n\n    pub fn apply_ready(&mut self, validator_id: ValidatorId) -> Option<Status> {\n        if self.subscriptions_view.ready.remove(&validator_id) {\n            self.readies.insert(validator_id.to_string());\n            
self.update_status()\n        } else {\n            None\n        }\n    }\n\n    fn update_status(&mut self) -> Option<Status> {\n        // Nothing happened yet, we're in the initial state and didn't process\n        // any Echo or Ready messages\n        // Sending our Echo message\n        if let Status::Pending = self.status {\n            let mut payload = Vec::new();\n            payload.extend_from_slice(self.certificate.id.as_array());\n            payload.extend_from_slice(self.validator_id.as_bytes());\n\n            let _ = self.event_sender.try_send(ProtocolEvents::Echo {\n                certificate_id: self.certificate.id,\n                signature: self.message_signer.sign_message(&payload).ok()?,\n                validator_id: self.validator_id,\n            });\n\n            self.status = Status::EchoSent;\n            trace!(\n                \"Certificate {} is now {}\",\n                &self.certificate.id,\n                self.status\n            );\n            return Some(self.status);\n        }\n\n        // Upon reaching the Echo or Ready threshold, if the status is either\n        // EchoSent or Delivered (without ReadySent), we send the Ready message\n        // and update the status accordingly.\n        // If the status was EchoSent, we update it to ReadySent\n        // If the status was Delivered, we update it to DeliveredWithReadySent\n        if !self.status.is_ready_sent() && self.reached_ready_threshold() {\n            let mut payload = Vec::new();\n            payload.extend_from_slice(self.certificate.id.as_array());\n            payload.extend_from_slice(self.validator_id.as_bytes());\n\n            let event = ProtocolEvents::Ready {\n                certificate_id: self.certificate.id,\n                signature: self.message_signer.sign_message(&payload).ok()?,\n                validator_id: self.validator_id,\n            };\n            if let Err(e) = self.event_sender.try_send(event) {\n                
error!(\"Failed to send the Ready message: {}\", e);\n            }\n\n            self.status = self.status.ready_sent();\n\n            trace!(\n                \"Certificate {} is now {}\",\n                &self.certificate.id,\n                self.status\n            );\n            return Some(self.status);\n        }\n\n        // Upon reaching the Delivery threshold, if the status is not Delivered,\n        // we update the status to Delivered and change the status\n        if !self.status.is_delivered() && self.reached_delivery_threshold() {\n            self.status = self.status.delivered();\n\n            trace!(\n                \"Certificate {} is now {}\",\n                &self.certificate.id,\n                self.status\n            );\n            // Calculate delivery time\n            let from = self.delivery_time;\n            let duration = from.elapsed();\n            let d = duration;\n\n            info!(\n                \"📝 Certificate delivered {} with broadcast duration: {:?}\",\n                self.certificate.id, d\n            );\n\n            DOUBLE_ECHO_BROADCAST_FINISHED_TOTAL.inc();\n\n            return Some(self.status);\n        }\n\n        None\n    }\n\n    fn reached_ready_threshold(&self) -> bool {\n        // Compute the threshold\n        let reached_echo_threshold = match self\n            .subscriptions_view\n            .network_size\n            .checked_sub(self.subscriptions_view.echo.len())\n        {\n            Some(consumed) => consumed >= self.echo_threshold,\n            None => false,\n        };\n\n        let reached_ready_threshold = match self\n            .subscriptions_view\n            .network_size\n            .checked_sub(self.subscriptions_view.ready.len())\n        {\n            Some(consumed) => consumed >= self.ready_threshold,\n            None => false,\n        };\n\n        trace!(\n            \"Certificate {} reached Echo threshold: {} and Ready threshold: {}\",\n            
&self.certificate.id,\n            reached_echo_threshold,\n            reached_ready_threshold\n        );\n        // If reached any of the Echo or Ready thresholds, I send the Ready\n        reached_echo_threshold || reached_ready_threshold\n    }\n\n    fn reached_delivery_threshold(&self) -> bool {\n        // If reached the delivery threshold, I can deliver\n        let delivery_threshold = match self\n            .subscriptions_view\n            .network_size\n            .checked_sub(self.subscriptions_view.ready.len())\n        {\n            Some(consumed) => consumed >= self.delivery_threshold,\n            None => false,\n        };\n\n        trace!(\n            \"Certificate {} reached Delivery threshold: {}\",\n            &self.certificate.id,\n            delivery_threshold\n        );\n\n        delivery_threshold\n    }\n}\n"
  },
  {
    "path": "crates/topos-tce-broadcast/src/double_echo/mod.rs",
    "content": "//! Everything related to the double_echo implementation\n//!\n//! ## Messages and roles\n//!\n//! In order to prevent many non validator's messages to be published on the\n//! gossip topics, messages are filtered when the [`DoubleEcho`] is producing events.\n//!\n//! For `validator` nothing changed, for `fullnode` and `sentry` node, their `Echo` and\n//! `Ready` messages are filtered, they still produce `Gossip` messages tho.\n//!\n//! It doesn't mean that a `fullnode` will stop propagate messages from\n//! `validators`, it only prevents a non validator to publish messages that will\n//! be ignored by others. `fullnode` still consumes Echo and Ready coming from\n//! validators and use those messages to build their state.\n\nuse crate::event::ProtocolEvents;\nuse crate::{DoubleEchoCommand, SubscriptionsView};\nuse std::collections::HashSet;\nuse std::sync::Arc;\nuse tokio::sync::{broadcast, mpsc, oneshot};\nuse tokio_util::sync::CancellationToken;\nuse topos_config::tce::broadcast::ReliableBroadcastParams;\nuse topos_core::{types::ValidatorId, uci::CertificateId};\nuse topos_crypto::messages::{MessageSigner, Signature};\nuse topos_tce_storage::store::ReadStore;\nuse topos_tce_storage::types::CertificateDeliveredWithPositions;\nuse topos_tce_storage::validator::ValidatorStore;\nuse tracing::{debug, error, info, warn};\n\npub mod broadcast_state;\n\npub struct DoubleEcho {\n    /// Channel to receive commands\n    command_receiver: mpsc::Receiver<DoubleEchoCommand>,\n    /// Channel to send events\n    event_sender: mpsc::Sender<ProtocolEvents>,\n    /// Channel to receive shutdown signal\n    pub(crate) shutdown: mpsc::Receiver<oneshot::Sender<()>>,\n    /// The threshold parameters for the double echo\n    pub params: ReliableBroadcastParams,\n    /// The connection to the TaskManager to forward DoubleEchoCommand messages\n    task_manager_message_sender: mpsc::Sender<DoubleEchoCommand>,\n    /// The overview of the network, which holds echo and 
ready subscriptions and the network size\n    pub subscriptions: SubscriptionsView,\n    /// Local node ValidatorId\n    pub validator_id: ValidatorId,\n    /// Keypair to sign and verify ECHO and READY messages\n    pub message_signer: Arc<MessageSigner>,\n    /// List of approved validators through smart contract and/or genesis\n    pub validators: HashSet<ValidatorId>,\n    pub validator_store: Arc<ValidatorStore>,\n    pub broadcast_sender: broadcast::Sender<CertificateDeliveredWithPositions>,\n\n    pub task_manager_cancellation: CancellationToken,\n}\n\nimpl DoubleEcho {\n    pub const MAX_BUFFER_SIZE: usize = 2048;\n\n    #[allow(clippy::too_many_arguments)]\n    pub fn new(\n        params: ReliableBroadcastParams,\n        validator_id: ValidatorId,\n        message_signer: Arc<MessageSigner>,\n        validators: HashSet<ValidatorId>,\n        task_manager_message_sender: mpsc::Sender<DoubleEchoCommand>,\n        command_receiver: mpsc::Receiver<DoubleEchoCommand>,\n        event_sender: mpsc::Sender<ProtocolEvents>,\n        shutdown: mpsc::Receiver<oneshot::Sender<()>>,\n        validator_store: Arc<ValidatorStore>,\n        broadcast_sender: broadcast::Sender<CertificateDeliveredWithPositions>,\n    ) -> Self {\n        Self {\n            params,\n            validator_id,\n            message_signer,\n            validators: validators.clone(),\n            task_manager_message_sender,\n            command_receiver,\n            event_sender,\n            subscriptions: SubscriptionsView {\n                echo: validators.clone(),\n                ready: validators.clone(),\n                network_size: validators.len(),\n            },\n            shutdown,\n            validator_store,\n            broadcast_sender,\n            task_manager_cancellation: CancellationToken::new(),\n        }\n    }\n\n    pub fn spawn_task_manager(\n        &mut self,\n        task_manager_message_receiver: mpsc::Receiver<DoubleEchoCommand>,\n    ) {\n        
let task_manager = crate::task_manager::TaskManager::new(\n            task_manager_message_receiver,\n            self.subscriptions.clone(),\n            self.event_sender.clone(),\n            self.validator_id,\n            self.params.clone(),\n            self.message_signer.clone(),\n            self.validator_store.clone(),\n            self.broadcast_sender.clone(),\n        );\n\n        tokio::spawn(task_manager.run(self.task_manager_cancellation.child_token()));\n    }\n\n    /// DoubleEcho main loop\n    ///   - Listen for shutdown signal\n    ///   - Read new messages from command_receiver\n    ///      - If a new certificate is received, add it to the buffer\n    ///      - If a new subscription view is received, update the subscriptions\n    ///      - If a new Echo/Ready is received, update the state of the certificate or buffer\n    ///      the message\n    pub(crate) async fn run(\n        mut self,\n        task_manager_message_receiver: mpsc::Receiver<DoubleEchoCommand>,\n    ) {\n        self.spawn_task_manager(task_manager_message_receiver);\n\n        info!(\"DoubleEcho started\");\n\n        let shutdowned: Option<oneshot::Sender<()>> = loop {\n            tokio::select! 
{\n                biased;\n\n                shutdown = self.shutdown.recv() => {\n                        warn!(\"Double echo shutdown signal received {:?}\", shutdown);\n                        self.task_manager_cancellation.cancel();\n                        break shutdown;\n                },\n                Some(command) = self.command_receiver.recv() => {\n                    match command {\n\n                        command if self.subscriptions.is_some() => {\n                            match command {\n                                DoubleEchoCommand::Broadcast { cert, need_gossip, pending_id } => {\n                                    _ = self\n                                        .task_manager_message_sender\n                                        .send(DoubleEchoCommand::Broadcast { need_gossip, cert, pending_id })\n                                        .await;\n                                    }\n                                DoubleEchoCommand::Echo { certificate_id, validator_id, signature } => {\n                                    // Check if source is part of known_validators\n                                    if !self.validators.contains(&validator_id) {\n                                        debug!(\"ECHO message comes from non-validator: {}\", validator_id);\n                                        continue;\n                                    }\n\n                                    let mut payload = Vec::new();\n                                    payload.extend_from_slice(certificate_id.as_array());\n                                    payload.extend_from_slice(validator_id.as_bytes());\n\n                                    if let Err(e) = self.message_signer.verify_signature(signature, &payload, validator_id.address()) {\n                                        debug!(\"ECHO message signature cannot be verified from: {}\", e);\n                                        continue;\n                                    }\n\n  
                                  self.handle_echo(certificate_id, validator_id, signature).await\n                                },\n                                DoubleEchoCommand::Ready { certificate_id, validator_id, signature } => {\n                                    // Check if source is part of known_validators\n                                    if !self.validators.contains(&validator_id) {\n                                        debug!(\"READY message comes from non-validator: {}\", validator_id);\n                                        continue;\n                                    }\n\n                                    let mut payload = Vec::new();\n                                    payload.extend_from_slice(certificate_id.as_array());\n                                    payload.extend_from_slice(validator_id.as_bytes());\n\n                                    if let Err(e) = self.message_signer.verify_signature(signature, &payload, validator_id.address()) {\n                                        debug!(\"READY message signature cannot be verified from: {}\", e);\n                                        continue;\n                                    }\n\n                                    self.handle_ready(certificate_id, validator_id, signature).await\n                                },\n                            }\n\n                        },\n                        command => {\n                            warn!(\"Received a command {command:?} while not having a complete sampling\");\n                        }\n                    }\n                }\n\n                else => {\n                    debug!(\"Break the tokio loop for the double echo\");\n                    break None;\n                }\n            }\n        };\n\n        if let Some(sender) = shutdowned {\n            info!(\"Shutting down p2p double echo...\");\n            _ = sender.send(());\n        } else {\n            debug!(\"Shutting down p2p double 
echo due to error...\");\n        }\n    }\n}\n\nimpl DoubleEcho {\n    pub async fn handle_echo(\n        &mut self,\n        certificate_id: CertificateId,\n        validator_id: ValidatorId,\n        signature: Signature,\n    ) {\n        match self.validator_store.get_certificate(&certificate_id) {\n            Err(storage_error) => error!(\n                \"Unable to get the Certificate {} due to {:?}\",\n                &certificate_id, storage_error\n            ),\n            Ok(Some(_)) => debug!(\n                \"Certificate {} already delivered, ignoring echo\",\n                &certificate_id\n            ),\n            Ok(None) => {\n                let _ = self\n                    .task_manager_message_sender\n                    .send(DoubleEchoCommand::Echo {\n                        validator_id,\n                        certificate_id,\n                        signature,\n                    })\n                    .await;\n            }\n        }\n    }\n\n    pub async fn handle_ready(\n        &mut self,\n        certificate_id: CertificateId,\n        validator_id: ValidatorId,\n        signature: Signature,\n    ) {\n        match self.validator_store.get_certificate(&certificate_id) {\n            Err(storage_error) => error!(\n                \"Unable to get the Certificate {} due to {:?}\",\n                &certificate_id, storage_error\n            ),\n            Ok(Some(_)) => debug!(\n                \"Certificate {} already delivered, ignoring echo\",\n                &certificate_id\n            ),\n            Ok(None) => {\n                let _ = self\n                    .task_manager_message_sender\n                    .send(DoubleEchoCommand::Ready {\n                        validator_id,\n                        certificate_id,\n                        signature,\n                    })\n                    .await;\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "crates/topos-tce-broadcast/src/event.rs",
    "content": "use topos_core::{\n    types::ValidatorId,\n    uci::{Certificate, CertificateId},\n};\nuse topos_crypto::messages::Signature;\n\n/// Protocol events\n#[derive(Clone, Debug)]\npub enum ProtocolEvents {\n    BroadcastFailed {\n        certificate_id: CertificateId,\n    },\n    AlreadyDelivered {\n        certificate_id: CertificateId,\n    },\n\n    /// (pb.Broadcast)\n    Broadcast {\n        certificate_id: CertificateId,\n    },\n    /// Indicates that 'gossip' message broadcasting is required\n    Gossip {\n        cert: Certificate,\n    },\n    /// Indicates that 'echo' message broadcasting is required\n    Echo {\n        certificate_id: CertificateId,\n        signature: Signature,\n        validator_id: ValidatorId,\n    },\n    /// Indicates that 'ready' message broadcasting is required\n    Ready {\n        certificate_id: CertificateId,\n        signature: Signature,\n        validator_id: ValidatorId,\n    },\n}\n"
  },
  {
    "path": "crates/topos-tce-broadcast/src/lib.rs",
    "content": "//! Implementation of Topos Reliable Broadcast to be used in the Transmission Control Engine (TCE)\n//!\n//! This crate is designed to be used as a library in the TCE implementation.\n//! It covers the Reliable Broadcast part of the TCE, which is the core of the TCE.\n//! It doesn't handle how messages are sent or received, nor how the certificates are stored.\n//! It is designed to be used with any transport and storage implementation, relying on the\n//! `ProtocolEvents` and `DoubleEchoCommand` to communicate with the transport and storage.\n//!\n//! The reliable broadcast allows a set of validators to agree on a set of messages in order to\n//! reach agreement about the delivery of a certificate.\n//!\n//! Each certificates need to be broadcast to the network, and each validator needs to\n//! receive a threshold of messages from the other validators.\n//! The thresholds are defined by the `ReliableBroadcastParams` and correspond to the minimum number of\n//! validators who need to agree on one certificate in order to consider it delivered.\n//!\n//! This crate is responsible for validating and driving the broadcast of every certificates.\n//!\n//! ## Input\n//!\n//! The input of the broadcast is a certificate to be broadcast. It can be received from\n//! the transport layer, or from the storage layer (from the pending tables).\n//!\n//! The transport layer can be anything from p2p network to API calls.\n//!\n//! Other inputs are the messages received from the transport layer, coming from other validators.\n//! They're `Echo` and `Ready` signed messages.\n//!\n//! ## Output\n//!\n//! The outcome of the broadcast is either a certificate delivered or a failure on the delivery.\n//!\n//! 
The implementation is based on the paper: [Topos: A Secure, Trustless, and Decentralized Interoperability Protocol](https://arxiv.org/pdf/2206.03481.pdf)\n//!\nuse crate::event::ProtocolEvents;\nuse double_echo::DoubleEcho;\nuse futures::Stream;\nuse std::collections::HashSet;\nuse std::sync::Arc;\nuse thiserror::Error;\nuse tokio::spawn;\nuse tokio::sync::mpsc::Sender;\nuse tokio::sync::{broadcast, mpsc, oneshot};\nuse tokio_stream::wrappers::ReceiverStream;\nuse topos_config::tce::broadcast::ReliableBroadcastParams;\nuse topos_core::types::ValidatorId;\nuse topos_core::uci::{Certificate, CertificateId};\nuse topos_crypto::messages::{MessageSigner, Signature};\nuse topos_tce_storage::types::CertificateDeliveredWithPositions;\nuse topos_tce_storage::validator::ValidatorStore;\nuse tracing::{debug, error, Instrument};\n\npub use topos_core::uci;\n\npub type Peer = String;\n\nmod constant;\npub mod double_echo;\npub mod event;\npub mod sampler;\n\npub mod task_manager;\n\n#[cfg(test)]\nmod tests;\n\nuse crate::sampler::SubscriptionsView;\n\n#[derive(Debug)]\npub enum TaskStatus {\n    /// The task finished successfully and broadcast the certificate + received ready\n    Success,\n    /// The task did not finish successfully and stopped.\n    Failure,\n}\n\n/// Configuration of TCE implementation\npub struct ReliableBroadcastConfig {\n    pub tce_params: ReliableBroadcastParams,\n    pub validator_id: ValidatorId,\n    pub validators: HashSet<ValidatorId>,\n    pub message_signer: Arc<MessageSigner>,\n}\n\n#[derive(Debug, Clone)]\npub enum DoubleEchoCommand {\n    /// Entry point for new certificate to submit as initial sender\n    Broadcast {\n        need_gossip: bool,\n        cert: Certificate,\n        pending_id: u64,\n    },\n\n    /// When echo reply received\n    Echo {\n        validator_id: ValidatorId,\n        certificate_id: CertificateId,\n        signature: Signature,\n    },\n\n    /// When ready reply received\n    Ready {\n        validator_id: 
ValidatorId,\n        certificate_id: CertificateId,\n        signature: Signature,\n    },\n}\n\n/// Thread safe client to the protocol aggregate\n#[derive(Clone, Debug)]\npub struct ReliableBroadcastClient {\n    command_sender: Sender<DoubleEchoCommand>,\n    pub(crate) double_echo_shutdown_channel: Sender<oneshot::Sender<()>>,\n}\n\nimpl ReliableBroadcastClient {\n    /// Creates new instance of the aggregate and returns proxy to it.\n    ///\n    /// New client instances to the same aggregate can be cloned from the returned one.\n    /// Aggregate is spawned as new task.\n    pub async fn new(\n        config: ReliableBroadcastConfig,\n        validator_store: Arc<ValidatorStore>,\n        broadcast_sender: broadcast::Sender<CertificateDeliveredWithPositions>,\n    ) -> (Self, impl Stream<Item = ProtocolEvents>) {\n        let (event_sender, event_receiver) = mpsc::channel(*constant::PROTOCOL_CHANNEL_SIZE);\n        let (command_sender, command_receiver) = mpsc::channel(*constant::COMMAND_CHANNEL_SIZE);\n        let (double_echo_shutdown_channel, double_echo_shutdown_receiver) =\n            mpsc::channel::<oneshot::Sender<()>>(1);\n\n        let (task_manager_message_sender, task_manager_message_receiver) =\n            mpsc::channel(*constant::BROADCAST_TASK_MANAGER_CHANNEL_SIZE);\n\n        let double_echo = DoubleEcho::new(\n            config.tce_params,\n            config.validator_id,\n            config.message_signer,\n            config.validators,\n            task_manager_message_sender,\n            command_receiver,\n            event_sender,\n            double_echo_shutdown_receiver,\n            validator_store,\n            broadcast_sender,\n        );\n\n        spawn(\n            double_echo\n                .run(task_manager_message_receiver)\n                .in_current_span(),\n        );\n\n        (\n            Self {\n                command_sender,\n                double_echo_shutdown_channel,\n            },\n            
ReceiverStream::new(event_receiver),\n        )\n    }\n\n    pub fn get_double_echo_channel(&self) -> Sender<DoubleEchoCommand> {\n        self.command_sender.clone()\n    }\n\n    pub async fn shutdown(&self) -> Result<(), Errors> {\n        debug!(\"Shutting down reliable broadcast client\");\n        let (double_echo_sender, double_echo_receiver) = oneshot::channel();\n        self.double_echo_shutdown_channel\n            .send(double_echo_sender)\n            .await\n            .map_err(Errors::ShutdownCommunication)?;\n        double_echo_receiver.await?;\n\n        Ok(())\n    }\n}\n\n/// Protocol and technical errors\n#[derive(Error, Debug)]\npub enum Errors {\n    #[error(\"Error while sending a DoubleEchoCommand to DoubleEcho: {0:?}\")]\n    DoubleEchoSend(#[from] Box<mpsc::error::SendError<DoubleEchoCommand>>),\n\n    #[error(\"Error while waiting for a DoubleEchoCommand response: {0:?}\")]\n    DoubleEchoRecv(#[from] oneshot::error::RecvError),\n\n    #[error(\"Requested certificate not found\")]\n    CertificateNotFound,\n\n    #[error(\"Requested digest not found for certificate {0:?}\")]\n    DigestNotFound(CertificateId),\n\n    #[error(\"Cannot create public address from private key\")]\n    ProducePublicAddress,\n\n    #[error(\"Unable to execute shutdown for the reliable broadcast: {0}\")]\n    ShutdownCommunication(mpsc::error::SendError<oneshot::Sender<()>>),\n}\n"
  },
  {
    "path": "crates/topos-tce-broadcast/src/sampler/mod.rs",
    "content": "use std::collections::HashSet;\nuse topos_core::types::ValidatorId;\n\n/// Stateful network view with whom we broadcast the Certificate\n/// The Echo and the Ready sets are initially equal to the whole network\n#[derive(Debug, Clone, Eq, PartialEq, Default)]\npub struct SubscriptionsView {\n    /// Set of Peer from which we listen for ECHO messages\n    pub echo: HashSet<ValidatorId>,\n    /// Set of Peer from which we listen for READY messages\n    pub ready: HashSet<ValidatorId>,\n    /// Size of the network\n    pub network_size: usize,\n}\n\nimpl SubscriptionsView {\n    pub fn is_some(&self) -> bool {\n        !self.is_none()\n    }\n\n    pub fn is_none(&self) -> bool {\n        self.echo.is_empty() && self.ready.is_empty()\n    }\n}\n"
  },
  {
    "path": "crates/topos-tce-broadcast/src/task_manager/mod.rs",
    "content": "use crate::event::ProtocolEvents;\nuse futures::stream::FuturesUnordered;\nuse futures::Future;\nuse futures::StreamExt;\nuse std::collections::HashMap;\nuse std::future::IntoFuture;\nuse std::pin::Pin;\nuse std::sync::Arc;\nuse std::time::Duration;\nuse tokio::sync::broadcast;\nuse tokio::{spawn, sync::mpsc};\nuse tokio_util::sync::CancellationToken;\nuse topos_config::tce::broadcast::ReliableBroadcastParams;\nuse topos_core::types::ValidatorId;\nuse topos_core::uci::Certificate;\nuse topos_core::uci::CertificateId;\nuse topos_metrics::CERTIFICATE_PROCESSING_FROM_API_TOTAL;\nuse topos_metrics::CERTIFICATE_PROCESSING_FROM_GOSSIP_TOTAL;\nuse topos_metrics::CERTIFICATE_PROCESSING_TOTAL;\nuse topos_metrics::DOUBLE_ECHO_ACTIVE_TASKS_COUNT;\nuse topos_tce_storage::store::ReadStore;\nuse topos_tce_storage::types::CertificateDeliveredWithPositions;\nuse topos_tce_storage::validator::ValidatorStore;\nuse topos_tce_storage::PendingCertificateId;\nuse tracing::{debug, error, info, trace, warn};\n\npub mod task;\n\nuse crate::constant::PENDING_LIMIT_PER_REQUEST_TO_STORAGE;\nuse crate::double_echo::broadcast_state::BroadcastState;\nuse crate::sampler::SubscriptionsView;\nuse crate::DoubleEchoCommand;\nuse crate::TaskStatus;\nuse task::{Task, TaskContext};\nuse topos_crypto::messages::MessageSigner;\n\ntype RunningTasks =\n    FuturesUnordered<Pin<Box<dyn Future<Output = (CertificateId, TaskStatus)> + Send + 'static>>>;\n\n/// The TaskManager is responsible for receiving messages from the network and distributing them\n/// among tasks. 
These tasks are either created if none for a certain CertificateID exists yet,\n/// or existing tasks will receive the messages.\npub struct TaskManager {\n    pub message_receiver: mpsc::Receiver<DoubleEchoCommand>,\n    pub subscriptions: SubscriptionsView,\n    pub event_sender: mpsc::Sender<ProtocolEvents>,\n    pub tasks: HashMap<CertificateId, TaskContext>,\n    pub message_signer: Arc<MessageSigner>,\n    #[allow(clippy::type_complexity)]\n    pub running_tasks: RunningTasks,\n    pub buffered_messages: HashMap<CertificateId, Vec<DoubleEchoCommand>>,\n    pub thresholds: ReliableBroadcastParams,\n    pub validator_id: ValidatorId,\n    pub validator_store: Arc<ValidatorStore>,\n    pub broadcast_sender: broadcast::Sender<CertificateDeliveredWithPositions>,\n    pub latest_pending_id: PendingCertificateId,\n}\n\nimpl TaskManager {\n    #[allow(clippy::too_many_arguments)]\n    pub fn new(\n        message_receiver: mpsc::Receiver<DoubleEchoCommand>,\n        subscriptions: SubscriptionsView,\n        event_sender: mpsc::Sender<ProtocolEvents>,\n        validator_id: ValidatorId,\n        thresholds: ReliableBroadcastParams,\n        message_signer: Arc<MessageSigner>,\n        validator_store: Arc<ValidatorStore>,\n        broadcast_sender: broadcast::Sender<CertificateDeliveredWithPositions>,\n    ) -> Self {\n        Self {\n            message_receiver,\n            subscriptions,\n            event_sender,\n            tasks: HashMap::new(),\n            running_tasks: FuturesUnordered::new(),\n            buffered_messages: Default::default(),\n            validator_id,\n            message_signer,\n            thresholds,\n            validator_store,\n            broadcast_sender,\n            latest_pending_id: 0,\n        }\n    }\n\n    /// Fetch the next pending certificates from the storage and create tasks for them.\n    /// This method is called periodically to check for new pending certificates and when\n    /// a task has finished.\n    fn 
next_pending_certificate(&mut self) {\n        debug!(\"Checking for next pending_certificates\");\n        match self.validator_store.get_next_pending_certificates(\n            &self.latest_pending_id,\n            *PENDING_LIMIT_PER_REQUEST_TO_STORAGE,\n        ) {\n            Ok(pendings) => {\n                debug!(\"Received {} pending certificates\", pendings.len());\n                for (pending_id, certificate) in pendings {\n                    self.create_task(&certificate, true, pending_id);\n                    self.latest_pending_id = pending_id;\n                }\n            }\n            Err(error) => {\n                error!(\"Failed to fetch the pending certificates: {:?}\", error);\n            }\n        }\n    }\n\n    pub async fn run(mut self, shutdown_receiver: CancellationToken) {\n        let mut interval = tokio::time::interval(Duration::from_secs(1));\n\n        loop {\n            tokio::select! {\n                biased;\n\n                _ = interval.tick() => {\n                    self.next_pending_certificate();\n                }\n                Some(msg) = self.message_receiver.recv() => {\n                    match msg {\n                        DoubleEchoCommand::Echo { certificate_id, .. } | DoubleEchoCommand::Ready { certificate_id, .. 
} => {\n                            if let Some(task_context) = self.tasks.get(&certificate_id) {\n                                _ = task_context.sink.send(msg).await;\n                            } else {\n                                self.buffered_messages\n                                    .entry(certificate_id)\n                                    .or_default()\n                                    .push(msg);\n                            };\n                        }\n                        DoubleEchoCommand::Broadcast { ref cert, need_gossip, pending_id } => {\n                            trace!(\"Received broadcast message for certificate {} \", cert.id);\n\n                            self.create_task(cert, need_gossip, pending_id)\n                        }\n                    }\n                }\n\n\n                Some((certificate_id, status)) = self.running_tasks.next() => {\n                    if let TaskStatus::Success = status {\n                        trace!(\"Task for certificate {} finished successfully\", certificate_id);\n                        self.tasks.remove(&certificate_id);\n                        DOUBLE_ECHO_ACTIVE_TASKS_COUNT.dec();\n\n                    } else {\n                        error!(\"Task for certificate {} finished unsuccessfully\", certificate_id);\n                    }\n\n                    self.next_pending_certificate();\n                }\n\n                _ = shutdown_receiver.cancelled() => {\n                    info!(\"Task Manager shutting down\");\n\n                    debug!(\"Remaining active tasks: {:?}\", self.tasks.len());\n                    if !self.tasks.is_empty() {\n                        debug!(\"Certificates still in broadcast: {:?}\", self.tasks.keys());\n                    }\n                    warn!(\"Remaining buffered messages: {}\", self.buffered_messages.len());\n                    for task in self.tasks.iter() {\n                        
task.1.shutdown_sender.send(()).await.unwrap();\n                    }\n\n                    break;\n                }\n            }\n        }\n    }\n\n    fn start_task(\n        running_tasks: &RunningTasks,\n        task: Task,\n        sink: mpsc::Sender<DoubleEchoCommand>,\n        messages: Option<Vec<DoubleEchoCommand>>,\n        need_gossip: bool,\n    ) {\n        running_tasks.push(task.into_future());\n\n        if let Some(messages) = messages {\n            spawn(async move {\n                for msg in messages {\n                    _ = sink.send(msg).await;\n                }\n            });\n        }\n\n        DOUBLE_ECHO_ACTIVE_TASKS_COUNT.inc();\n\n        CERTIFICATE_PROCESSING_TOTAL.inc();\n        if need_gossip {\n            CERTIFICATE_PROCESSING_FROM_API_TOTAL.inc();\n        } else {\n            CERTIFICATE_PROCESSING_FROM_GOSSIP_TOTAL.inc();\n        }\n    }\n\n    /// Create a new task for the given certificate and add it to the running tasks.\n    /// If the previous certificate is not available yet, the task will be created but not started.\n    /// This method is called when a pending certificate is fetched from the storage.\n    fn create_task(&mut self, cert: &Certificate, need_gossip: bool, pending_id: u64) {\n        match self.tasks.entry(cert.id) {\n            std::collections::hash_map::Entry::Vacant(entry) => {\n                let broadcast_state = BroadcastState::new(\n                    cert.clone(),\n                    self.validator_id,\n                    self.thresholds.echo_threshold,\n                    self.thresholds.ready_threshold,\n                    self.thresholds.delivery_threshold,\n                    self.event_sender.clone(),\n                    self.subscriptions.clone(),\n                    need_gossip,\n                    self.message_signer.clone(),\n                );\n\n                let (task, task_context) = Task::new(\n                    cert.id,\n                    
broadcast_state,\n                    self.validator_store.clone(),\n                    self.broadcast_sender.clone(),\n                );\n\n                let prev = self.validator_store.get_certificate(&cert.prev_id);\n                if matches!(prev, Ok(Some(_)))\n                    || cert.prev_id == topos_core::uci::INITIAL_CERTIFICATE_ID\n                {\n                    Self::start_task(\n                        &self.running_tasks,\n                        task,\n                        task_context.sink.clone(),\n                        self.buffered_messages.remove(&cert.id),\n                        need_gossip,\n                    );\n                } else {\n                    debug!(\n                        \"Received broadcast message for certificate {} but the previous \\\n                         certificate {} is not available yet\",\n                        cert.id, cert.prev_id\n                    );\n                }\n                debug!(\n                    \"Creating task for pending certificate {} at position {} if needed\",\n                    cert.id, pending_id\n                );\n                entry.insert(task_context);\n            }\n            std::collections::hash_map::Entry::Occupied(_) => {\n                trace!(\n                    \"Received broadcast message for certificate {} but it is already being \\\n                     processed\",\n                    cert.id\n                );\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "crates/topos-tce-broadcast/src/task_manager/task.rs",
    "content": "use std::future::{Future, IntoFuture};\nuse std::pin::Pin;\nuse std::sync::Arc;\nuse tokio::sync::{broadcast, mpsc};\n\nuse topos_core::types::stream::Position;\nuse topos_core::uci::CertificateId;\nuse topos_tce_storage::errors::StorageError;\nuse topos_tce_storage::store::{ReadStore, WriteStore};\nuse topos_tce_storage::types::CertificateDeliveredWithPositions;\nuse topos_tce_storage::validator::ValidatorStore;\nuse tracing::{debug, error};\n\nuse crate::double_echo::broadcast_state::{BroadcastState, Status};\nuse crate::{DoubleEchoCommand, TaskStatus};\n\n#[derive(Debug)]\npub struct TaskContext {\n    pub sink: mpsc::Sender<DoubleEchoCommand>,\n    pub shutdown_sender: mpsc::Sender<()>,\n}\n\npub struct Task {\n    pub validator_store: Arc<ValidatorStore>,\n    pub message_receiver: mpsc::Receiver<DoubleEchoCommand>,\n    pub certificate_id: CertificateId,\n    pub broadcast_state: BroadcastState,\n    pub shutdown_receiver: mpsc::Receiver<()>,\n    broadcast_sender: broadcast::Sender<CertificateDeliveredWithPositions>,\n}\n\nimpl Task {\n    pub fn new(\n        certificate_id: CertificateId,\n        broadcast_state: BroadcastState,\n        validator_store: Arc<ValidatorStore>,\n        broadcast_sender: broadcast::Sender<CertificateDeliveredWithPositions>,\n    ) -> (Task, TaskContext) {\n        let (message_sender, message_receiver) = mpsc::channel(10_024);\n        let (shutdown_sender, shutdown_receiver) = mpsc::channel(1);\n\n        let task_context = TaskContext {\n            sink: message_sender,\n            shutdown_sender,\n        };\n\n        let task = Task {\n            validator_store,\n            message_receiver,\n            certificate_id,\n            broadcast_state,\n            shutdown_receiver,\n            broadcast_sender,\n        };\n\n        (task, task_context)\n    }\n\n    pub async fn persist(&self) -> Result<CertificateDeliveredWithPositions, StorageError> {\n        let certificate_delivered = 
self.broadcast_state.into_delivered();\n\n        let positions = self\n            .validator_store\n            .insert_certificate_delivered(&certificate_delivered)\n            .await?;\n\n        Ok(CertificateDeliveredWithPositions(\n            certificate_delivered,\n            positions,\n        ))\n    }\n}\n\nimpl IntoFuture for Task {\n    type Output = (CertificateId, TaskStatus);\n\n    type IntoFuture = Pin<Box<dyn Future<Output = Self::Output> + Send + 'static>>;\n\n    fn into_future(mut self) -> Self::IntoFuture {\n        Box::pin(async move {\n            // When the task starts, we need to gather information such as current stream position\n            // for the source subnet in order to expect its position\n            let expected_position = match self.validator_store.last_delivered_position_for_subnet(\n                &self.broadcast_state.certificate.source_subnet_id,\n            ) {\n                Ok(Some(stream_position)) => stream_position.position.increment().unwrap(),\n                Ok(None) => Position::ZERO,\n                Err(_) => return (self.certificate_id, TaskStatus::Failure),\n            };\n\n            debug!(\n                \"Expected position for Certificate {} is {:?} for the subnet {}\",\n                self.certificate_id,\n                expected_position,\n                self.broadcast_state.certificate.source_subnet_id\n            );\n            self.broadcast_state.expected_position = Some(expected_position);\n\n            loop {\n                tokio::select! {\n                    Some(msg) = self.message_receiver.recv() => {\n                        match msg {\n                            DoubleEchoCommand::Echo { validator_id, .. 
} => {\n                                if let Some(Status::DeliveredWithReadySent) = self.broadcast_state.apply_echo(validator_id) {\n                                    match self.persist().await {\n                                        Ok(delivered) => {\n                                            _ = self.broadcast_sender.send(delivered);\n\n                                            return (self.certificate_id, TaskStatus::Success);\n                                        }\n                                        Err(error) => {\n                                            error!(\"Unable to persist one delivered certificate: {:?}\", error);\n                                            return (self.certificate_id, TaskStatus::Failure);\n                                        }\n                                    }\n\n                                }\n                            }\n                            DoubleEchoCommand::Ready { validator_id, .. } => {\n                                if let Some(Status::DeliveredWithReadySent) = self.broadcast_state.apply_ready(validator_id) {\n                                    match self.persist().await {\n                                        Ok(delivered) => {\n                                            _ = self.broadcast_sender.send(delivered);\n\n                                            return (self.certificate_id, TaskStatus::Success);\n                                        }\n                                        Err(error) => {\n                                            error!(\"Unable to persist one delivered certificate: {:?}\", error);\n                                            return (self.certificate_id, TaskStatus::Failure);\n                                        }\n                                    }\n                                }\n                            }\n                            _ => {}\n                        }\n                    }\n                    _ = 
self.shutdown_receiver.recv() => {\n                        debug!(\"Received shutdown, shutting down task {:?}\", self.certificate_id);\n                        return (self.certificate_id, TaskStatus::Failure)\n                    }\n                }\n            }\n        })\n    }\n}\n"
  },
  {
    "path": "crates/topos-tce-broadcast/src/tests/mod.rs",
    "content": "use crate::double_echo::*;\nuse crate::event::ProtocolEvents;\nuse rstest::*;\nuse std::collections::HashSet;\nuse std::str::FromStr;\nuse std::sync::Arc;\nuse std::time::Duration;\nuse tokio::sync::mpsc::Receiver;\nuse tokio::sync::{broadcast, mpsc, oneshot};\nuse topos_config::tce::broadcast::ReliableBroadcastParams;\nuse topos_core::uci::Certificate;\nuse topos_crypto::messages::MessageSigner;\nuse topos_crypto::validator_id::ValidatorId;\nuse topos_tce_storage::types::CertificateDeliveredWithPositions;\nuse topos_tce_storage::validator::ValidatorStore;\nuse topos_test_sdk::constants::*;\nuse topos_test_sdk::storage::create_validator_store;\n\nmod task;\nmod task_manager;\n\nconst CHANNEL_SIZE: usize = 10;\nconst PRIVATE_KEY: &str = \"d6f8d1fe6d0f3606ccb15ef383910f10d83ca77bf3d73007f12fef023dabaab9\";\n\n#[fixture]\nfn small_config() -> TceParams {\n    TceParams {\n        nb_peers: 10,\n        broadcast_params: ReliableBroadcastParams {\n            echo_threshold: 8,\n            ready_threshold: 5,\n            delivery_threshold: 8,\n        },\n    }\n}\n\n#[fixture]\nfn medium_config() -> TceParams {\n    TceParams {\n        nb_peers: 50,\n        broadcast_params: ReliableBroadcastParams {\n            echo_threshold: 33,\n            ready_threshold: 16,\n            delivery_threshold: 32,\n        },\n    }\n}\n\n#[derive(Debug)]\nstruct TceParams {\n    nb_peers: usize,\n    broadcast_params: ReliableBroadcastParams,\n}\n\nstruct Context {\n    event_receiver: Receiver<ProtocolEvents>,\n    broadcast_receiver: broadcast::Receiver<CertificateDeliveredWithPositions>,\n    validator_store: Arc<ValidatorStore>,\n}\n\nasync fn create_context(params: TceParams) -> (DoubleEcho, Context) {\n    let validator_store = create_validator_store::default().await;\n    let (_cmd_sender, cmd_receiver) = mpsc::channel(CHANNEL_SIZE);\n    let (event_sender, event_receiver) = mpsc::channel(CHANNEL_SIZE);\n    let (_double_echo_shutdown_sender, 
double_echo_shutdown_receiver) =\n        mpsc::channel::<oneshot::Sender<()>>(1);\n    let (task_manager_message_sender, task_manager_message_receiver) = mpsc::channel(CHANNEL_SIZE);\n\n    let message_signer = Arc::new(MessageSigner::from_str(PRIVATE_KEY).unwrap());\n\n    let mut validators = HashSet::new();\n    let validator_id = ValidatorId::from(message_signer.public_address);\n    validators.insert(validator_id);\n\n    for i in 1..params.nb_peers {\n        let message_signer = Arc::new(MessageSigner::new(&[i as u8; 32]).unwrap());\n        let validator_id = ValidatorId::from(message_signer.public_address);\n        validators.insert(validator_id);\n    }\n\n    let (broadcast_sender, broadcast_receiver) = broadcast::channel(CHANNEL_SIZE);\n    let mut double_echo = DoubleEcho::new(\n        params.broadcast_params,\n        validator_id,\n        message_signer,\n        validators.clone(),\n        task_manager_message_sender.clone(),\n        cmd_receiver,\n        event_sender,\n        double_echo_shutdown_receiver,\n        validator_store.clone(),\n        broadcast_sender,\n    );\n\n    double_echo.spawn_task_manager(task_manager_message_receiver);\n\n    (\n        double_echo,\n        Context {\n            event_receiver,\n            broadcast_receiver,\n            validator_store,\n        },\n    )\n}\n\nasync fn reach_echo_threshold(double_echo: &mut DoubleEcho, cert: &Certificate) {\n    let selected = double_echo\n        .subscriptions\n        .echo\n        .iter()\n        .take(double_echo.params.echo_threshold)\n        .cloned()\n        .collect::<Vec<_>>();\n\n    let message_signer = Arc::new(MessageSigner::from_str(PRIVATE_KEY).unwrap());\n    let validator_id = ValidatorId::from(message_signer.public_address);\n\n    let mut payload = Vec::new();\n    payload.extend_from_slice(cert.id.as_array());\n    payload.extend_from_slice(validator_id.as_bytes());\n\n    let signature = 
message_signer.sign_message(&payload).unwrap();\n\n    for val_id in selected {\n        double_echo.handle_echo(cert.id, val_id, signature).await;\n    }\n}\n\nasync fn reach_ready_threshold(double_echo: &mut DoubleEcho, cert: &Certificate) {\n    let selected = double_echo\n        .subscriptions\n        .ready\n        .iter()\n        .take(double_echo.params.ready_threshold)\n        .cloned()\n        .collect::<Vec<_>>();\n\n    let message_signer = Arc::new(MessageSigner::from_str(PRIVATE_KEY).unwrap());\n\n    let validator_id = ValidatorId::from(message_signer.public_address);\n\n    let mut payload = Vec::new();\n    payload.extend_from_slice(cert.id.as_array());\n    payload.extend_from_slice(validator_id.as_bytes());\n\n    let signature = message_signer.sign_message(&payload).unwrap();\n\n    for val_id in selected {\n        double_echo.handle_ready(cert.id, val_id, signature).await;\n    }\n}\n\nasync fn reach_delivery_threshold(double_echo: &mut DoubleEcho, cert: &Certificate) {\n    let selected = double_echo\n        .subscriptions\n        .ready\n        .iter()\n        .take(double_echo.params.delivery_threshold)\n        .cloned()\n        .collect::<Vec<_>>();\n\n    let message_signer = Arc::new(MessageSigner::from_str(PRIVATE_KEY).unwrap());\n    let validator_id = ValidatorId::from(message_signer.public_address);\n\n    let mut payload = Vec::new();\n    payload.extend_from_slice(cert.id.as_array());\n    payload.extend_from_slice(validator_id.as_bytes());\n\n    let signature = message_signer.sign_message(&payload).unwrap();\n\n    for val_id in selected {\n        double_echo.handle_ready(cert.id, val_id, signature).await;\n    }\n}\n\n#[rstest]\n#[case::small_config(small_config())]\n#[case(medium_config())]\n#[test_log::test(tokio::test)]\n#[trace]\n#[timeout(Duration::from_secs(10))]\nasync fn trigger_success_path_upon_reaching_threshold(#[case] params: TceParams) {\n    let (mut double_echo, mut ctx) = 
create_context(params).await;\n\n    let dummy_cert =\n        Certificate::new_with_default_fields(PREV_CERTIFICATE_ID, SOURCE_SUBNET_ID_1, &[])\n            .expect(\"Dummy certificate\");\n\n    _ = ctx\n        .validator_store\n        .insert_pending_certificate(&dummy_cert)\n        .await\n        .unwrap();\n\n    assert!(matches!(\n        ctx.event_receiver.recv().await,\n        Some(ProtocolEvents::Broadcast { certificate_id }) if certificate_id == dummy_cert.id\n    ));\n\n    assert!(matches!(\n        ctx.event_receiver.try_recv(),\n        Ok(ProtocolEvents::Gossip { .. })\n    ));\n    assert!(matches!(\n        ctx.event_receiver.try_recv(),\n        Ok(ProtocolEvents::Echo { .. })\n    ));\n\n    assert!(matches!(\n        ctx.event_receiver.try_recv(),\n        Err(mpsc::error::TryRecvError::Empty)\n    ));\n\n    // Trigger Ready upon reaching the Echo threshold\n    reach_echo_threshold(&mut double_echo, &dummy_cert).await;\n\n    assert!(matches!(\n        ctx.event_receiver.recv().await,\n        Some(ProtocolEvents::Ready { .. })\n    ));\n\n    // Trigger Delivery upon reaching the Delivery threshold\n    reach_delivery_threshold(&mut double_echo, &dummy_cert).await;\n    let x = ctx.broadcast_receiver.recv().await;\n    assert!(matches!(\n            x,\n        Ok(CertificateDeliveredWithPositions(topos_core::types::CertificateDelivered { certificate, .. 
}, _)) if certificate == dummy_cert\n    ));\n}\n\n#[rstest]\n#[case::small_config(small_config())]\n#[case(medium_config())]\n#[test_log::test(tokio::test)]\n#[trace]\n#[timeout(Duration::from_secs(4))]\nasync fn trigger_ready_when_reached_enough_ready(#[case] params: TceParams) {\n    let (mut double_echo, mut ctx) = create_context(params).await;\n\n    let dummy_cert =\n        Certificate::new_with_default_fields(PREV_CERTIFICATE_ID, SOURCE_SUBNET_ID_1, &[])\n            .expect(\"Dummy certificate\");\n\n    _ = ctx\n        .validator_store\n        .insert_pending_certificate(&dummy_cert)\n        .await\n        .unwrap();\n\n    assert!(matches!(\n        ctx.event_receiver.recv().await,\n        Some(ProtocolEvents::Broadcast { certificate_id }) if certificate_id == dummy_cert.id\n    ));\n\n    assert!(matches!(\n        ctx.event_receiver.try_recv(),\n        Ok(ProtocolEvents::Gossip { .. })\n    ));\n\n    assert!(matches!(\n        ctx.event_receiver.try_recv(),\n        Ok(ProtocolEvents::Echo { .. })\n    ));\n\n    // Trigger Ready upon reaching the Ready threshold\n    reach_ready_threshold(&mut double_echo, &dummy_cert).await;\n\n    assert!(matches!(\n        ctx.event_receiver.recv().await,\n        Some(ProtocolEvents::Ready { .. })\n    ));\n}\n"
  },
  {
    "path": "crates/topos-tce-broadcast/src/tests/task.rs",
    "content": "use std::{future::IntoFuture, sync::Arc, time::Duration};\n\nuse rstest::rstest;\nuse tokio::{\n    spawn,\n    sync::{broadcast, mpsc},\n};\nuse topos_core::uci::Certificate;\nuse topos_crypto::{messages::MessageSigner, validator_id::ValidatorId};\nuse topos_tce_storage::validator::ValidatorStore;\nuse topos_test_sdk::{\n    certificates::create_certificate_chain,\n    constants::{SOURCE_SUBNET_ID_1, TARGET_SUBNET_ID_1},\n    crypto::message_signer,\n    storage::create_validator_store,\n};\n\nuse crate::{\n    double_echo::broadcast_state::BroadcastState, event::ProtocolEvents,\n    sampler::SubscriptionsView, task_manager::task::Task,\n};\n\n#[rstest]\n#[test_log::test(tokio::test)]\n#[timeout(Duration::from_secs(1))]\nasync fn start_with_ungossiped_cert(\n    #[future(awt)]\n    #[from(create_validator_store)]\n    validatore_store: Arc<ValidatorStore>,\n    message_signer: Arc<MessageSigner>,\n) {\n    let certificate = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 1)\n        .pop()\n        .unwrap()\n        .certificate;\n    let certificate_id = certificate.id;\n    let validator_id = ValidatorId::default();\n    let thresholds = topos_config::tce::broadcast::ReliableBroadcastParams {\n        echo_threshold: 1,\n        ready_threshold: 1,\n        delivery_threshold: 1,\n    };\n    let (event_sender, mut event_receiver) = mpsc::channel(2);\n    let (broadcast_sender, _) = broadcast::channel(1);\n    let need_gossip = true;\n    let subscriptions = SubscriptionsView::default();\n\n    let broadcast_state = BroadcastState::new(\n        certificate,\n        validator_id,\n        thresholds.echo_threshold,\n        thresholds.ready_threshold,\n        thresholds.delivery_threshold,\n        event_sender,\n        subscriptions,\n        need_gossip,\n        message_signer,\n    );\n\n    let (task, _ctx) = Task::new(\n        certificate_id,\n        broadcast_state,\n        validatore_store,\n        
broadcast_sender,\n    );\n\n    let _handle = spawn(task.into_future());\n\n    let event = event_receiver.recv().await;\n    assert!(matches!(\n            event,\n        Some(ProtocolEvents::Broadcast {\n            certificate_id: id\n        }) if id == certificate_id\n    ));\n\n    let event = event_receiver.recv().await;\n    assert!(matches!(\n            event,\n        Some(ProtocolEvents::Gossip {\n            cert: Certificate { id, .. }\n        }) if id == certificate_id\n    ));\n}\n"
  },
  {
    "path": "crates/topos-tce-broadcast/src/tests/task_manager.rs",
    "content": "use std::sync::Arc;\n\nuse rstest::rstest;\nuse tokio::{\n    spawn,\n    sync::{broadcast, mpsc},\n};\nuse tokio_util::sync::CancellationToken;\nuse topos_crypto::{messages::MessageSigner, validator_id::ValidatorId};\nuse topos_metrics::DOUBLE_ECHO_ACTIVE_TASKS_COUNT;\nuse topos_tce_storage::validator::ValidatorStore;\nuse topos_test_sdk::{\n    certificates::create_certificate_chain,\n    constants::{SOURCE_SUBNET_ID_1, TARGET_SUBNET_ID_1},\n    crypto::message_signer,\n    storage::create_validator_store,\n};\n\nuse crate::{sampler::SubscriptionsView, task_manager::TaskManager};\n\n#[rstest]\n#[tokio::test]\nasync fn can_start(\n    #[future(awt)]\n    #[from(create_validator_store)]\n    validator_store: Arc<ValidatorStore>,\n    message_signer: Arc<MessageSigner>,\n) {\n    let (message_sender, message_receiver) = mpsc::channel(1);\n    let (event_sender, _) = mpsc::channel(1);\n    let (broadcast_sender, _) = broadcast::channel(1);\n    let shutdown = CancellationToken::new();\n    let validator_id = ValidatorId::default();\n    let thresholds = topos_config::tce::broadcast::ReliableBroadcastParams {\n        echo_threshold: 1,\n        ready_threshold: 1,\n        delivery_threshold: 1,\n    };\n\n    let manager = TaskManager::new(\n        message_receiver,\n        SubscriptionsView::default(),\n        event_sender,\n        validator_id,\n        thresholds,\n        message_signer,\n        validator_store,\n        broadcast_sender,\n    );\n\n    spawn(manager.run(shutdown));\n\n    let certificates = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 2);\n    let parent = certificates\n        .first()\n        .take()\n        .expect(\"Failed to create certificate\");\n\n    let child = certificates\n        .last()\n        .take()\n        .expect(\"Failed to create certificate\");\n\n    let _ = message_sender\n        .send(crate::DoubleEchoCommand::Broadcast {\n            need_gossip: false,\n            
cert: child.certificate.clone(),\n            pending_id: 0,\n        })\n        .await;\n\n    let _ = message_sender\n        .send(crate::DoubleEchoCommand::Broadcast {\n            need_gossip: false,\n            cert: parent.certificate.clone(),\n            pending_id: 0,\n        })\n        .await;\n\n    let _ = message_sender\n        .send(crate::DoubleEchoCommand::Broadcast {\n            need_gossip: false,\n            cert: parent.certificate.clone(),\n            pending_id: 0,\n        })\n        .await;\n\n    assert_eq!(DOUBLE_ECHO_ACTIVE_TASKS_COUNT.get(), 1);\n}\n"
  },
  {
    "path": "crates/topos-tce-gatekeeper/Cargo.toml",
    "content": "[package]\nname = \"topos-tce-gatekeeper\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n[lints]\nworkspace = true\n\n[dependencies]\nasync-trait.workspace = true\nfutures.workspace = true\nrand.workspace = true\nthiserror.workspace = true\ntracing.workspace = true\ntokio = { workspace = true, features = [\"full\"] }\ntopos-core = { workspace = true, features = [\"uci\"] }\ntopos-p2p = { path = \"../topos-p2p\" }\n\n[dev-dependencies]\nrstest.workspace = true\ntracing-subscriber = { workspace = true, features = [\"env-filter\", \"fmt\"] }\ntest-log.workspace = true\nenv_logger.workspace = true\n"
  },
  {
    "path": "crates/topos-tce-gatekeeper/src/builder.rs",
    "content": "use std::future::IntoFuture;\n\nuse futures::{future::BoxFuture, FutureExt};\nuse tokio::sync::mpsc;\n\nuse crate::{client::GatekeeperClient, Gatekeeper, GatekeeperError};\n\n#[derive(Default)]\npub struct GatekeeperBuilder {}\n\nimpl IntoFuture for GatekeeperBuilder {\n    type Output = Result<(GatekeeperClient, Gatekeeper), GatekeeperError>;\n\n    type IntoFuture = BoxFuture<'static, Self::Output>;\n\n    fn into_future(self) -> Self::IntoFuture {\n        let (shutdown_channel, shutdown) = mpsc::channel(1);\n\n        futures::future::ok((\n            GatekeeperClient { shutdown_channel },\n            Gatekeeper {\n                shutdown,\n                ..Gatekeeper::default()\n            },\n        ))\n        .boxed()\n    }\n}\n"
  },
  {
    "path": "crates/topos-tce-gatekeeper/src/client.rs",
    "content": "use crate::GatekeeperError;\nuse tokio::sync::{mpsc, oneshot};\n\n#[derive(Clone)]\npub struct GatekeeperClient {\n    pub(crate) shutdown_channel: mpsc::Sender<oneshot::Sender<()>>,\n}\n\nimpl GatekeeperClient {\n    pub async fn shutdown(&self) -> Result<(), GatekeeperError> {\n        let (sender, receiver) = oneshot::channel();\n        self.shutdown_channel\n            .send(sender)\n            .await\n            .map_err(GatekeeperError::ShutdownCommunication)?;\n\n        Ok(receiver.await?)\n    }\n}\n"
  },
  {
    "path": "crates/topos-tce-gatekeeper/src/lib.rs",
    "content": "use std::{future::IntoFuture, time::Duration};\n\nuse builder::GatekeeperBuilder;\nuse futures::{future::BoxFuture, FutureExt};\nuse thiserror::Error;\nuse tokio::{\n    sync::{mpsc, oneshot},\n    time,\n};\nuse tracing::error;\n\nmod builder;\nmod client;\n#[cfg(test)]\nmod tests;\n\npub use client::GatekeeperClient;\nuse tracing::{info, warn};\n\npub struct Gatekeeper {\n    pub(crate) shutdown: mpsc::Receiver<oneshot::Sender<()>>,\n    pub(crate) tick_duration: Duration,\n}\n\nimpl Default for Gatekeeper {\n    fn default() -> Self {\n        let (_shutdown_channel, shutdown) = mpsc::channel(1);\n        let tick_duration = Duration::from_secs(Self::DEFAULT_TICK_DURATION);\n\n        Self {\n            shutdown,\n            tick_duration,\n        }\n    }\n}\n\nimpl IntoFuture for Gatekeeper {\n    type Output = Result<(), GatekeeperError>;\n\n    type IntoFuture = BoxFuture<'static, Self::Output>;\n\n    fn into_future(mut self) -> Self::IntoFuture {\n        async move {\n            let mut interval = time::interval(self.tick_duration);\n\n            let shutdowned: Option<oneshot::Sender<()>> = loop {\n                tokio::select! 
{\n                    _ = interval.tick() => {}\n                    sender = self.shutdown.recv() => {\n                        break sender;\n                    }\n                }\n            };\n\n            if let Some(sender) = shutdowned {\n                info!(\"Shutting down gatekeeper...\");\n                _ = sender.send(());\n            } else {\n                warn!(\"Shutting down gatekeeper due to error...\");\n            }\n\n            Ok(())\n        }\n        .boxed()\n    }\n}\n\nimpl Gatekeeper {\n    pub(crate) const DEFAULT_TICK_DURATION: u64 = 10;\n\n    pub fn builder() -> GatekeeperBuilder {\n        GatekeeperBuilder::default()\n    }\n}\n\n#[derive(Debug, Error)]\npub enum GatekeeperError {\n    #[error(\"Unable to receive expected response from Gatekeeper: {0}\")]\n    ResponseChannel(#[from] oneshot::error::RecvError),\n\n    #[error(\"Unable to execute command on the Gatekeeper: {0}\")]\n    InvalidCommand(String),\n\n    #[error(\"Unable to execute shutdown on the Gatekeeper: {0}\")]\n    ShutdownCommunication(mpsc::error::SendError<oneshot::Sender<()>>),\n\n    #[error(\"The command produce no update\")]\n    NoUpdate,\n}\n"
  },
  {
    "path": "crates/topos-tce-gatekeeper/src/tests.rs",
    "content": "use std::future::IntoFuture;\n\nuse rstest::fixture;\nuse test_log::test;\nuse tokio::spawn;\nuse topos_p2p::PeerId;\n\nuse crate::{client::GatekeeperClient, Gatekeeper};\n\n#[test(tokio::test)]\nasync fn can_start_and_stop() -> Result<(), Box<dyn std::error::Error>> {\n    let (client, server) = Gatekeeper::builder().await?;\n\n    let handler = spawn(server.into_future());\n\n    client.shutdown().await?;\n\n    assert!(handler.is_finished());\n\n    Ok(())\n}\n\n#[fixture]\nasync fn gatekeeper() -> GatekeeperClient {\n    let (client, server) = Gatekeeper::builder().await.unwrap();\n\n    spawn(server.into_future());\n\n    client\n}\n\n#[fixture]\nfn peer_list(#[default(10)] number: usize) -> Vec<PeerId> {\n    (0..number)\n        .map(|i| {\n            topos_p2p::utils::local_key_pair(Some(i as u8))\n                .public()\n                .to_peer_id()\n        })\n        .collect()\n}\n"
  },
  {
    "path": "crates/topos-tce-proxy/Cargo.toml",
    "content": "[package]\nname = \"topos-tce-proxy\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n[lints]\nworkspace = true\n\n[dependencies]\ntopos-core = { workspace = true, features = [\"uci\", \"api\"] }\ntopos-telemetry = { path = \"../topos-telemetry\" }\n\nasync-stream.workspace = true\nbackoff.workspace = true\nbyteorder.workspace = true\nfutures.workspace = true\nhex.workspace = true\nhyper.workspace = true\nserde = { workspace = true, features = [\"derive\"] }\nserde_json.workspace = true\nthiserror.workspace = true\ntokio = { workspace = true, features = [\n    \"io-util\",\n    \"io-std\",\n    \"macros\",\n    \"rt\",\n    \"rt-multi-thread\",\n    \"fs\",\n    \"time\",\n    \"sync\",\n] }\ntokio-stream.workspace = true\ntonic = { workspace = true, features = [\"transport\"] }\ntracing-subscriber = { workspace = true, features = [\"env-filter\", \"json\", \"ansi\", \"fmt\"] }\ntracing.workspace = true\nuuid.workspace = true\ntracing-opentelemetry.workspace = true\nopentelemetry.workspace = true\nbase64ct.workspace = true\n\n[dev-dependencies]\nlibp2p.workspace = true\ntopos-tce = { path = \"../topos-tce\" }\nrstest = { workspace = true, features = [\"async-timeout\"] }\ntest-log.workspace = true\nenv_logger.workspace = true\nserial_test.workspace = true\nbyteorder = \"1.4.3\"\ndockertest = \"0.3.1\"\ntopos-tce-storage = { path = \"../topos-tce-storage\" }\ntopos-test-sdk = { path = \"../topos-test-sdk/\" }\n"
  },
  {
    "path": "crates/topos-tce-proxy/src/client.rs",
    "content": "use crate::{Error, TceProxyEvent};\nuse base64ct::{Base64, Encoding};\nuse futures::stream::FuturesOrdered;\nuse opentelemetry::trace::FutureExt;\nuse std::collections::HashMap;\nuse std::time::Duration;\nuse tokio::sync::{mpsc, oneshot};\nuse tokio_stream::StreamExt;\n\nuse tonic::IntoRequest;\nuse topos_core::api::grpc::checkpoints::{TargetCheckpoint, TargetStreamPosition};\nuse topos_core::api::grpc::tce::v1::{\n    GetLastPendingCertificatesRequest, GetSourceHeadRequest, GetSourceHeadResponse,\n};\nuse topos_core::{\n    api::grpc::tce::v1::{\n        watch_certificates_request, watch_certificates_response, SubmitCertificateRequest,\n        WatchCertificatesRequest, WatchCertificatesResponse,\n    },\n    uci::{Certificate, SubnetId},\n};\nuse tracing::{debug, error, info, info_span, warn, Instrument, Span};\nuse tracing_opentelemetry::OpenTelemetrySpanExt;\n\nconst CERTIFICATE_OUTBOUND_CHANNEL_SIZE: usize = 100;\nconst CERTIFICATE_INBOUND_CHANNEL_SIZE: usize = 100;\nconst TCE_PROXY_COMMAND_CHANNEL_SIZE: usize = 100;\n\n// Maximum backoff retry timeout in seconds (1 hour)\nconst TCE_SUBMIT_CERTIFICATE_BACKOFF_TIMEOUT: Duration = Duration::from_secs(3600);\n\npub(crate) enum TceClientCommand {\n    // Get head certificate that was sent to the TCE node for this subnet\n    GetSourceHead {\n        subnet_id: SubnetId,\n        sender: oneshot::Sender<Result<(Certificate, u64), Error>>,\n    },\n    // Get map of subnet id->last pending certificate\n    GetLastPendingCertificates {\n        subnet_ids: Vec<SubnetId>,\n        #[allow(clippy::type_complexity)]\n        sender: oneshot::Sender<Result<HashMap<SubnetId, Option<(Certificate, u64)>>, Error>>,\n    },\n    // Open the stream to the TCE node\n    // Mark the position from which TCE node certificates should be retrieved\n    OpenStream {\n        target_checkpoint: TargetCheckpoint,\n    },\n    // Send generated certificate to the TCE node\n    SendCertificate {\n        cert: 
Box<Certificate>,\n        span: tracing::Span,\n    },\n    Shutdown,\n}\n\n/// Create new backoff library error based on error that happened\npub(crate) fn new_tce_proxy_backoff_err<E: std::fmt::Display>(err: E) -> backoff::Error<E> {\n    // Retry according to backoff policy\n    backoff::Error::Transient {\n        err,\n        retry_after: None,\n    }\n}\n\npub struct TceClient {\n    subnet_id: topos_core::uci::SubnetId,\n    tce_endpoint: String,\n    command_sender: mpsc::Sender<TceClientCommand>,\n}\n\nimpl TceClient {\n    pub async fn open_stream(&self, positions: Vec<TargetStreamPosition>) -> Result<(), Error> {\n        self.command_sender\n            .send(TceClientCommand::OpenStream {\n                target_checkpoint: TargetCheckpoint {\n                    target_subnet_ids: vec![self.subnet_id],\n                    positions,\n                },\n            })\n            .await\n            .map_err(|_| Error::InvalidChannelError)?;\n        Ok(())\n    }\n    pub async fn send_certificate(&mut self, cert: Certificate) -> Result<(), Error> {\n        self.command_sender\n            .send(TceClientCommand::SendCertificate {\n                cert: Box::new(cert),\n                span: tracing::Span::current(),\n            })\n            .with_current_context()\n            .in_current_span()\n            .await\n            .map_err(|_| Error::InvalidChannelError)?;\n        Ok(())\n    }\n    pub async fn close(&mut self) -> Result<(), Error> {\n        self.command_sender\n            .send(TceClientCommand::Shutdown)\n            .await\n            .map_err(|_| Error::InvalidChannelError)?;\n        Ok(())\n    }\n\n    // Return source head and position of the certificate\n    pub async fn get_source_head(&mut self) -> Result<(Certificate, u64), Error> {\n        #[allow(clippy::type_complexity)]\n        let (sender, receiver): (\n            oneshot::Sender<Result<(Certificate, u64), Error>>,\n            
oneshot::Receiver<Result<(Certificate, u64), Error>>,\n        ) = oneshot::channel();\n        self.command_sender\n            .send(TceClientCommand::GetSourceHead {\n                subnet_id: self.subnet_id,\n                sender,\n            })\n            .await\n            .map_err(|_| Error::InvalidChannelError)?;\n\n        receiver.await.map_err(|_| Error::InvalidChannelError)?\n    }\n\n    pub async fn get_last_pending_certificates(\n        &mut self,\n        subnet_ids: Vec<SubnetId>,\n    ) -> Result<HashMap<SubnetId, Option<(Certificate, u64)>>, Error> {\n        #[allow(clippy::type_complexity)]\n        let (sender, receiver) = oneshot::channel();\n        self.command_sender\n            .send(TceClientCommand::GetLastPendingCertificates { subnet_ids, sender })\n            .await\n            .map_err(|_| Error::InvalidChannelError)?;\n\n        receiver.await.map_err(|_| Error::InvalidChannelError)?\n    }\n\n    pub fn get_subnet_id(&self) -> SubnetId {\n        self.subnet_id\n    }\n\n    pub fn get_tce_endpoint(&self) -> &str {\n        self.tce_endpoint.as_str()\n    }\n}\n\n#[derive(Default)]\npub struct TceClientBuilder {\n    tce_endpoint: Option<String>,\n    subnet_id: Option<SubnetId>,\n    tce_proxy_event_sender: Option<mpsc::Sender<TceProxyEvent>>,\n}\n\nimpl TceClientBuilder {\n    pub fn set_tce_endpoint<T: ToString>(mut self, endpoint: T) -> Self {\n        self.tce_endpoint = Some(endpoint.to_string());\n        self\n    }\n\n    pub fn set_subnet_id(mut self, subnet_id: SubnetId) -> Self {\n        self.subnet_id = Some(subnet_id);\n        self\n    }\n\n    pub fn set_proxy_event_sender(\n        mut self,\n        tce_proxy_event_sender: mpsc::Sender<TceProxyEvent>,\n    ) -> Self {\n        self.tce_proxy_event_sender = Some(tce_proxy_event_sender);\n        self\n    }\n\n    pub async fn build_and_launch(\n        self,\n        mut shutdown: mpsc::Receiver<oneshot::Sender<()>>,\n    ) -> Result<\n        (\n     
       TceClient,\n            impl futures::stream::Stream<Item = (Certificate, TargetStreamPosition)>,\n        ),\n        Error,\n    > {\n        // Channel used to pass received certificates (certificates pushed TCE node) from the TCE client to the application\n        let (inbound_certificate_sender, inbound_certificate_receiver) =\n            mpsc::channel::<(Certificate, TargetStreamPosition)>(CERTIFICATE_INBOUND_CHANNEL_SIZE);\n\n        let tce_endpoint = self\n            .tce_endpoint\n            .as_ref()\n            .ok_or(Error::InvalidTceEndpoint)?\n            .clone();\n        // Connect to tce node service using backoff strategy\n        let mut tce_grpc_client =\n            match crate::connect_to_tce_service_with_retry(tce_endpoint.clone()).await {\n                Ok(client) => {\n                    info!(\"Connected to the TCE service at {}\", &tce_endpoint);\n                    client\n                }\n                Err(e) => {\n                    error!(\"Unable to connect to tce client: {}\", e);\n                    return Err(e);\n                }\n            };\n\n        // Channel used to initiate watch_certificates_request::Command that will be sent to the TCE through stream\n        let (outbound_stream_command_sender, mut outbound_stream_command_receiver) =\n            mpsc::channel::<WatchCertificatesRequest>(CERTIFICATE_OUTBOUND_CHANNEL_SIZE);\n\n        // Outbound stream used to send watch_certificates_request::Command to the TCE node service\n        let outbound_watch_certificates_stream = async_stream::stream! 
{\n            loop {\n                while let Some(request) = outbound_stream_command_receiver.recv().await {\n                    yield request;\n                }\n            }\n        };\n\n        // Call TCE service watch certificates, get inbound response stream\n        let mut inbound_watch_certificates_stream: tonic::Streaming<WatchCertificatesResponse> =\n            tce_grpc_client\n                .watch_certificates(outbound_watch_certificates_stream)\n                .await\n                .map(|r| r.into_inner())?;\n\n        // Channel used to shut down task for inbound stream responses processing\n        let (inbound_shutdown_sender, mut inbound_shutdown_receiver) =\n            mpsc::unbounded_channel::<()>();\n\n        let subnet_id = *self.subnet_id.as_ref().ok_or(Error::InvalidSubnetId)?;\n\n        let tce_proxy_event_sender = self.tce_proxy_event_sender.clone();\n\n        // Run task and process inbound watch certificate stream responses\n        tokio::spawn(async move {\n            // Listen for feedback from TCE service (WatchCertificatesResponse)\n            info!(\n                \"Entering watch certificate response loop for tce node {} for subnet id {}\",\n                &tce_endpoint, &subnet_id\n            );\n            loop {\n                tokio::select! 
{\n                    Some(response) = inbound_watch_certificates_stream.next() => {\n                        match response {\n                            Ok(watch_certificate_response) => match watch_certificate_response.event {\n                                // Received CertificatePushed event from TCE (new certificate has been received from TCE)\n                                Some(watch_certificates_response::Event::CertificatePushed(\n                                    mut certificate_pushed\n                                )) => {\n                                    info!(\"Certificate {:?} received from the TCE\", &certificate_pushed);\n                                    if let Some(certificate) = certificate_pushed.certificate.take() {\n                                        let cert: Certificate = match certificate.try_into() {\n                                            Ok(c) => c,\n                                            Err(e) => {\n                                                error!(\"Invalid Certificate conversion for  certificate: {e}\");\n                                                continue;\n                                            }\n                                        };\n                                        // Currently only one target stream position is expected\n                                        let position: TargetStreamPosition = match certificate_pushed.positions.first() {\n                                            Some(p) => {\n                                                if let Ok(p) = TryInto::<TargetStreamPosition>::try_into(p.clone()) {\n                                                    p\n                                                } else {\n                                                    error!(\"Invalid target stream position for certificate id {}\",cert.id);\n                                                    continue;\n                                                }\n          
                                  },\n                                            None => {\n                                                error!(\"Invalid target stream position for certificate id {}\",cert.id);\n                                                continue;\n                                            }\n                                        };\n                                        if let Err(e) = inbound_certificate_sender\n                                            .send((cert, position))\n                                            .await\n                                        {\n                                            error!(\n                                                \"Unable to pass received certificate to application: {e}\"\n                                            )\n                                        }\n                                    }\n                                }\n                                // Confirmation from TCE that stream has been opened\n                                Some(watch_certificates_response::Event::StreamOpened(stream_opened)) => {\n                                    info!(\n                                        \"Successfully opened the Certificate stream with the TCE at {} for the subnet(s): {:?}\",\n                                         &tce_endpoint, stream_opened.subnet_ids\n                                    );\n                                }\n                                None => {\n                                    warn!(\n                                        \"Watch certificate stream received None object from the TCE node at {}\", &tce_endpoint\n                                    );\n                                }\n                            },\n                            Err(e) => {\n                                error!(\n                                    \"Failed to open the Certificate stream with the TCE node at {} for the subnet(s): {:?}: 
{}\",\n                                    &tce_endpoint, &subnet_id, e.to_string()\n                                );\n                                // Send warning to restart TCE proxy\n                                if let Some(tce_proxy_event_sender) = tce_proxy_event_sender.clone() {\n                                    if let Err(e) = tce_proxy_event_sender.send(TceProxyEvent::WatchCertificatesChannelFailed).await {\n                                          error!(\"Unable to send watch certificates channel failed signal: {e}\");\n                                    }\n                                }\n                            }\n                        }\n                    }\n                    Some(_) = inbound_shutdown_receiver.recv() => {\n                        info!(\"Finishing watch certificates task...\");\n                        // Finish this task listener\n                        break;\n                    }\n                }\n            }\n            info!(\n                \"Finishing watch certificate task for tce node {} subnet_id {:?}\",\n                &tce_endpoint, &subnet_id\n            );\n        });\n\n        // Channel used to pass commands from the application to the TCE proxy\n        // To close the channel worker task, send None as Certificate\n        let (tce_command_sender, mut tce_command_receiver) =\n            mpsc::channel::<TceClientCommand>(TCE_PROXY_COMMAND_CHANNEL_SIZE);\n\n        // Run task for sending certificates to the TCE stream\n        let tce_endpoint = self\n            .tce_endpoint\n            .as_ref()\n            .ok_or(Error::InvalidTceEndpoint)?\n            .clone();\n\n        let tce_proxy_event_sender = self.tce_proxy_event_sender.clone();\n\n        tokio::spawn(async move {\n            let mut certificate_to_send = FuturesOrdered::new();\n            info!(\n                \"Entering tce proxy command loop for stream {}\",\n                &tce_endpoint\n            );\n     
       loop {\n                tokio::select! {\n                    Some(result) = certificate_to_send.next() => {\n                        match result {\n                            Ok(()) => {\n                                // All good, after one certificate is submitted carry on\n                                continue;\n                            }\n                            Err(e) => {\n                                // Backoff maximum period timeout. We need to restart sequencer.\n                                error!(\"Failed to submit certificate to the tce network, backoff timeout with error: {e}. Restarting sequencer...\");\n                                if let Some(tce_proxy_event_sender) = tce_proxy_event_sender.clone() {\n                                    if let Err(e) = tce_proxy_event_sender.send(TceProxyEvent::TceServiceFailure).await {\n                                          error!(\"Unable to send tce communication failure signal: {e}\");\n                                    }\n                                }\n                            }\n                        }\n                    }\n                    Some(sender) = shutdown.recv() => {\n                        info!(\"Shutdown tce proxy command received...\");\n                        if !certificate_to_send.is_empty() {\n                            info!(\"Waiting for all certificates to be sent...\");\n                            while certificate_to_send.next().await.is_some() {}\n                        }\n\n                        inbound_shutdown_sender.send(()).expect(\"valid channel for shutting down task\");\n\n                        sender.send(()).expect(\"valid channel for shutting down task\");\n                        break;\n                    }\n                    command = tce_command_receiver.recv() => {\n                        match command {\n                           Some(TceClientCommand::SendCertificate {cert, span}) =>  {\n                   
             // Send new certificate to the TCE network\n                                let cert_id = cert.id;\n                                let previous_cert_id = cert.prev_id;\n                                let span = info_span!(parent: &span, \"SendCertificate\", %cert_id, %previous_cert_id, %tce_endpoint);\n                                let context = span.context();\n                                let tce_endpoint = tce_endpoint.clone();\n                                let tce_grpc_client = tce_grpc_client.clone();\n                                let context_backoff = context.clone();\n                                // TODO: Push certificates to the TCE one by one\n                                certificate_to_send.push_back(async move {\n                                    debug!(\"Submitting certificate {} to the TCE using backoff strategy...\", &tce_endpoint);\n                                    let cert = cert.clone();\n                                    let op = || async {\n                                        let mut tce_grpc_client = tce_grpc_client.clone();\n                                        let mut request = SubmitCertificateRequest {\n                                            certificate: Some(topos_core::api::grpc::uci::v1::Certificate::from(*(cert.clone()))),\n                                        }.into_request();\n\n                                        let mut span_context = topos_telemetry::TonicMetaInjector(request.metadata_mut());\n                                        span_context.inject(&context_backoff);\n\n                                        tce_grpc_client\n                                        .submit_certificate(request)\n                                        .with_context(context_backoff.clone())\n                                        .instrument(Span::current())\n                                        .await\n                                        .map(|_response| {\n                         
                   info!(\"Successfully submitted the Certificate {} (previous: {}) to the TCE at {}\",\n                                                &cert_id, &previous_cert_id, &tce_endpoint);\n                                        })\n                                        .map_err(|e| {\n                                            error!(\"Failed to submit the Certificate to the TCE at {}, error: {e}\", &tce_endpoint);\n                                            new_tce_proxy_backoff_err(e)\n                                        })\n                                    };\n\n                                    let backoff_configuration = backoff::ExponentialBackoff {\n                                        max_elapsed_time: Some(TCE_SUBMIT_CERTIFICATE_BACKOFF_TIMEOUT),\n                                        ..Default::default()\n                                    };\n                                    backoff::future::retry(backoff_configuration, op)\n                                        .await\n                                        .map_err(|e| {\n                                            error!(\"Failed to submit certificate to the TCE: {e}\");\n                                           e\n                                        })\n                                }\n                                .with_context(context)\n                                .instrument(span));\n                            }\n                            Some(TceClientCommand::OpenStream {target_checkpoint}) =>  {\n                                // Send command to TCE to open stream with my subnet id\n                                info!(\n                                    \"Sending OpenStream command to the TCE node at {} for the Subnet {}\",\n                                    &tce_endpoint, &subnet_id\n                                );\n                                if let Err(e) = outbound_stream_command_sender\n                                    
.send(\n                                            watch_certificates_request::OpenStream {\n                                                target_checkpoint:\n                                                    Some(target_checkpoint.into()),\n                                                source_checkpoint: None\n                                            }.into(),\n                                    )\n                                    .await\n                                    {\n                                        error!(\n                                            \"Unable to send OpenStream command: {e}\"\n                                        )\n                                    }\n                            }\n                            Some(TceClientCommand::Shutdown) =>  {\n                                info!(\"Shutdown tce proxy command received...\");\n                                inbound_shutdown_sender.send(()).expect(\"valid channel for shutting down task\");\n                                break;\n                            }\n                            Some(TceClientCommand::GetSourceHead {subnet_id, sender}) =>  {\n                                    let result: Result<(Certificate, u64), Error> = match tce_grpc_client\n                                    .get_source_head(GetSourceHeadRequest {\n                                        subnet_id: Some(subnet_id.into())\n                                    })\n                                    .await\n                                    .map(|r| r.into_inner()) {\n                                        Ok(GetSourceHeadResponse {\n                                            position: Some(pos),\n                                            certificate: Some(cert),\n                                        }) => {\n                                            info!(\"Source head certificate acquired from tce, position: {}, certificate: {:?}\", pos.position, &cert);\n        
                                    Ok((cert.try_into().map_err(|_| Error::InvalidCertificate)?,\n                                                pos.position))\n                                        },\n                                        Ok(_) => {\n                                            Err(Error::SourceHeadEmpty{subnet_id})\n                                        },\n                                        Err(e) => {\n                                            Err(Error::UnableToGetSourceHeadCertificate{subnet_id, details: e.to_string()})\n                                        }\n                                    };\n\n                                if sender.send(result).is_err() {\n                                    error!(\"Unable to pass result of the source head, channel failed\");\n                                };\n                            }\n                            Some(TceClientCommand::GetLastPendingCertificates { subnet_ids, sender }) => {\n                                let result =\n                                    match tce_grpc_client\n                                        .get_last_pending_certificates(GetLastPendingCertificatesRequest {\n                                            subnet_ids: subnet_ids.into_iter().map(Into::into).collect(),\n                                        })\n                                        .await\n                                        .map(|r| r.into_inner())\n                                    {\n                                        Ok(response) => {\n                                            let result = response\n                                                .last_pending_certificate\n                                                .into_iter()\n                                                .map(|(subnet_id, last_pending_certificate)| {\n                                                    let subnet_id: SubnetId = TryInto::<SubnetId>::try_into(\n                
                                        Base64::decode_vec(subnet_id.as_str()).map_err(|_| Error::InvalidSubnetId)?.as_slice(),\n                                                    )\n                                                    .map_err(|_| Error::InvalidSubnetId)?;\n\n                                                    let certificate_and_index: Option<(Certificate, u64)> =\n                                                        match last_pending_certificate.value {\n                                                            Some(certificate) => Some(\n                                                                Certificate::try_from(certificate)\n                                                                .map(|certificate| (certificate, last_pending_certificate.index))\n                                                                .map_err(\n                                                                    |e| Error::UnableToGetLastPendingCertificates {\n                                                                        details: e.to_string(),\n                                                                        subnet_id,\n                                                                    },\n                                                                )?,\n                                                            ),\n                                                            None => None,\n                                                        };\n\n\n                                                    Ok((\n                                                        subnet_id,\n                                                        certificate_and_index\n                                                    ))\n                                                })\n                                                .collect::<Result<HashMap<SubnetId, Option<(Certificate, u64)>>, Error>>()?;\n                                          
  Ok(result)\n                                        }\n                                        Err(e) => Err(Error::UnableToGetLastPendingCertificates {\n                                            subnet_id,\n                                            details: e.to_string(),\n                                        }),\n                                    };\n\n                                if sender.send(result).is_err() {\n                                    error!(\"Unable to pass result for the last pending certificates, channel failed\");\n                                };\n                            }\n                            None => {\n                                error!(\"Unexpected termination of the TCE proxy service of the Sequencer\");\n                                break;\n                            }\n                        }\n                    }\n                }\n            }\n            info!(\n                \"Finished submit certificate loop for stream {}\",\n                &tce_endpoint\n            );\n            Result::<(), Error>::Ok(())\n        });\n\n        Ok((\n            TceClient {\n                subnet_id: self.subnet_id.ok_or(Error::InvalidSubnetId)?,\n                tce_endpoint: self.tce_endpoint.ok_or(Error::InvalidTceEndpoint)?,\n                command_sender: tce_command_sender,\n            },\n            tokio_stream::wrappers::ReceiverStream::new(inbound_certificate_receiver),\n        ))\n    }\n}\n"
  },
  {
    "path": "crates/topos-tce-proxy/src/lib.rs",
    "content": "//!\n//! Handles incoming events from the friendly TCE node\n//!\npub mod client;\npub mod worker;\n\nuse opentelemetry::Context;\nuse std::time::Duration;\nuse tonic::transport::channel;\nuse topos_core::api::grpc::checkpoints::TargetStreamPosition;\nuse topos_core::{\n    api::grpc::tce::v1::api_service_client::ApiServiceClient,\n    uci::{Certificate, SubnetId},\n};\nuse tracing::{error, info};\n\n// Maximum backoff retry timeout in seconds (12 hours)\nconst TCE_CONNECT_BACKOFF_TIMEOUT: Duration = Duration::from_secs(12 * 3600);\n\n#[derive(Debug, thiserror::Error)]\npub enum Error {\n    #[error(\"Tonic transport error\")]\n    TonicTransportError {\n        #[from]\n        source: tonic::transport::Error,\n    },\n    #[error(\"Tonic error\")]\n    TonicStatusError {\n        #[from]\n        source: tonic::Status,\n    },\n    #[error(\"Invalid channel error\")]\n    InvalidChannelError,\n    #[error(\"Invalid tce endpoint error\")]\n    InvalidTceEndpoint,\n    #[error(\"Invalid subnet id error\")]\n    InvalidSubnetId,\n    #[error(\"Invalid certificate error\")]\n    InvalidCertificate,\n    #[error(\"Hex conversion error {source}\")]\n    HexConversionError {\n        #[from]\n        source: hex::FromHexError,\n    },\n    #[error(\"Unable to get source head certificate for subnet id {subnet_id}: {details}\")]\n    UnableToGetSourceHeadCertificate {\n        subnet_id: SubnetId,\n        details: String,\n    },\n    #[error(\"Certificate source head empty for subnet id {subnet_id}\")]\n    SourceHeadEmpty { subnet_id: SubnetId },\n    #[error(\"Unable to get last pending certificates for subnet id {subnet_id}: {details}\")]\n    UnableToGetLastPendingCertificates {\n        subnet_id: SubnetId,\n        details: String,\n    },\n}\n\n/// Control the TceProxy\n#[derive(Debug)]\npub enum TceProxyCommand {\n    /// Submit a newly created certificate to the TCE\n    SubmitCertificate {\n        cert: Box<Certificate>,\n        ctx: 
Context,\n    },\n\n    /// Shutdown command\n    Shutdown(tokio::sync::oneshot::Sender<()>),\n}\n\n/// Events related to synchronizing certificates with the TCE network.\n#[derive(Debug, Clone)]\npub enum TceProxyEvent {\n    /// New delivered certificate (and its position) fetched from the TCE network\n    NewDeliveredCerts {\n        certificates: Vec<(Certificate, u64)>,\n        ctx: Context,\n    },\n    /// Failed watching certificates channel. Requires a restart of the sequencer tce proxy to recover.\n    WatchCertificatesChannelFailed,\n    /// Failure in communication with the TCE grpc service. Sequencer needs to be restarted\n    TceServiceFailure,\n}\n\n/// Configuration data for the TCE proxy, used to configure the `TceProxyWorker`.\npub struct TceProxyConfig {\n    /// The [`SubnetId`] this config handles certificate proxying for.\n    pub subnet_id: SubnetId,\n    /// The GRPC endpoint where the Sequencer is expecting to find a TCE node.\n    pub tce_endpoint: String,\n    /// The positions in the index of the known Certificates.\n    pub positions: Vec<TargetStreamPosition>,\n}\n\nasync fn connect_to_tce_service_with_retry(\n    endpoint: String,\n) -> Result<ApiServiceClient<tonic::transport::channel::Channel>, Error> {\n    info!(\n        \"Connecting to the TCE at {} using the exponential backoff strategy...\",\n        endpoint\n    );\n    let op = || async {\n        let channel = channel::Endpoint::from_shared(endpoint.clone())?\n            .connect()\n            .await\n            .map_err(|e| {\n                error!(\"Failed to connect to the TCE at {}: {e}\", &endpoint);\n                e\n            })?;\n        Ok(ApiServiceClient::new(channel))\n    };\n    let backoff_configuration = backoff::ExponentialBackoff {\n        max_elapsed_time: Some(TCE_CONNECT_BACKOFF_TIMEOUT),\n        ..Default::default()\n    };\n    backoff::future::retry(backoff_configuration, op)\n        .await\n        .map_err(|e| {\n            
error!(\"Failed to connect to the TCE at {}: {e}\", &endpoint);\n            Error::TonicTransportError { source: e }\n        })\n}\n"
  },
  {
    "path": "crates/topos-tce-proxy/src/worker.rs",
    "content": "use crate::{client::TceClientBuilder, Error, TceProxyCommand, TceProxyConfig, TceProxyEvent};\nuse opentelemetry::trace::FutureExt;\nuse tokio::sync::{mpsc, oneshot};\nuse tokio_stream::StreamExt;\nuse topos_core::uci::Certificate;\nuse tracing::{error, info, info_span, Instrument, Span};\nuse tracing_opentelemetry::OpenTelemetrySpanExt;\n\n/// Proxy with the TCE\n///\n/// Performs two tasks:\n/// 1) Fetch the certificates that were delivered from the TCE\n/// 2) Submit the new certificates to the TCE\npub struct TceProxyWorker {\n    /// The [`TceProxyConfig`] used to setup this worker.\n    pub config: TceProxyConfig,\n    commands: mpsc::Sender<TceProxyCommand>,\n    events: mpsc::Receiver<TceProxyEvent>,\n}\n\nimpl TceProxyWorker {\n    /// Construct a new [`TceProxyWorker`] with a 128 items deep channel to send commands to and receive events from a TCE node on the given subnet.\n    /// The worker holds a [`crate::client::TceClient`]\n    pub async fn new(config: TceProxyConfig) -> Result<(Self, Option<(Certificate, u64)>), Error> {\n        let (command_sender, mut command_rcv) = mpsc::channel::<TceProxyCommand>(128);\n        let (evt_sender, evt_rcv) = mpsc::channel::<TceProxyEvent>(128);\n        let (tce_client_shutdown_channel, shutdown_receiver) =\n            mpsc::channel::<oneshot::Sender<()>>(1);\n\n        let (mut tce_client, mut receiving_certificate_stream) = TceClientBuilder::default()\n            .set_subnet_id(config.subnet_id)\n            .set_tce_endpoint(&config.tce_endpoint)\n            .set_proxy_event_sender(evt_sender.clone())\n            .build_and_launch(shutdown_receiver)\n            .await?;\n\n        tce_client.open_stream(config.positions.clone()).await?;\n\n        // Get pending certificates from the TCE node. 
Source head certificate\n        // is latest pending certificate for this subnet\n        let source_last_pending_certificate: Option<(Certificate, u64)> = match tce_client\n            .get_last_pending_certificates(vec![tce_client.get_subnet_id()])\n            .await\n        {\n            Ok(mut pending_certificates) => pending_certificates\n                .remove(&tce_client.get_subnet_id())\n                .unwrap_or_default(),\n            Err(e) => {\n                error!(\"Unable to retrieve latest pending certificate {e}\");\n                return Err(e);\n            }\n        };\n        info!(\n            \"Last pending certificate: {:?}\",\n            source_last_pending_certificate\n        );\n\n        let source_last_delivered_certificate = match tce_client.get_source_head().await {\n            Ok(certificate) => Some(certificate),\n            Err(Error::SourceHeadEmpty { subnet_id: _ }) => {\n                // This is also OK, TCE node does not have any data about certificates\n                // We should start certificate production from scratch\n                None\n            }\n            Err(e) => {\n                return Err(e);\n            }\n        };\n        info!(\n            \"Last delivered certificate: {:?}\",\n            source_last_delivered_certificate\n        );\n\n        let source_last_certificate = if source_last_pending_certificate.is_none() {\n            // There are no pending certificates on the TCE\n            // Block height to get next from subnet is position +1\n            source_last_delivered_certificate\n        } else {\n            // Last generated is pending certificate\n            // Block height to get next from subnet is position of the last delivered certificate + index of the pending certificate\n            let delivered_certificate_position = source_last_delivered_certificate\n                .map(|(_cert, position)| position)\n                .unwrap_or_default();\n           
 source_last_pending_certificate\n                .map(|(cert, index)| (cert, delivered_certificate_position + index))\n        };\n\n        tokio::spawn(async move {\n            info!(\n                \"Starting the TCE proxy connected to the TCE at {}\",\n                tce_client.get_tce_endpoint()\n            );\n            loop {\n                tokio::select! {\n                    // process TCE proxy commands received from application\n                    Some(cmd) = command_rcv.recv() => {\n                        match cmd {\n                            TceProxyCommand::SubmitCertificate{cert, ctx} => {\n                                let span = info_span!(\"Sequencer TCE Proxy\");\n                                span.set_parent(ctx);\n                                async {\n                                    info!(\"Submitting new certificate to the TCE network: {}\", &cert.id);\n                                    if let Err(e) = tce_client.send_certificate(*cert).await {\n                                        error!(\"Failure on the submission of the Certificate to the TCE client: {e}\");\n                                    }\n                                }\n                                .with_context(span.context())\n                                .instrument(span)\n                                .await;\n                            }\n                            TceProxyCommand::Shutdown(sender) => {\n                                info!(\"Received TceProxyCommand::Shutdown command, closing tce client...\");\n                                let (killer, waiter) = oneshot::channel::<()>();\n                                tce_client_shutdown_channel.send(killer).await.unwrap();\n                                waiter.await.unwrap();\n\n                                 _ = sender.send(());\n                                break;\n                            }\n                        }\n                    }\n\n                    
 // Process certificates received from the TCE node\n                    Some((cert, target_stream_position)) = receiving_certificate_stream.next() => {\n                        let span = info_span!(\"PushCertificate\");\n                        async {\n                            info!(\"Received certificate from TCE {:?}, target stream position {}\", cert, target_stream_position.position);\n                            if let Err(e) = evt_sender.send(TceProxyEvent::NewDeliveredCerts {\n                                certificates: vec![(cert, target_stream_position.position)],\n                                ctx: Span::current().context()}\n                            )\n                            .await {\n                                error!(\"Unable to send NewDeliveredCerts event {e}\");\n                            }\n                        }\n                        .with_context(span.context())\n                        .instrument(span)\n                        .await;\n                    }\n                }\n            }\n            info!(\n                \"Exiting the TCE proxy worker handle loop connected to the TCE at {}\",\n                tce_client.get_tce_endpoint()\n            );\n        });\n\n        // Save channels and handles, return latest tce known certificate\n        Ok((\n            Self {\n                commands: command_sender,\n                events: evt_rcv,\n                config,\n            },\n            source_last_certificate,\n        ))\n    }\n\n    /// Send commands to TCE\n    pub async fn send_command(&self, cmd: TceProxyCommand) -> Result<(), String> {\n        match self.commands.send(cmd).await {\n            Ok(_) => Ok(()),\n            Err(e) => Err(e.to_string()),\n        }\n    }\n\n    /// Pollable (in select!) 
event listener\n    pub async fn next_event(&mut self) -> Result<TceProxyEvent, String> {\n        let event = self.events.recv().await;\n        Ok(event.unwrap())\n    }\n\n    /// Shut down TCE proxy\n    pub async fn shutdown(&self) -> Result<(), String> {\n        info!(\"Shutting down TCE proxy worker...\");\n        let (sender, receiver) = oneshot::channel();\n        if let Err(e) = self.commands.send(TceProxyCommand::Shutdown(sender)).await {\n            error!(\"Error sending shutdown signal to TCE worker {e}\");\n            return Err(e.to_string());\n        };\n        receiver.await.map_err(|e| e.to_string())\n    }\n}\n"
  },
  {
    "path": "crates/topos-tce-proxy/tests/tce_tests.rs",
    "content": "use base64ct::{Base64, Encoding};\nuse futures::StreamExt;\nuse rstest::*;\nuse std::collections::{HashMap, HashSet};\nuse test_log::test;\nuse tokio::sync::{mpsc, oneshot};\nuse tokio::time::Duration;\nuse topos_core::api::grpc::shared::v1::positions::SourceStreamPosition;\nuse topos_core::api::grpc::shared::v1::{\n    checkpoints::TargetCheckpoint, positions::TargetStreamPosition,\n};\nuse topos_core::api::grpc::shared::v1::{CertificateId, StarkProof, SubnetId};\nuse topos_core::api::grpc::tce::v1::LastPendingCertificate;\nuse topos_core::api::grpc::tce::v1::{\n    watch_certificates_request, watch_certificates_response,\n    watch_certificates_response::CertificatePushed, GetLastPendingCertificatesRequest,\n    GetLastPendingCertificatesResponse, GetSourceHeadRequest, GetSourceHeadResponse,\n    SubmitCertificateRequest,\n};\nuse topos_core::api::grpc::uci::v1::Certificate;\nuse topos_core::types::CertificateDelivered;\nuse topos_core::uci::SUBNET_ID_LENGTH;\nuse topos_tce_proxy::client::{TceClient, TceClientBuilder};\nuse topos_tce_proxy::worker::TceProxyWorker;\nuse topos_tce_proxy::{TceProxyCommand, TceProxyConfig, TceProxyEvent};\nuse topos_test_sdk::tce::{start_node, NodeConfig};\nuse tracing::{debug, error, info, warn};\n\nuse topos_test_sdk::{certificates::create_certificate_chain, constants::*, tce::TceContext};\n\npub const SOURCE_SUBNET_ID_1_NUMBER_OF_PREFILLED_CERTIFICATES: usize = 15;\npub const SOURCE_SUBNET_ID_2_NUMBER_OF_PREFILLED_CERTIFICATES: usize = 10;\n\n#[test(tokio::test)]\nasync fn test_tce_submit_certificate() -> Result<(), Box<dyn std::error::Error>> {\n    let mut context = start_node::partial_2(&[], NodeConfig::standalone()).await;\n\n    let source_subnet_id: SubnetId = SOURCE_SUBNET_ID_1.into();\n    let prev_certificate_id: CertificateId = CERTIFICATE_ID_1.into();\n    let certificate_id: CertificateId = CERTIFICATE_ID_2.into();\n\n    match context\n        .api_grpc_client\n        
.submit_certificate(SubmitCertificateRequest {\n            certificate: Some(Certificate {\n                source_subnet_id: Some(source_subnet_id.clone()),\n                id: Some(certificate_id),\n                prev_id: Some(prev_certificate_id),\n                target_subnets: vec![],\n                state_root: [0u8; 32].to_vec(),\n                tx_root_hash: [0u8; 32].to_vec(),\n                receipts_root_hash: [0u8; 32].to_vec(),\n                verifier: 0,\n                proof: Some(StarkProof { value: Vec::new() }),\n                signature: Some(Default::default()),\n            }),\n        })\n        .await\n        .map(|r| r.into_inner())\n    {\n        Ok(response) => {\n            debug!(\"Certificate successfully submitted {:?}\", response);\n        }\n        Err(e) => {\n            error!(\"Unable to submit the certificate: {e:?}\");\n            return Err(Box::from(e));\n        }\n    };\n    info!(\"Shutting down TCE node client\");\n    context.shutdown().await?;\n    Ok(())\n}\n\n#[test(tokio::test)]\nasync fn test_tce_watch_certificates() -> Result<(), Box<dyn std::error::Error>> {\n    let mut context = start_node::partial_2(&[], NodeConfig::standalone()).await;\n\n    let source_subnet_id: SubnetId = SubnetId {\n        value: [1u8; SUBNET_ID_LENGTH].to_vec(),\n    };\n\n    //Outbound stream\n    let subnet_id_instream = source_subnet_id.clone();\n    let in_stream = async_stream::stream! 
{\n        yield watch_certificates_request::OpenStream {\n            target_checkpoint: Some(TargetCheckpoint {\n                target_subnet_ids: vec![ subnet_id_instream ],\n                positions: Vec::new()\n            }),\n            source_checkpoint: None\n        }.into()\n    };\n\n    let response = context\n        .api_grpc_client\n        .watch_certificates(in_stream)\n        .await\n        .unwrap();\n\n    let mut resp_stream = response.into_inner();\n\n    info!(\"TCE client: waiting for watch certificate response\");\n    while let Some(received) = resp_stream.next().await {\n        info!(\"TCE client received: {:?}\", received);\n        let received = received.unwrap();\n        match received.event {\n            Some(watch_certificates_response::Event::CertificatePushed(CertificatePushed {\n                certificate: Some(certificate),\n                ..\n            })) => {\n                info!(\"Certificate received {:?}\", certificate);\n            }\n            Some(watch_certificates_response::Event::StreamOpened(\n                watch_certificates_response::StreamOpened { subnet_ids },\n            )) => {\n                debug!(\"TCE client: stream opened for subnet_ids {:?}\", subnet_ids);\n                assert_eq!(subnet_ids[0].value, source_subnet_id.value);\n                // We have opened connection and 2 way stream, finishing test\n                break;\n            }\n            Some(watch_certificates_response::Event::CertificatePushed(CertificatePushed {\n                certificate: None,\n                ..\n            })) => {\n                panic!(\"TCE client: empty certificate received\");\n            }\n            _ => {\n                panic!(\"TCE client: something unexpected is received\");\n            }\n        }\n    }\n    info!(\"Shutting down TCE node client\");\n    context.shutdown().await?;\n    Ok(())\n}\n\n#[test(tokio::test)]\nasync fn 
test_tce_get_source_head_certificate() -> Result<(), Box<dyn std::error::Error>> {\n    let mut context = start_node::partial_2(&[], NodeConfig::standalone()).await;\n\n    let source_subnet_id: SubnetId = SOURCE_SUBNET_ID_1.into();\n    let default_cert_id: CertificateId = PREV_CERTIFICATE_ID.into();\n    let certificate_id: CertificateId = CERTIFICATE_ID_2.into();\n\n    // Test get source head certificate for empty TCE history\n    // This will be actual genesis certificate\n    let response = context\n        .api_grpc_client\n        .get_source_head(GetSourceHeadRequest {\n            subnet_id: Some(source_subnet_id.clone()),\n        })\n        .await\n        .map(|r| r.into_inner())\n        .expect(\"valid response\");\n\n    let expected_default_genesis_certificate = Certificate {\n        id: Some(default_cert_id.clone()),\n        prev_id: Some(default_cert_id.clone()),\n        source_subnet_id: Some(source_subnet_id.clone()),\n        target_subnets: vec![],\n        state_root: [0u8; 32].to_vec(),\n        tx_root_hash: [0u8; 32].to_vec(),\n        receipts_root_hash: [0u8; 32].to_vec(),\n        verifier: 0,\n        proof: Some(StarkProof { value: Vec::new() }),\n        signature: Some(Default::default()),\n    };\n    let expected_response = GetSourceHeadResponse {\n        certificate: Some(expected_default_genesis_certificate.clone()),\n        position: Some(SourceStreamPosition {\n            source_subnet_id: Some(source_subnet_id.clone()),\n            certificate_id: expected_default_genesis_certificate.id.clone(),\n            position: 0,\n        }),\n    };\n\n    assert_eq!(response, expected_response);\n\n    let test_certificate = Certificate {\n        source_subnet_id: Some(source_subnet_id.clone()),\n        id: Some(certificate_id),\n        prev_id: Some(default_cert_id),\n        target_subnets: vec![],\n        state_root: [0u8; 32].to_vec(),\n        tx_root_hash: [0u8; 32].to_vec(),\n        receipts_root_hash: [0u8; 
32].to_vec(),\n        verifier: 0,\n        proof: Some(StarkProof { value: Vec::new() }),\n        signature: Some(Default::default()),\n    };\n\n    match context\n        .api_grpc_client\n        .submit_certificate(SubmitCertificateRequest {\n            certificate: Some(test_certificate.clone()),\n        })\n        .await\n        .map(|r| r.into_inner())\n    {\n        Ok(response) => {\n            debug!(\"Successfully submitted the Certificate {:?}\", response);\n        }\n        Err(e) => {\n            error!(\"Unable to submit the certificate: {e:?}\");\n            return Err(Box::from(e));\n        }\n    };\n\n    // Test get source head certificate for non empty certificate history\n    let response = context\n        .api_grpc_client\n        .get_source_head(GetSourceHeadRequest {\n            subnet_id: Some(source_subnet_id.clone()),\n        })\n        .await\n        .map(|r| r.into_inner())\n        .unwrap();\n\n    // TODO: currently only delivered certificates are counted as\n    // head source certificate, so default certificate is expected\n    // Should be updated to count also pending certificates\n    let expected_response = GetSourceHeadResponse {\n        certificate: Some(expected_default_genesis_certificate.clone()),\n        position: Some(SourceStreamPosition {\n            source_subnet_id: Some(source_subnet_id.clone()),\n            certificate_id: expected_default_genesis_certificate.id,\n            position: 0,\n        }),\n    };\n    assert_eq!(response, expected_response);\n\n    info!(\"Shutting down TCE node client\");\n    context.shutdown().await?;\n    Ok(())\n}\n\n#[test(tokio::test)]\nasync fn test_tce_get_last_pending_certificates() -> Result<(), Box<dyn std::error::Error>> {\n    let mut context = start_node::partial_2(&[], NodeConfig::standalone()).await;\n\n    let source_subnet_id: SubnetId = SOURCE_SUBNET_ID_1.into();\n    let certificates = create_certificate_chain(SOURCE_SUBNET_ID_1, 
&[TARGET_SUBNET_ID_1], 10);\n\n    // Test get last pending certificates for empty TCE history\n    // Reply should be empty\n    let response = context\n        .api_grpc_client\n        .get_last_pending_certificates(GetLastPendingCertificatesRequest {\n            subnet_ids: vec![source_subnet_id.clone()],\n        })\n        .await\n        .map(|r| r.into_inner())\n        .expect(\"valid response\");\n\n    let last_pending_certificates = vec![(\n        Base64::encode_string(&source_subnet_id.value),\n        LastPendingCertificate {\n            value: None,\n            index: 0,\n        },\n    )]\n    .into_iter()\n    .collect::<HashMap<String, LastPendingCertificate>>();\n\n    let expected_response = GetLastPendingCertificatesResponse {\n        last_pending_certificate: last_pending_certificates,\n    };\n\n    assert_eq!(response, expected_response);\n\n    for cert in &certificates {\n        match context\n            .api_grpc_client\n            .submit_certificate(SubmitCertificateRequest {\n                certificate: Some(cert.certificate.clone().into()),\n            })\n            .await\n            .map(|r| r.into_inner())\n        {\n            Ok(response) => {\n                debug!(\"Successfully submitted the Certificate {:?}\", response);\n            }\n            Err(e) => {\n                error!(\"Unable to submit the certificate: {e:?}\");\n                return Err(Box::from(e));\n            }\n        };\n    }\n\n    // Test get last pending certificate\n    let response = context\n        .api_grpc_client\n        .get_last_pending_certificates(GetLastPendingCertificatesRequest {\n            subnet_ids: vec![source_subnet_id.clone()],\n        })\n        .await\n        .map(|r| r.into_inner())\n        .expect(\"valid response\");\n\n    let expected_last_pending_certificates = vec![(\n        Base64::encode_string(&source_subnet_id.value),\n        LastPendingCertificate {\n            value: Some(\n          
      certificates\n                    .iter()\n                    .last()\n                    .unwrap()\n                    .clone()\n                    .certificate\n                    .into(),\n            ),\n            index: 10,\n        },\n    )]\n    .into_iter()\n    .collect::<HashMap<String, LastPendingCertificate>>();\n\n    let expected_response = GetLastPendingCertificatesResponse {\n        last_pending_certificate: expected_last_pending_certificates,\n    };\n    assert_eq!(response, expected_response);\n\n    info!(\"Shutting down TCE node client\");\n    context.shutdown().await?;\n    Ok(())\n}\n\n#[rstest]\n#[test(tokio::test)]\n#[timeout(Duration::from_secs(300))]\nasync fn test_tce_open_stream_with_checkpoint(\n    input_certificates: Vec<CertificateDelivered>,\n) -> Result<(), Box<dyn std::error::Error>> {\n    let mut context =\n        start_node::partial_2(&input_certificates[..], NodeConfig::standalone()).await;\n\n    let source_subnet_id_1: SubnetId = SubnetId {\n        value: SOURCE_SUBNET_ID_1.into(),\n    };\n    let source_subnet_id_1_stream_position = 4;\n    let source_subnet_id_1_prefilled_certificates =\n        &input_certificates[0..SOURCE_SUBNET_ID_1_NUMBER_OF_PREFILLED_CERTIFICATES];\n\n    let source_subnet_id_2: SubnetId = SubnetId {\n        value: SOURCE_SUBNET_ID_2.into(),\n    };\n    let source_subnet_id_2_stream_position = 2;\n    let source_subnet_id_2_prefilled_certificates =\n        &input_certificates[SOURCE_SUBNET_ID_1_NUMBER_OF_PREFILLED_CERTIFICATES..];\n\n    let target_subnet_id: SubnetId = SubnetId {\n        value: TARGET_SUBNET_ID_1.into(),\n    };\n\n    // Ask for target checkpoint for 2 subnets, one from position 4, other from position 2\n    let target_checkpoint = TargetCheckpoint {\n        target_subnet_ids: vec![target_subnet_id.clone()],\n        positions: vec![\n            TargetStreamPosition {\n                source_subnet_id: source_subnet_id_1.clone().into(),\n                
target_subnet_id: target_subnet_id.clone().into(),\n                position: source_subnet_id_1_stream_position,\n                certificate_id: Some(\n                    source_subnet_id_1_prefilled_certificates[3]\n                        .certificate\n                        .id\n                        .into(),\n                ),\n            },\n            TargetStreamPosition {\n                source_subnet_id: source_subnet_id_2.clone().into(),\n                target_subnet_id: target_subnet_id.clone().into(),\n                position: source_subnet_id_2_stream_position,\n                certificate_id: Some(\n                    source_subnet_id_2_prefilled_certificates[1]\n                        .certificate\n                        .id\n                        .into(),\n                ),\n            },\n        ],\n    };\n\n    // Make list of expected certificate, first received certificate for every source subnet and its position\n    let mut expected_certs = HashMap::<SubnetId, (Certificate, u64)>::new();\n    expected_certs.insert(\n        input_certificates[4].certificate.source_subnet_id.into(),\n        (input_certificates[4].certificate.clone().into(), 4),\n    );\n    expected_certs.insert(\n        input_certificates[SOURCE_SUBNET_ID_1_NUMBER_OF_PREFILLED_CERTIFICATES + 2]\n            .certificate\n            .source_subnet_id\n            .into(),\n        (\n            input_certificates[SOURCE_SUBNET_ID_1_NUMBER_OF_PREFILLED_CERTIFICATES + 2]\n                .certificate\n                .clone()\n                .into(),\n            2,\n        ),\n    );\n\n    info!(\"Prefilled certificates:\");\n    let mut index = -1;\n    input_certificates\n        .iter()\n        .map(|c| c.certificate.id)\n        .collect::<Vec<_>>()\n        .iter()\n        .for_each(|id| {\n            index += 1;\n            info!(\"{index}: {id}\")\n        });\n\n    //Outbound stream\n    let in_stream = async_stream::stream! 
{\n        yield watch_certificates_request::OpenStream {\n            target_checkpoint: Some(target_checkpoint),\n            source_checkpoint: None\n        }.into()\n    };\n\n    let response = context\n        .api_grpc_client\n        .watch_certificates(in_stream)\n        .await\n        .unwrap();\n\n    let mut resp_stream = response.into_inner();\n\n    info!(\"TCE client: waiting for watch certificate response\");\n\n    while let Some(received) = resp_stream.next().await {\n        debug!(\"TCE client received: {:?}\", received);\n        let received = received.unwrap();\n        match received.event {\n            Some(watch_certificates_response::Event::CertificatePushed(CertificatePushed {\n                certificate: Some(received_certificate),\n                positions,\n            })) => {\n                if let Some((expected_first_certificate_from_subnet, expected_position)) =\n                    expected_certs.get(received_certificate.source_subnet_id.as_ref().unwrap())\n                {\n                    info!(\n                        \"\\n\\nCertificate received: {} source sid {}, target sid {}\",\n                        received_certificate.id.as_ref().unwrap(),\n                        received_certificate.source_subnet_id.as_ref().unwrap(),\n                        received_certificate.target_subnets[0]\n                    );\n                    assert_eq!(\n                        received_certificate,\n                        *expected_first_certificate_from_subnet\n                    );\n                    let received_position = positions.first().unwrap();\n                    assert_eq!(*expected_position, received_position.position);\n                    assert_eq!(\n                        received_position.target_subnet_id.as_ref().unwrap(),\n                        &received_certificate.target_subnets[0]\n                    );\n                    // First certificate received from source subnet, remove it from 
the expected list\n                    expected_certs.remove(received_certificate.source_subnet_id.as_ref().unwrap());\n                    info!(\n                        \"Received valid first certificate from source subnet {} certificate id {}\",\n                        received_certificate.source_subnet_id.as_ref().unwrap(),\n                        received_certificate.id.as_ref().unwrap(),\n                    );\n                } else {\n                    debug!(\n                        \"\\n\\nAdditional certificate received from the source subnet: {} source \\\n                         sid {}, target sid {}\",\n                        received_certificate.id.as_ref().unwrap(),\n                        received_certificate.source_subnet_id.as_ref().unwrap(),\n                        received_certificate.target_subnets[0]\n                    );\n                }\n\n                if expected_certs.is_empty() {\n                    info!(\"All expected certificates received\");\n                    break;\n                }\n            }\n            Some(watch_certificates_response::Event::StreamOpened(\n                watch_certificates_response::StreamOpened { subnet_ids },\n            )) => {\n                debug!(\"TCE client: stream opened for subnet_ids {:?}\", subnet_ids);\n                continue;\n            }\n            Some(watch_certificates_response::Event::CertificatePushed(CertificatePushed {\n                certificate: None,\n                ..\n            })) => {\n                panic!(\"TCE client: empty certificate received\");\n            }\n            _ => {\n                panic!(\"TCE client: something unexpected is received\");\n            }\n        }\n    }\n    info!(\"Shutting down TCE node client\");\n    context.shutdown().await?;\n    Ok(())\n}\n\n#[fixture]\nfn input_certificates() -> Vec<CertificateDelivered> {\n    let mut certificates = Vec::new();\n    certificates.append(&mut 
create_certificate_chain(\n        SOURCE_SUBNET_ID_1,\n        &[TARGET_SUBNET_ID_1],\n        SOURCE_SUBNET_ID_1_NUMBER_OF_PREFILLED_CERTIFICATES,\n    ));\n\n    certificates.append(&mut create_certificate_chain(\n        SOURCE_SUBNET_ID_2,\n        &[TARGET_SUBNET_ID_1],\n        SOURCE_SUBNET_ID_2_NUMBER_OF_PREFILLED_CERTIFICATES,\n    ));\n\n    certificates\n}\n\n#[test(tokio::test)]\nasync fn test_tce_proxy_submit_certificate() -> Result<(), Box<dyn std::error::Error>> {\n    let mut context = start_node::partial_2(&[], NodeConfig::standalone()).await;\n\n    let source_subnet_id = SOURCE_SUBNET_ID_1;\n    let target_subnet_stream_positions = Vec::new();\n\n    let mut certificates = Vec::new();\n    certificates.append(&mut create_certificate_chain(\n        SOURCE_SUBNET_ID_1,\n        &[TARGET_SUBNET_ID_1],\n        5,\n    ));\n    let last_sent_certificate = certificates.last().unwrap().clone().certificate;\n\n    // Create tce proxy client\n    let (tce_proxy_worker, _source_head_certificate_id) =\n        match TceProxyWorker::new(TceProxyConfig {\n            subnet_id: source_subnet_id,\n            tce_endpoint: context.api_entrypoint.clone(),\n            positions: target_subnet_stream_positions,\n        })\n        .await\n        {\n            Ok((tce_proxy_worker, mut source_head_certificate)) => {\n                if let Some((cert, _position)) = &mut source_head_certificate {\n                    if cert.id == CertificateId::default() {\n                        warn!(\n                            \"Tce has not provided source head certificate, starting from subnet \\\n                             genesis block...\"\n                        );\n                        source_head_certificate = None;\n                    }\n                }\n\n                info!(\n                    \"TCE proxy client is starting for the source subnet {:?} from the head {:?}\",\n                    source_subnet_id, source_head_certificate\n           
     );\n                let source_head_certificate_id =\n                    source_head_certificate.map(|(cert, position)| (cert.id, position));\n                (tce_proxy_worker, source_head_certificate_id)\n            }\n            Err(e) => {\n                panic!(\"Unable to create TCE Proxy: {e}\");\n            }\n        };\n\n    for (index, cert) in certificates.into_iter().enumerate() {\n        match tce_proxy_worker\n            .send_command(TceProxyCommand::SubmitCertificate {\n                cert: Box::new(cert.certificate),\n                ctx: Default::default(),\n            })\n            .await\n        {\n            Ok(_) => {\n                info!(\"Certificate {} successfully submitted\", index);\n            }\n            Err(e) => {\n                panic!(\"Error submitting certificate: {e}\");\n            }\n        }\n    }\n\n    // Wait for certificates to be submitted\n    tokio::time::sleep(Duration::from_secs(5)).await;\n\n    // Get last pending certificate to check that all certificates are submitted\n    let (mut tce_client, _receiving_certificate_stream) =\n        create_tce_client(&context.api_entrypoint, SOURCE_SUBNET_ID_1).await?;\n    match tce_client\n        .get_last_pending_certificates(vec![tce_client.get_subnet_id()])\n        .await\n    {\n        Ok(mut pending_certificates) => {\n            let pending_certificate = pending_certificates\n                .remove(&tce_client.get_subnet_id())\n                .unwrap_or_default();\n            info!(\"Last pending certificate: {:?}\", pending_certificate);\n            assert_eq!(pending_certificate.unwrap().0, last_sent_certificate);\n        }\n        Err(e) => {\n            panic!(\"Unable to retrieve latest pending certificate {e}\");\n        }\n    };\n\n    info!(\"Shutting down TCE node client\");\n    context.shutdown().await?;\n    Ok(())\n}\n\nasync fn create_tce_client(\n    endpoint: &str,\n    source_subnet_id: 
topos_core::uci::SubnetId,\n) -> Result<\n    (\n        TceClient,\n        impl futures::stream::Stream<\n            Item = (\n                topos_core::uci::Certificate,\n                topos_core::api::grpc::checkpoints::TargetStreamPosition,\n            ),\n        >,\n    ),\n    Box<dyn std::error::Error>,\n> {\n    let (evt_sender, _evt_rcv) = mpsc::channel::<TceProxyEvent>(128);\n    let (_tce_client_shutdown_channel, shutdown_receiver) = mpsc::channel::<oneshot::Sender<()>>(1);\n\n    let (tce_client, receiving_certificate_stream) = TceClientBuilder::default()\n        .set_subnet_id(source_subnet_id)\n        .set_tce_endpoint(endpoint)\n        .set_proxy_event_sender(evt_sender.clone())\n        .build_and_launch(shutdown_receiver)\n        .await?;\n\n    tce_client.open_stream(Vec::new()).await?;\n\n    Ok((tce_client, receiving_certificate_stream))\n}\n\n#[test(tokio::test)]\nasync fn test_tce_client_submit_and_get_last_pending_certificate(\n) -> Result<(), Box<dyn std::error::Error>> {\n    let mut context = start_node::partial_2(&[], NodeConfig::standalone()).await;\n\n    let mut certificates = Vec::new();\n    certificates.append(&mut create_certificate_chain(\n        SOURCE_SUBNET_ID_1,\n        &[TARGET_SUBNET_ID_1],\n        5,\n    ));\n    let last_sent_certificate = certificates.last().unwrap().clone().certificate;\n\n    let (mut tce_client, _receiving_certificate_stream) =\n        create_tce_client(&context.api_entrypoint, SOURCE_SUBNET_ID_1).await?;\n\n    // Create tce proxy client\n    for (index, cert) in certificates.into_iter().enumerate() {\n        match tce_client.send_certificate(cert.certificate).await {\n            Ok(_) => {\n                info!(\n                    \"Certificate {} successfully submitted by the tce client\",\n                    index\n                );\n            }\n            Err(e) => {\n                panic!(\"Error submitting certificate by the tce client: {e}\");\n            }\n       
 }\n    }\n\n    // Wait for certificates to be submitted\n    tokio::time::sleep(Duration::from_secs(5)).await;\n\n    // Get last pending certificate to check that all certificates are submitted\n    match tce_client\n        .get_last_pending_certificates(vec![tce_client.get_subnet_id()])\n        .await\n    {\n        Ok(mut pending_certificates) => {\n            let pending_certificate = pending_certificates\n                .remove(&tce_client.get_subnet_id())\n                .unwrap_or_default();\n            info!(\"Last pending certificate: {:?}\", pending_certificate);\n            assert_eq!(pending_certificate.unwrap().0, last_sent_certificate);\n        }\n        Err(e) => {\n            panic!(\"Unable to retrieve latest pending certificate {e}\");\n        }\n    };\n\n    info!(\"Shutting down TCE node client\");\n    context.shutdown().await?;\n    Ok(())\n}\n\n#[test(tokio::test)]\nasync fn test_tce_client_get_empty_history_source_head() -> Result<(), Box<dyn std::error::Error>> {\n    let mut context = start_node::partial_2(&[], NodeConfig::standalone()).await;\n\n    let (mut tce_client, _receiving_certificate_stream) =\n        create_tce_client(&context.api_entrypoint, SOURCE_SUBNET_ID_1).await?;\n\n    // Get source head certificate, check if it is empty\n    match tce_client.get_source_head().await {\n        Ok((source_head_cert, position)) => {\n            info!(\n                \"Source head certificate: {:?}, position {}\",\n                source_head_cert, position\n            );\n\n            assert_eq!(source_head_cert.id, CertificateId::from([0u8; 32]));\n            assert_eq!(position, 0);\n        }\n        Err(e) => {\n            panic!(\"Unable to retrieve latest pending certificate {e}\");\n        }\n    };\n    info!(\"Shutting down TCE node client\");\n    context.shutdown().await?;\n    Ok(())\n}\n\n#[rstest]\n#[test(tokio::test)]\nasync fn test_tce_client_get_source_head(\n    input_certificates: 
Vec<CertificateDelivered>,\n) -> Result<(), Box<dyn std::error::Error>> {\n    let mut context =\n        start_node::partial_2(&input_certificates[..], NodeConfig::standalone()).await;\n\n    // Tce is prefilled with delivered certificates\n    let source_subnet_id_1_prefilled_certificates =\n        &input_certificates[0..SOURCE_SUBNET_ID_1_NUMBER_OF_PREFILLED_CERTIFICATES];\n    let last_delivered_certificate = &source_subnet_id_1_prefilled_certificates\n        .last()\n        .unwrap()\n        .certificate;\n\n    let (mut tce_client, _receiving_certificate_stream) =\n        create_tce_client(&context.api_entrypoint, SOURCE_SUBNET_ID_1).await?;\n\n    // Get source head, check if it matches\n    match tce_client.get_source_head().await {\n        Ok((source_head_cert, position)) => {\n            info!(\n                \"Source head certificate: {:?}, position {}\",\n                source_head_cert, position\n            );\n            assert_eq!(source_head_cert, *last_delivered_certificate);\n            assert_eq!(\n                position,\n                SOURCE_SUBNET_ID_1_NUMBER_OF_PREFILLED_CERTIFICATES as u64 - 1\n            );\n        }\n        Err(e) => {\n            panic!(\"Unable to retrieve latest pending certificate {e}\");\n        }\n    };\n\n    // Last pending certificate should be empty\n    match tce_client\n        .get_last_pending_certificates(vec![tce_client.get_subnet_id()])\n        .await\n    {\n        Ok(mut pending_certificates) => {\n            let pending_certificate = pending_certificates\n                .remove(&tce_client.get_subnet_id())\n                .unwrap_or_default();\n            info!(\"Last pending certificates: {:?}\", pending_certificates);\n            assert_eq!(pending_certificate, None);\n        }\n        Err(e) => {\n            panic!(\"Unable to retrieve latest pending certificate {e}\");\n        }\n    };\n\n    info!(\"Shutting down TCE node client\");\n    
context.shutdown().await?;\n    Ok(())\n}\n\n#[rstest]\n#[test(tokio::test)]\n#[timeout(Duration::from_secs(30))]\nasync fn test_tce_client_submit_and_get_certificate_delivered(\n) -> Result<(), Box<dyn std::error::Error>> {\n    let peers_context = topos_test_sdk::tce::create_network(5, &[]).await;\n    let mut peers = peers_context.into_iter();\n    let mut sending_tce: TceContext = peers.next().expect(\"valid peer 1\").1;\n    let mut receiving_tce: TceContext = peers.next().expect(\"valid peer 2\").1;\n\n    let mut certificates = Vec::new();\n    certificates.append(&mut create_certificate_chain(\n        SOURCE_SUBNET_ID_1,\n        &[TARGET_SUBNET_ID_1],\n        5,\n    ));\n    let expected_certs: HashSet<topos_core::uci::CertificateId> = certificates\n        .iter()\n        .map(|cert| cert.certificate.id)\n        .collect();\n\n    // Create tce proxy client for sending subnet\n    let (mut tce_client_source, _) =\n        create_tce_client(&sending_tce.api_entrypoint, SOURCE_SUBNET_ID_1).await?;\n\n    // Create tce proxy client for receiving subnet\n    let (_, mut target_receiving_certificate_stream) =\n        create_tce_client(&receiving_tce.api_entrypoint, TARGET_SUBNET_ID_1).await?;\n\n    // Send certificate from source subnet\n    for (index, cert) in certificates.into_iter().enumerate() {\n        match tce_client_source.send_certificate(cert.certificate).await {\n            Ok(_) => {\n                info!(\n                    \"Certificate {} successfully submitted by the tce client\",\n                    index\n                );\n            }\n            Err(e) => {\n                panic!(\"Error submitting certificate by the tce client: {e}\");\n            }\n        }\n    }\n\n    // Wait for certificates to be submitted\n    tokio::time::sleep(Duration::from_secs(5)).await;\n\n    // Listen for certificates on target subnet\n    info!(\"Waiting for certificates to be received on the target subnet\");\n    let mut 
received_certs = HashSet::new();\n    loop {\n        if let Some((certificate, target_position)) =\n            target_receiving_certificate_stream.next().await\n        {\n            info!(\n                \"Delivered certificate cert id {}, position {:?}\",\n                &certificate.id, target_position\n            );\n            received_certs.insert(certificate.id);\n            if received_certs.len() == expected_certs.len() && received_certs == expected_certs {\n                info!(\"All certificates successfully received\");\n                break;\n            }\n        }\n    }\n\n    info!(\"Shutting down TCE node client\");\n    sending_tce.shutdown().await?;\n    receiving_tce.shutdown().await?;\n    Ok(())\n}\n"
  },
  {
    "path": "crates/topos-tce-storage/Cargo.toml",
    "content": "[package]\nname = \"topos-tce-storage\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n[lints]\nworkspace = true\n\n[dependencies]\ntopos-core = { workspace = true, features = [\"uci\", \"api\"] }\ntopos-metrics = { workspace = true }\n\nasync-stream.workspace = true\nasync-trait.workspace = true\nbincode.workspace = true\nfutures.workspace = true\nserde.workspace = true\nthiserror.workspace = true\ntokio = { workspace = true, features = [\"full\"] }\ntokio-stream.workspace = true\ntracing.workspace = true\nlazy_static.workspace = true\n\nrocksdb = { version = \"0.20.1\", optional = true }\nserde_derive = \"1.0.145\"\nonce_cell = \"1.17\"\narc-swap = \"1.6.0\"\n\n[dev-dependencies]\nrand = { workspace = true, features = [\"default\"] }\nrstest = { workspace = true, features = [\"async-timeout\"] }\nuuid = { workspace = true, features = [\"v4\", \"serde\"] }\ntracing-subscriber = { workspace = true, features = [\"env-filter\", \"fmt\"] }\ntracing.workspace = true\ntest-log.workspace = true\nenv_logger.workspace = true\n\ntopos-test-sdk = { path = \"../topos-test-sdk/\" }\n\n[features]\ndefault = [\"rocksdb\", \"inmemory\"]\ninmemory = []\nrocksdb = [\"dep:rocksdb\"]\n"
  },
  {
    "path": "crates/topos-tce-storage/README.md",
    "content": "# topos-tce-storage\n\nThe library provides the storage layer for the Topos TCE.\nIt is responsible for storing and retrieving the [certificates](https://docs.topos.technology/content/module-1/4-protocol.html#certificates), managing the\npending certificates pool and the certificate status, storing different\nmetadata related to the protocol and the internal state of the TCE.\n\nThe storage layer is implemented using RocksDB.\nThe library exposes multiple stores that are used by the TCE.\n\n\n### Architecture\n\nThe storage layer is composed of multiple stores that are used by the TCE.\nEach store is described in detail in its own module.\n\nThose stores are mainly used in `topos-tce-broadcast`, `topos-tce-api` and\n`topos-tce-synchronizer`.\n\nAs an overview, the storage layer is composed of the following stores:\n\n<picture>\n <source media=\"(prefers-color-scheme: dark)\" srcset=\"https://github.com/topos-protocol/topos/assets/1394604/5bb3c9b1-ac5a-4f59-bd14-29a02163272e\">\n <img alt=\"Text changing depending on mode. Light: 'So light!' 
Dark: 'So dark!'\" src=\"https://github.com/topos-protocol/topos/assets/1394604/e4bd859e-2a6d-40dc-8e84-2a708aa8a2d8\">\n</picture>\n\n#### Definitions and Responsibilities\n\nAs illustrated above, multiple `stores` are exposed in the library using various `tables`.\n\nThe difference between a `store` and a `table` is that the `table` is responsible for storing\nthe data while the `store` manages the data access and its behavior.\n\nHere's the list of the different stores and their responsibilities:\n\n- The [`EpochValidatorsStore`](struct@epoch::EpochValidatorsStore) is responsible for managing the list of validators for each `epoch`.\n- The [`FullNodeStore`](struct@fullnode::FullNodeStore) is responsible for managing all persistent data such as [`Certificate`](struct@topos_core::uci::Certificate) delivered and associated `streams`.\n- The [`IndexStore`](struct@index::IndexStore) is responsible for managing indexes and collect information about the broadcast and the network.\n- The [`ValidatorStore`](struct@validator::ValidatorStore) is responsible for managing the pending data that one validator needs to keep track, such as the certificates pool.\n\nFor more information about a `store`, see the related doc.\n\nNext, we've the list of the different tables and their responsibilities:\n\n- The [`EpochValidatorsTables`](struct@epoch::EpochValidatorsTables) is responsible for storing the list of validators for each `epoch`.\n- The [`ValidatorPerpetualTables`](struct@validator::ValidatorPerpetualTables) is responsible for storing the delivered [`Certificate`](struct@topos_core::uci::Certificate)s and the persistent data related to the Broadcast.\n- The [`ValidatorPendingTables`](struct@validator::ValidatorPendingTables) is responsible for storing the pending data, such as the certificates pool.\n- The [`IndexTables`](struct@index::IndexTables) is responsible for storing indexes about the delivery of [`Certificate`](struct@topos_core::uci::Certificate)s such as `target 
subnet stream`.\n\n### Special Considerations\n\nWhen using the storage layer, be aware of the following:\n- The storage layer uses [rocksdb](https://rocksdb.org/) as the backend, which means don't need an external service, as `rocksdb` is an embedded key-value store.\n- The storage layer uses [`Arc`](struct@std::sync::Arc) to share the stores between threads. It also means that a `store` is only instantiated once.\n- Some storage methods are batching multiple writes into a single transaction.\n\n### Design Philosophy\n\nThe choice of using [rocksdb](https://rocksdb.org/) as a backend was made because it matches a lot of the conditions\nthat we were expected, such as being embedded and having good performances when reading and\nwriting our data.\n\nSplitting storage into multiple `stores` and `tables` allows us to have a strong separation of concerns directly at the storage level.\n\nHowever, `RocksDB` is not the best fit when it comes to compose or filter data based on the data\nitself.\n\nAs mentioned above, the different stores are using [`Arc`](struct@std::sync::Arc), allowing a single store to be instantiated once\nand then shared between threads. This is very useful when it comes to the [`FullNodeStore`](struct@fullnode::FullNodeStore) as it is used\nin various places but should provide single entry point to the data.\n\nIt also means that the store is immutable thus can be shared easily between threads,\nwhich is a good thing for the concurrency.\nHowever, some stores are implementing the [`WriteStore`](trait@store::WriteStore) trait in order to\ninsert or mutate data, managing locks on resources and preventing any other query to mutate the data\ncurrently in processing. For more information about the locks see [`locking`](module@fullnode::locking)\n\nThe rest of the mutation on the data are handled by [rocksdb](https://rocksdb.org/) itself.\n\n"
  },
  {
    "path": "crates/topos-tce-storage/src/client.rs",
    "content": "use std::sync::Arc;\n\nuse topos_core::types::stream::CertificateTargetStreamPosition;\nuse topos_core::types::CertificateDelivered;\nuse topos_core::uci::{Certificate, SubnetId};\n\nuse crate::store::ReadStore;\nuse crate::validator::ValidatorStore;\nuse crate::{\n    errors::StorageError, FetchCertificatesFilter, FetchCertificatesPosition, PendingCertificateId,\n};\n\n#[derive(Clone)]\npub struct StorageClient {\n    store: Arc<ValidatorStore>,\n}\n\nimpl StorageClient {\n    /// Create a new StorageClient\n    pub fn new(store: Arc<ValidatorStore>) -> Self {\n        Self { store }\n    }\n\n    /// Return the list of all source subnets that targeted the given target subnet\n    pub async fn get_target_source_subnet_list(\n        &self,\n        target_subnet_id: SubnetId,\n    ) -> Result<Vec<SubnetId>, StorageError> {\n        self.store.get_target_source_subnet_list(&target_subnet_id)\n    }\n\n    /// Fetch all pending certificates\n    ///\n    /// Return list of pending certificates\n    pub async fn get_pending_certificates(\n        &self,\n    ) -> Result<Vec<(PendingCertificateId, Certificate)>, StorageError> {\n        Ok(self.store.iter_pending_pool()?.collect())\n    }\n\n    pub async fn fetch_certificates(\n        &self,\n        filter: FetchCertificatesFilter,\n    ) -> Result<Vec<(CertificateDelivered, FetchCertificatesPosition)>, StorageError> {\n        match filter {\n            FetchCertificatesFilter::Source { .. 
} => unimplemented!(),\n            FetchCertificatesFilter::Target {\n                target_stream_position,\n                limit,\n            } => self\n                .store\n                .get_target_stream_certificates_from_position(\n                    CertificateTargetStreamPosition::new(\n                        target_stream_position.target_subnet_id,\n                        target_stream_position.source_subnet_id,\n                        target_stream_position.position,\n                    ),\n                    limit,\n                )\n                .map(|values| {\n                    values\n                        .into_iter()\n                        .map(|(certificate, position)| {\n                            (certificate, FetchCertificatesPosition::Target(position))\n                        })\n                        .collect()\n                }),\n        }\n    }\n\n    /// Fetch source head certificate for subnet\n    ///\n    /// Return position of the certificate and certificate itself\n    pub async fn get_source_head(\n        &self,\n        subnet_id: SubnetId,\n    ) -> Result<Option<(u64, Certificate)>, StorageError> {\n        Ok(self.store.get_source_head(&subnet_id)?.and_then(|head| {\n            self.store\n                .get_certificate(&head.certificate_id)\n                .ok()?\n                .map(|certificate| (*head.position, certificate.certificate))\n        }))\n    }\n}\n"
  },
  {
    "path": "crates/topos-tce-storage/src/constant.rs",
    "content": "pub(crate) mod cfs {\n    pub(crate) const CERTIFICATES: &str = \"certificates\";\n    pub(crate) const STREAMS: &str = \"streams\";\n    pub(crate) const EPOCH_CHAIN: &str = \"epoch_chain\";\n    pub(crate) const UNVERIFIED: &str = \"unverified\";\n\n    pub(crate) const PENDING_POOL: &str = \"pending_pool\";\n    pub(crate) const PENDING_POOL_INDEX: &str = \"pending_pool_index\";\n    pub(crate) const PRECEDENCE_POOL: &str = \"precedence_pool\";\n\n    pub(crate) const TARGET_STREAMS: &str = \"target_streams\";\n    pub(crate) const TARGET_SOURCE_LIST: &str = \"target_source_list\";\n    pub(crate) const SOURCE_LIST: &str = \"source_list\";\n    pub(crate) const DELIVERED_CERTIFICATES_PER_SOURCE_FOR_TARGET: &str =\n        \"delivered_certificates_per_source_for_target\";\n\n    pub(crate) const VALIDATORS: &str = \"validators\";\n\n    pub(crate) const EPOCH_SUMMARY: &str = \"epoch_summary\";\n    pub(crate) const BROADCAST_STATES: &str = \"broadcast_states\";\n}\n"
  },
  {
    "path": "crates/topos-tce-storage/src/epoch/mod.rs",
    "content": "use std::path::Path;\nuse std::sync::Arc;\nuse std::{collections::HashMap, sync::RwLock};\n\nuse arc_swap::ArcSwap;\n\nuse crate::errors::StorageError;\nuse crate::types::{EpochId, Validators};\n\npub use self::tables::EpochValidatorsTables;\npub use self::tables::ValidatorPerEpochTables;\n\nmod tables;\n\n/// Epoch contextualized data - can be purged at some point\npub struct ValidatorPerEpochStore {\n    #[allow(unused)]\n    epoch_id: EpochId,\n    #[allow(unused)]\n    validators: RwLock<Validators>,\n    #[allow(unused)]\n    tables: ValidatorPerEpochTables,\n}\n\nimpl ValidatorPerEpochStore {\n    pub fn new(epoch_id: EpochId, path: &Path) -> Result<ArcSwap<Self>, StorageError> {\n        let tables: ValidatorPerEpochTables = ValidatorPerEpochTables::open(epoch_id, path);\n        let store = ArcSwap::from(Arc::new(Self {\n            epoch_id,\n            validators: RwLock::new(Vec::new()),\n            tables,\n        }));\n\n        Ok(store)\n    }\n}\npub struct EpochValidatorsStore {\n    #[allow(unused)]\n    tables: EpochValidatorsTables,\n    #[allow(unused)]\n    caches: RwLock<HashMap<EpochId, Validators>>,\n}\n\nimpl EpochValidatorsStore {\n    pub fn new(path: &Path) -> Result<Arc<Self>, StorageError> {\n        let tables = EpochValidatorsTables::open(path);\n        let store = Arc::new(Self {\n            tables,\n            caches: RwLock::new(HashMap::new()),\n        });\n\n        Ok(store)\n    }\n}\n"
  },
  {
    "path": "crates/topos-tce-storage/src/epoch/tables.rs",
    "content": "use std::{fs::create_dir_all, path::Path};\n\nuse rocksdb::ColumnFamilyDescriptor;\nuse topos_core::uci::CertificateId;\nuse tracing::warn;\n\nuse crate::{\n    constant::cfs,\n    rocks::{\n        db::{default_options, init_db, init_with_cfs},\n        db_column::DBColumn,\n    },\n    types::{BroadcastState, EpochId, Validators, VerifiedCheckpointSummary},\n};\n\npub struct EpochValidatorsTables {\n    #[allow(unused)]\n    validators_map: DBColumn<EpochId, Validators>,\n}\n\nimpl EpochValidatorsTables {\n    pub(crate) fn open(path: &Path) -> Self {\n        let path = path.join(\"validators\");\n        let mut options = rocksdb::Options::default();\n        options.create_if_missing(true);\n        let db = init_db(&path, options).unwrap_or_else(|_| panic!(\"Cannot open DB at {:?}\", path));\n\n        Self {\n            validators_map: DBColumn::reopen(&db, cfs::VALIDATORS),\n        }\n    }\n}\n\n/// Epoch contextualized data - can be purged at some point\npub struct ValidatorPerEpochTables {\n    #[allow(unused)]\n    epoch_summary: DBColumn<EpochSummaryKey, EpochSummaryValue>,\n    #[allow(unused)]\n    broadcast_states: DBColumn<CertificateId, BroadcastState>,\n    #[allow(unused)]\n    validators: Vec<Validators>,\n}\n\nimpl ValidatorPerEpochTables {\n    pub(crate) fn open(epoch_id: EpochId, path: &Path) -> Self {\n        let path = path.join(\"epochs\").join(epoch_id.to_string());\n        if !path.exists() {\n            warn!(\"Path {:?} does not exist, creating it\", path);\n            create_dir_all(&path).expect(\"Cannot create ValidatorPerEpochTables directory\");\n        }\n        let cfs = vec![\n            ColumnFamilyDescriptor::new(cfs::EPOCH_SUMMARY, default_options()),\n            ColumnFamilyDescriptor::new(cfs::BROADCAST_STATES, default_options()),\n        ];\n\n        let db = init_with_cfs(&path, default_options(), cfs)\n            .unwrap_or_else(|_| panic!(\"Cannot open DB at {:?}\", path));\n\n        
Self {\n            epoch_summary: DBColumn::reopen(&db, cfs::EPOCH_SUMMARY),\n            broadcast_states: DBColumn::reopen(&db, cfs::BROADCAST_STATES),\n            validators: Vec::new(),\n        }\n    }\n}\n\n#[allow(unused)]\nenum EpochSummaryKey {\n    EpochId,\n    StartCheckpoint,\n    EndCheckpoint,\n}\n\n#[allow(unused)]\nenum EpochSummaryValue {\n    EpochId(EpochId),\n    StartCheckpoint(VerifiedCheckpointSummary),\n    EndCheckpoint(VerifiedCheckpointSummary),\n}\n"
  },
  {
    "path": "crates/topos-tce-storage/src/errors.rs",
    "content": "use thiserror::Error;\nuse tokio::sync::{mpsc, oneshot};\nuse topos_core::{\n    types::stream::PositionError,\n    uci::{CertificateId, SubnetId, SUBNET_ID_LENGTH},\n};\n\n#[derive(Error, Debug)]\npub enum InternalStorageError {\n    #[error(\"The certificate already exists\")]\n    CertificateAlreadyExists,\n\n    #[error(\"The certificate is already in pending\")]\n    CertificateAlreadyPending,\n\n    #[error(\"Unable to find a certificate: {0:?}\")]\n    CertificateNotFound(CertificateId),\n\n    #[error(\"Unable to start storage\")]\n    UnableToStartStorage,\n\n    #[cfg(feature = \"rocksdb\")]\n    #[error(\"Unable to execute query: {0}\")]\n    RocksDBError(#[from] rocksdb::Error),\n\n    #[cfg(feature = \"rocksdb\")]\n    #[error(\"Accessing invalid column family: {0}\")]\n    InvalidColumnFamily(&'static str),\n\n    #[error(\"Unable to deserialize database value\")]\n    UnableToDeserializeValue,\n\n    #[error(\"Invalid query argument: {0}\")]\n    InvalidQueryArgument(&'static str),\n\n    #[error(\"Unexpected DB state: {0}\")]\n    UnexpectedDBState(&'static str),\n\n    #[error(transparent)]\n    Bincode(#[from] Box<bincode::ErrorKind>),\n\n    #[error(\"A concurrent DBBatch has been detected\")]\n    ConcurrentDBBatchDetected,\n\n    #[error(\"{0}: {1:?}\")]\n    PositionError(#[source] PositionError, [u8; SUBNET_ID_LENGTH]),\n\n    #[error(\"InvalidSubnetId\")]\n    InvalidSubnetId,\n\n    #[error(\"Missing head certificate for source subnet id {0}\")]\n    MissingHeadForSubnet(SubnetId),\n\n    #[error(\"Certificate already exists at position {0} for subnet {1}\")]\n    CertificateAlreadyExistsAtPosition(u64, SubnetId),\n}\n\n#[derive(Debug, Error)]\npub enum StorageError {\n    #[error(transparent)]\n    InternalStorage(#[from] InternalStorageError),\n\n    #[error(\"Unable to communicate with storage: closed\")]\n    CommunicationChannelClosed,\n\n    #[error(\"Unable to receive expected response from storage: {0}\")]\n    
ResponseChannel(#[from] oneshot::error::RecvError),\n\n    #[error(\"Unable to execute shutdown on the storage service: {0}\")]\n    ShutdownCommunication(mpsc::error::SendError<oneshot::Sender<()>>),\n}\n"
  },
  {
    "path": "crates/topos-tce-storage/src/fullnode/locking.rs",
    "content": "use std::{\n    collections::{hash_map::RandomState, HashMap},\n    hash::{BuildHasher, Hash},\n    sync::Arc,\n};\n\nuse tokio::sync::{Mutex, RwLock};\n\nconst LOCK_SHARDING: usize = 2048;\n\ntype LocksVec<T> = Vec<RwLock<HashMap<T, Arc<Mutex<()>>>>>;\n\npub(crate) struct LockGuards<T: Hash + Eq + PartialEq> {\n    locks: Arc<LocksVec<T>>,\n    random_state: RandomState,\n}\n\nimpl<T: Hash + Eq + PartialEq> LockGuards<T> {\n    pub fn new() -> Self {\n        Self {\n            random_state: RandomState::new(),\n            locks: Arc::new(\n                (0..LOCK_SHARDING)\n                    .map(|_| RwLock::new(HashMap::new()))\n                    .collect(),\n            ),\n        }\n    }\n\n    pub async fn get_lock(&self, key: T) -> Arc<Mutex<()>> {\n        let hash = self.random_state.hash_one(&key) as usize;\n        let lock_shard = hash % self.locks.len();\n\n        let lock = {\n            let read = self.locks[lock_shard].read().await;\n\n            read.get(&key).cloned()\n        };\n\n        if let Some(lock) = lock {\n            lock\n        } else {\n            let lock = {\n                let mut write = self.locks[lock_shard].write().await;\n\n                write\n                    .entry(key)\n                    .or_insert_with(|| Arc::new(Mutex::new(())))\n                    .clone()\n            };\n\n            lock\n        }\n    }\n}\n"
  },
  {
    "path": "crates/topos-tce-storage/src/fullnode/mod.rs",
    "content": "use std::{collections::HashMap, path::Path, sync::Arc};\n\nuse arc_swap::ArcSwap;\nuse async_trait::async_trait;\n\nuse rocksdb::properties::ESTIMATE_NUM_KEYS;\nuse tokio::sync::OwnedMutexGuard;\nuse topos_core::{\n    types::{\n        stream::{CertificateSourceStreamPosition, CertificateTargetStreamPosition, Position},\n        CertificateDelivered,\n    },\n    uci::{CertificateId, SubnetId},\n};\nuse tracing::{error, info};\n\nuse crate::{\n    epoch::{EpochValidatorsStore, ValidatorPerEpochStore},\n    errors::{InternalStorageError, StorageError},\n    index::IndexTables,\n    rocks::{map::Map, TargetSourceListKey},\n    store::{ReadStore, WriteStore},\n    validator::ValidatorPerpetualTables,\n    CertificatePositions, SourceHead,\n};\n\nuse self::locking::LockGuards;\n\npub mod locking;\n\n/// Store to manage FullNode data\n///\n/// The [`FullNodeStore`] is responsible for storing and exposing the data that is\n/// needed by a full node to perform its duties.\n///\n/// The responsabilities of the [`FullNodeStore`] are:\n///\n/// - Store and expose the certificates that are delivered\n/// - Store and expose the state of the certificate streams\n///\n/// To do so, it implements [`ReadStore`] / [`WriteStore`] by using multiple tables and store such\n/// as [`ValidatorPerpetualTables`], [`EpochValidatorsStore`] and [`IndexTables`]\npub struct FullNodeStore {\n    certificate_lock_guards: LockGuards<CertificateId>,\n    subnet_lock_guards: LockGuards<SubnetId>,\n    #[allow(unused)]\n    epoch_store: ArcSwap<ValidatorPerEpochStore>,\n    #[allow(unused)]\n    validators_store: Arc<EpochValidatorsStore>,\n    pub(crate) perpetual_tables: Arc<ValidatorPerpetualTables>,\n    pub(crate) index_tables: Arc<IndexTables>,\n}\n\nimpl FullNodeStore {\n    /// Try to create a new instance of [`FullNodeStore`] based on the given path\n    pub fn new(path: &Path) -> Result<Arc<Self>, StorageError> {\n        let perpetual_tables = 
Arc::new(ValidatorPerpetualTables::open(path));\n        let index_tables = Arc::new(IndexTables::open(path));\n\n        let validators_store = EpochValidatorsStore::new(path)?;\n\n        let epoch_store = ValidatorPerEpochStore::new(0, path)?;\n\n        FullNodeStore::open(\n            epoch_store,\n            validators_store,\n            perpetual_tables,\n            index_tables,\n        )\n    }\n    pub fn open(\n        epoch_store: ArcSwap<ValidatorPerEpochStore>,\n        validators_store: Arc<EpochValidatorsStore>,\n        perpetual_tables: Arc<ValidatorPerpetualTables>,\n        index_tables: Arc<IndexTables>,\n    ) -> Result<Arc<Self>, StorageError> {\n        Ok(Arc::new(Self {\n            certificate_lock_guards: LockGuards::new(),\n            subnet_lock_guards: LockGuards::new(),\n            epoch_store,\n            validators_store,\n            perpetual_tables,\n            index_tables,\n        }))\n    }\n\n    /// Await for a [`LockGuards`] for the given certificate id\n    pub(crate) async fn certificate_lock_guard(\n        &self,\n        certificate_id: CertificateId,\n    ) -> OwnedMutexGuard<()> {\n        self.certificate_lock_guards\n            .get_lock(certificate_id)\n            .await\n            .lock_owned()\n            .await\n    }\n\n    /// Await for a [`LockGuards`] for the given subnet id\n    pub(crate) async fn subnet_lock_guard(&self, subnet_id: SubnetId) -> OwnedMutexGuard<()> {\n        self.subnet_lock_guards\n            .get_lock(subnet_id)\n            .await\n            .lock_owned()\n            .await\n    }\n}\n\n#[async_trait]\nimpl WriteStore for FullNodeStore {\n    async fn insert_certificate_delivered(\n        &self,\n        certificate: &CertificateDelivered,\n    ) -> Result<CertificatePositions, StorageError> {\n        // Lock resources for concurrency issues\n        let _cert_guard = self\n            .certificate_lock_guard(certificate.certificate.id)\n            .await;\n\n   
     let _subnet_guard = self\n            .subnet_lock_guard(certificate.certificate.source_subnet_id)\n            .await;\n\n        let subnet_id = certificate.certificate.source_subnet_id;\n        let certificate_id = certificate.certificate.id;\n        let expected_position = certificate.proof_of_delivery.delivery_position.clone();\n\n        let mut batch = self.perpetual_tables.certificates.batch();\n        let mut index_batch = self.index_tables.target_streams.batch();\n\n        // Check position already taken\n        if let Some(delivered_at_position) =\n            self.perpetual_tables.streams.get(&expected_position)?\n        {\n            if delivered_at_position != certificate_id {\n                error!(\n                    \"Expected position {} already taken by {}\",\n                    expected_position, delivered_at_position\n                );\n\n                return Err(StorageError::InternalStorage(\n                    InternalStorageError::CertificateAlreadyExistsAtPosition(\n                        *expected_position.position,\n                        expected_position.subnet_id,\n                    ),\n                ));\n            } else {\n                return Err(StorageError::InternalStorage(\n                    InternalStorageError::CertificateAlreadyExists,\n                ));\n            }\n        }\n\n        let update_stream_position = self\n            .index_tables\n            .source_list\n            .get(&subnet_id)?\n            .and_then(|(_certificate, pos)| {\n                if expected_position.position > pos {\n                    Some((certificate_id, expected_position.position))\n                } else {\n                    None\n                }\n            })\n            .or(Some((certificate_id, expected_position.position)));\n\n        batch = batch.insert_batch(\n            &self.perpetual_tables.certificates,\n            [(&certificate_id, certificate)],\n        )?;\n\n        // 
Adding the certificate to the stream\n        batch = batch.insert_batch(\n            &self.perpetual_tables.streams,\n            [(&expected_position, certificate_id)],\n        )?;\n\n        index_batch = if let Some(current_source_position) = update_stream_position {\n            index_batch.insert_batch(\n                &self.index_tables.source_list,\n                [(&subnet_id, &current_source_position)],\n            )?\n        } else {\n            index_batch\n        };\n\n        // Return list of new target stream positions of certificate that will be persisted\n        // Information is needed by sequencer/subnet contract to know from\n        // where to continue with streaming on restart\n        let mut target_subnet_stream_positions: HashMap<SubnetId, CertificateTargetStreamPosition> =\n            HashMap::new();\n\n        // Adding certificate to target_streams\n        // TODO: Add expected position instead of calculating on the go\n        let mut targets = Vec::new();\n        let source_list_per_target: Vec<_> = certificate\n            .certificate\n            .target_subnets\n            .iter()\n            .map(|target_subnet| ((*target_subnet, subnet_id), true))\n            .collect();\n\n        for target_subnet_id in &certificate.certificate.target_subnets {\n            let target = match self\n                .index_tables\n                .target_streams\n                .prefix_iter(&TargetSourceListKey(*target_subnet_id, subnet_id))?\n                .last()\n            {\n                None => CertificateTargetStreamPosition::new(\n                    *target_subnet_id,\n                    subnet_id,\n                    Position::ZERO,\n                ),\n                Some((mut target_stream_position, _)) => {\n                    target_stream_position.position = target_stream_position\n                        .position\n                        .increment()\n                        .map_err(|error| {\n        
                InternalStorageError::PositionError(error, subnet_id.into())\n                    })?;\n                    target_stream_position\n                }\n            };\n\n            target_subnet_stream_positions.insert(*target_subnet_id, target);\n\n            index_batch = index_batch.insert_batch(\n                &self.index_tables.target_source_list,\n                [(\n                    TargetSourceListKey(*target_subnet_id, subnet_id),\n                    target.position,\n                )],\n            )?;\n\n            targets.push((target, certificate_id));\n        }\n\n        index_batch = index_batch.insert_batch(&self.index_tables.target_streams, targets)?;\n\n        index_batch = index_batch.insert_batch(\n            &self.index_tables.source_list_per_target,\n            source_list_per_target,\n        )?;\n        batch.write()?;\n        index_batch.write()?;\n\n        info!(\n            \"Certificate {} inserted at position {}\",\n            certificate.certificate.id, expected_position\n        );\n\n        Ok(CertificatePositions {\n            targets: target_subnet_stream_positions,\n            source: expected_position,\n        })\n    }\n\n    async fn insert_certificates_delivered(\n        &self,\n        certificates: &[CertificateDelivered],\n    ) -> Result<(), StorageError> {\n        for certificate in certificates {\n            _ = self.insert_certificate_delivered(certificate).await?;\n        }\n        Ok(())\n    }\n}\n\nimpl ReadStore for FullNodeStore {\n    fn count_certificates_delivered(&self) -> Result<u64, StorageError> {\n        Ok(self\n            .perpetual_tables\n            .certificates\n            .property_int_value(ESTIMATE_NUM_KEYS)?)\n    }\n\n    fn get_source_head(&self, subnet_id: &SubnetId) -> Result<Option<SourceHead>, StorageError> {\n        Ok(self\n            .index_tables\n            .source_list\n            .get(subnet_id)?\n            .map(|(certificate_id, 
position)| SourceHead {\n                certificate_id,\n                subnet_id: *subnet_id,\n                position,\n            }))\n    }\n\n    fn get_certificate(\n        &self,\n        certificate_id: &CertificateId,\n    ) -> Result<Option<CertificateDelivered>, StorageError> {\n        Ok(self.perpetual_tables.certificates.get(certificate_id)?)\n    }\n\n    fn get_certificates(\n        &self,\n        certificate_ids: &[CertificateId],\n    ) -> Result<Vec<Option<CertificateDelivered>>, StorageError> {\n        Ok(self\n            .perpetual_tables\n            .certificates\n            .multi_get(certificate_ids)?)\n    }\n\n    fn last_delivered_position_for_subnet(\n        &self,\n        subnet_id: &SubnetId,\n    ) -> Result<Option<CertificateSourceStreamPosition>, StorageError> {\n        Ok(self\n            .perpetual_tables\n            .streams\n            .prefix_iter(subnet_id)?\n            .last()\n            .map(|(k, _)| k))\n    }\n\n    fn get_checkpoint(&self) -> Result<HashMap<SubnetId, SourceHead>, StorageError> {\n        Ok(self\n            .index_tables\n            .source_list\n            .iter()?\n            .map(|(subnet_id, (certificate_id, position))| {\n                (\n                    subnet_id,\n                    SourceHead {\n                        certificate_id,\n                        subnet_id,\n                        position,\n                    },\n                )\n            })\n            .collect())\n    }\n\n    fn get_source_stream_certificates_from_position(\n        &self,\n        from: CertificateSourceStreamPosition,\n        limit: usize,\n    ) -> Result<Vec<(CertificateDelivered, CertificateSourceStreamPosition)>, StorageError> {\n        let starting_position = from.position;\n        let x: Vec<(CertificateId, CertificateSourceStreamPosition)> = self\n            .perpetual_tables\n            .streams\n            .prefix_iter(&from.subnet_id)?\n            
.skip(starting_position.try_into().map_err(|_| {\n                StorageError::InternalStorage(InternalStorageError::InvalidQueryArgument(\n                    \"Unable to parse Position\",\n                ))\n            })?)\n            .take(limit)\n            .map(|(k, v)| (v, k))\n            .collect();\n\n        let certificate_ids: Vec<_> = x.iter().map(|(k, _)| k).cloned().collect();\n\n        let certificates = self\n            .perpetual_tables\n            .certificates\n            .multi_get(&certificate_ids[..])?;\n\n        Ok(x.into_iter()\n            .zip(certificates)\n            .filter_map(|((certificate_id, position), certificate)| {\n                certificate\n                    .filter(|c| c.certificate.id == certificate_id)\n                    .map(|cert| (cert, position))\n            })\n            .collect())\n    }\n\n    fn get_target_stream_certificates_from_position(\n        &self,\n        position: CertificateTargetStreamPosition,\n        limit: usize,\n    ) -> Result<Vec<(CertificateDelivered, CertificateTargetStreamPosition)>, StorageError> {\n        let starting_position = position.position;\n        let prefix = TargetSourceListKey(position.target_subnet_id, position.source_subnet_id);\n\n        let certs_with_positions: Vec<(CertificateId, CertificateTargetStreamPosition)> = self\n            .index_tables\n            .target_streams\n            .prefix_iter(&prefix)?\n            .skip(starting_position.try_into().map_err(|_| {\n                StorageError::InternalStorage(InternalStorageError::InvalidQueryArgument(\n                    \"Unable to parse Position\",\n                ))\n            })?)\n            .take(limit)\n            .map(|(k, v)| (v, k))\n            .collect();\n\n        let certificate_ids: Vec<_> = certs_with_positions\n            .iter()\n            .map(|(k, _)| k)\n            .cloned()\n            .collect();\n\n        let certificates = self\n            
.perpetual_tables\n            .certificates\n            .multi_get(&certificate_ids[..])?;\n\n        Ok(certs_with_positions\n            .into_iter()\n            .zip(certificates)\n            .filter_map(|((certificate_id, position), certificate)| {\n                certificate\n                    .filter(|c| c.certificate.id == certificate_id)\n                    .map(|cert| (cert, position))\n            })\n            .collect())\n    }\n\n    fn get_target_source_subnet_list(\n        &self,\n        target_subnet_id: &SubnetId,\n    ) -> Result<Vec<SubnetId>, StorageError> {\n        Ok(self\n            .index_tables\n            .source_list_per_target\n            .prefix_iter(target_subnet_id)?\n            .map(|((_, source_subnet_id), _)| source_subnet_id)\n            .collect())\n    }\n}\n"
  },
  {
    "path": "crates/topos-tce-storage/src/index/mod.rs",
    "content": "use std::{fs::create_dir_all, path::Path};\n\nuse rocksdb::ColumnFamilyDescriptor;\nuse topos_core::{\n    types::stream::Position,\n    uci::{CertificateId, SubnetId},\n};\nuse tracing::warn;\n\nuse crate::{\n    constant::cfs,\n    rocks::{\n        constants,\n        db::{default_options, init_with_cfs},\n        db_column::DBColumn,\n    },\n    types::{TargetSourceListColumn, TargetStreamsColumn},\n};\n\npub struct IndexStore {}\n\npub struct IndexTables {\n    pub(crate) target_streams: TargetStreamsColumn,\n    pub(crate) target_source_list: TargetSourceListColumn,\n    pub(crate) source_list: DBColumn<SubnetId, (CertificateId, Position)>,\n    pub(crate) source_list_per_target: DBColumn<(SubnetId, SubnetId), bool>,\n}\n\nimpl IndexTables {\n    pub fn open(path: &Path) -> Self {\n        let path = path.join(\"index\");\n        if !path.exists() {\n            warn!(\"Path {:?} does not exist, creating it\", path);\n            create_dir_all(&path).expect(\"Cannot create IndexTables directory\");\n        }\n        let mut options_stream = default_options();\n        options_stream.set_prefix_extractor(rocksdb::SliceTransform::create_fixed_prefix(\n            constants::TARGET_STREAMS_PREFIX_SIZE,\n        ));\n\n        let cfs = vec![\n            ColumnFamilyDescriptor::new(cfs::TARGET_STREAMS, options_stream),\n            ColumnFamilyDescriptor::new(cfs::TARGET_SOURCE_LIST, default_options()),\n            ColumnFamilyDescriptor::new(cfs::SOURCE_LIST, default_options()),\n            ColumnFamilyDescriptor::new(\n                cfs::DELIVERED_CERTIFICATES_PER_SOURCE_FOR_TARGET,\n                default_options(),\n            ),\n        ];\n\n        let db = init_with_cfs(&path, default_options(), cfs)\n            .unwrap_or_else(|_| panic!(\"Cannot open DB at {:?}\", path));\n\n        Self {\n            target_streams: DBColumn::reopen(&db, cfs::TARGET_STREAMS),\n            target_source_list: DBColumn::reopen(&db, 
cfs::TARGET_SOURCE_LIST),\n            source_list: DBColumn::reopen(&db, cfs::SOURCE_LIST),\n            source_list_per_target: DBColumn::reopen(\n                &db,\n                cfs::DELIVERED_CERTIFICATES_PER_SOURCE_FOR_TARGET,\n            ),\n        }\n    }\n}\n"
  },
  {
    "path": "crates/topos-tce-storage/src/lib.rs",
    "content": "//! The library provides the storage layer for the Topos TCE.\n//! It is responsible for storing and retrieving the [certificates](https://docs.topos.technology/content/module-1/4-protocol.html#certificates), managing the\n//! pending certificates pool and the certificate status, storing different\n//! metadata related to the protocol and the internal state of the TCE.\n//!\n//! The storage layer is implemented using RocksDB.\n//! The library exposes multiple stores that are used by the TCE.\n//!\n//!\n//! ## Architecture\n//!\n//! The storage layer is composed of multiple stores that are used by the TCE.\n//! Each store is described in detail in its own module.\n//!\n//! Those stores are mainly used in `topos-tce-broadcast`, `topos-tce-api` and\n//! `topos-tce-synchronizer`.\n//!\n//! As an overview, the storage layer is composed of the following stores:\n//!\n//!<picture>\n//!  <source media=\"(prefers-color-scheme: dark)\" srcset=\"https://github.com/topos-protocol/topos/assets/1394604/5bb3c9b1-ac5a-4f59-bd14-29a02163272e\">\n//!  <img alt=\"Text changing depending on mode. Light: 'So light!' Dark: 'So dark!'\" src=\"https://github.com/topos-protocol/topos/assets/1394604/e4bd859e-2a6d-40dc-8e84-2a708aa8a2d8\">\n//!</picture>\n//!\n//! ### Definitions and Responsibilities\n//!\n//! As illustrated above, multiple `stores` are exposed in the library using various `tables`.\n//!\n//! The difference between a `store` and a `table` is that the `table` is responsible for storing\n//! the data while the `store` manages the data access and its behavior.\n//!\n//! Here's the list of the different stores and their responsibilities:\n//!\n//! - The [`EpochValidatorsStore`](struct@epoch::EpochValidatorsStore) is responsible for managing the list of validators for each `epoch`.\n//! 
- The [`FullNodeStore`](struct@fullnode::FullNodeStore) is responsible for managing all persistent data such as [`Certificate`](struct@topos_core::uci::Certificate) delivered and associated `streams`.\n//! - The [`IndexStore`](struct@index::IndexStore) is responsible for managing indexes and collect information about the broadcast and the network.\n//! - The [`ValidatorStore`](struct@validator::ValidatorStore) is responsible for managing the pending data that one validator needs to keep track, such as the certificates pool.\n//!\n//! For more information about a `store`, see the related doc.\n//!\n//! Next, we've the list of the different tables and their responsibilities:\n//!\n//! - The [`EpochValidatorsTables`](struct@epoch::EpochValidatorsTables) is responsible for storing the list of validators for each `epoch`.\n//! - The [`ValidatorPerpetualTables`](struct@validator::ValidatorPerpetualTables) is responsible for storing the delivered [`Certificate`](struct@topos_core::uci::Certificate)s and the persistent data related to the Broadcast.\n//! - The [`ValidatorPendingTables`](struct@validator::ValidatorPendingTables) is responsible for storing the pending data, such as the certificates pool.\n//! - The [`IndexTables`](struct@index::IndexTables) is responsible for storing indexes about the delivery of [`Certificate`](struct@topos_core::uci::Certificate)s such as `target subnet stream`.\n//!\n//! ## Special Considerations\n//!\n//! When using the storage layer, be aware of the following:\n//! - The storage layer uses [rocksdb](https://rocksdb.org/) as the backend, which means don't need an external service, as `rocksdb` is an embedded key-value store.\n//! - The storage layer uses [`Arc`](struct@std::sync::Arc) to share the stores between threads. It also means that a `store` is only instantiated once.\n//! - Some storage methods are batching multiple writes into a single transaction.\n//!\n//! ## Design Philosophy\n//!\n//! 
The choice of using [rocksdb](https://rocksdb.org/) as a backend was made because it matches a lot of the conditions\n//! that we were expected, such as being embedded and having good performances when reading and\n//! writing our data.\n//!\n//! Splitting storage into multiple `stores` and `tables` allows us to have a strong separation of concerns directly at the storage level.\n//!\n//! However, `RocksDB` is not the best fit when it comes to compose or filter data based on the data\n//! itself.\n//!\n//! As mentioned above, the different stores are using [`Arc`](struct@std::sync::Arc), allowing a single store to be instantiated once\n//! and then shared between threads. This is very useful when it comes to the [`FullNodeStore`](struct@fullnode::FullNodeStore) as it is used\n//! in various places but should provide single entry point to the data.\n//!\n//! It also means that the store is immutable thus can be shared easily between threads,\n//! which is a good thing for the concurrency.\n//! However, some stores are implementing the [`WriteStore`](trait@store::WriteStore) trait in order to\n//! insert or mutate data, managing locks on resources and preventing any other query to mutate the data\n//! currently in processing. For more information about the locks see [`locking`](module@fullnode::locking)\n//!\n//! 
The rest of the mutation on the data are handled by [rocksdb](https://rocksdb.org/) itself.\n//!\nuse serde::{Deserialize, Serialize};\nuse std::collections::HashMap;\n\nuse topos_core::{\n    types::stream::{CertificateSourceStreamPosition, CertificateTargetStreamPosition, Position},\n    uci::{CertificateId, SubnetId},\n};\n\n// v2\npub mod constant;\n/// Epoch related store\npub mod epoch;\n/// Fullnode store\npub mod fullnode;\npub mod index;\npub mod types;\npub mod validator;\n\n// v1\npub mod client;\npub mod errors;\n\n#[cfg(feature = \"rocksdb\")]\npub(crate) mod rocks;\n\n#[cfg(test)]\nmod tests;\n\npub use client::StorageClient;\n\npub mod store;\n\npub type PendingCertificateId = u64;\n\n#[derive(Debug)]\npub enum FetchCertificatesFilter {\n    Source {\n        source_stream_position: CertificateSourceStreamPosition,\n        limit: usize,\n    },\n\n    Target {\n        target_stream_position: CertificateTargetStreamPosition,\n        limit: usize,\n    },\n}\n\n#[derive(Debug)]\npub enum FetchCertificatesPosition {\n    Source(CertificateSourceStreamPosition),\n    Target(CertificateTargetStreamPosition),\n}\n\n#[derive(Debug, Clone)]\npub struct CertificatePositions {\n    pub targets: HashMap<SubnetId, CertificateTargetStreamPosition>,\n    pub source: CertificateSourceStreamPosition,\n}\n\n/// Uniquely identify the source certificate stream head of one subnet.\n/// The head represent the internal state of the TCE regarding a source subnet stream for\n/// certificates that it receives from local sequencer\n#[derive(Serialize, Deserialize, Debug, Clone)]\npub struct SourceHead {\n    /// Certificate id of the head\n    pub certificate_id: CertificateId,\n    /// Subnet id of the head\n    pub subnet_id: SubnetId,\n    /// Position of the Certificate\n    pub position: Position,\n}\n"
  },
  {
    "path": "crates/topos-tce-storage/src/rocks/constants.rs",
    "content": "//! This module is defining constant names for CFs\n\npub(crate) const PENDING_CERTIFICATES: &str = \"PENDING_CERTIFICATES\";\npub(crate) const CERTIFICATES: &str = \"CERTIFICATES\";\n\npub(crate) const SOURCE_STREAMS: &str = \"SOURCE_STREAMS\";\npub(crate) const TARGET_STREAMS: &str = \"TARGET_STREAMS\";\npub(crate) const TARGET_SOURCES: &str = \"TARGET_SOURCES\";\n\npub(crate) const TARGET_STREAMS_PREFIX_SIZE: usize = 32 * 2;\npub(crate) const SOURCE_STREAMS_PREFIX_SIZE: usize = 32;\n"
  },
  {
    "path": "crates/topos-tce-storage/src/rocks/db.rs",
    "content": "use rocksdb::MultiThreaded;\nuse std::{path::PathBuf, sync::Arc};\n\nuse rocksdb::{ColumnFamilyDescriptor, Options};\n\nuse crate::errors::InternalStorageError;\n\nuse super::constants;\n\npub(crate) type RocksDB = Arc<rocksdb::DBWithThreadMode<MultiThreaded>>;\n\npub(crate) fn init_with_cfs(\n    path: &PathBuf,\n    mut options: rocksdb::Options,\n    cfs: Vec<ColumnFamilyDescriptor>,\n) -> Result<RocksDB, InternalStorageError> {\n    options.create_missing_column_families(true);\n\n    Ok(Arc::new(\n        rocksdb::DBWithThreadMode::<MultiThreaded>::open_cf_descriptors(&options, path, cfs)?,\n    ))\n}\npub(crate) fn default_options() -> rocksdb::Options {\n    let mut options = Options::default();\n    options.create_if_missing(true);\n\n    options\n}\n\npub(crate) fn init_db(\n    path: &PathBuf,\n    options: rocksdb::Options,\n) -> Result<RocksDB, InternalStorageError> {\n    let mut options_source = default_options();\n    options_source.set_prefix_extractor(rocksdb::SliceTransform::create_fixed_prefix(\n        constants::SOURCE_STREAMS_PREFIX_SIZE,\n    ));\n\n    let mut options_target = Options::default();\n    options_target.create_if_missing(true);\n    options_target.set_prefix_extractor(rocksdb::SliceTransform::create_fixed_prefix(\n        constants::TARGET_STREAMS_PREFIX_SIZE,\n    ));\n    let cfs = vec![\n        ColumnFamilyDescriptor::new(constants::PENDING_CERTIFICATES, default_options()),\n        ColumnFamilyDescriptor::new(constants::CERTIFICATES, rocksdb::Options::default()),\n        ColumnFamilyDescriptor::new(constants::SOURCE_STREAMS, options_source),\n        ColumnFamilyDescriptor::new(constants::TARGET_STREAMS, options_target),\n        ColumnFamilyDescriptor::new(constants::TARGET_SOURCES, default_options()),\n    ];\n\n    init_with_cfs(path, options, cfs)\n}\n"
  },
  {
    "path": "crates/topos-tce-storage/src/rocks/db_column.rs",
    "content": "use std::{borrow::Borrow, marker::PhantomData, sync::Arc};\n\n#[cfg(test)]\nuse std::path::Path;\n\n#[cfg(test)]\nuse rocksdb::ColumnFamilyDescriptor;\nuse rocksdb::{\n    BoundColumnFamily, CStrLike, DBRawIteratorWithThreadMode, DBWithThreadMode, Direction,\n    IteratorMode, MultiThreaded, ReadOptions, WriteBatch,\n};\n\nuse bincode::Options;\nuse serde::{de::DeserializeOwned, Serialize};\n\nuse crate::errors::InternalStorageError;\n\nuse super::{iterator::ColumnIterator, map::Map, RocksDB};\n\n/// A DBColumn represents a CF structure\n#[derive(Clone, Debug)]\npub struct DBColumn<K, V> {\n    pub(crate) rocksdb: RocksDB,\n    _phantom: PhantomData<fn(K) -> V>,\n    cf: &'static str,\n}\n\nimpl<K, V> DBColumn<K, V> {\n    #[cfg(test)]\n    #[allow(dead_code)]\n    pub fn open<P: AsRef<Path>>(\n        path: P,\n        db_options: Option<rocksdb::Options>,\n        column: &'static str,\n    ) -> Result<Self, InternalStorageError> {\n        let mut options = db_options.unwrap_or_default();\n        let default_rocksdb_options = rocksdb::Options::default();\n\n        let primary = path.as_ref().to_path_buf();\n\n        let rocksdb = {\n            options.create_if_missing(true);\n            options.create_missing_column_families(true);\n            Arc::new(\n                rocksdb::DBWithThreadMode::<rocksdb::MultiThreaded>::open_cf_descriptors(\n                    &options,\n                    primary,\n                    vec![ColumnFamilyDescriptor::new(column, default_rocksdb_options)],\n                )?,\n            )\n        };\n\n        Ok(Self {\n            rocksdb,\n            _phantom: PhantomData,\n            cf: column,\n        })\n    }\n\n    pub fn reopen(db: &RocksDB, column: &'static str) -> Self {\n        Self {\n            rocksdb: db.clone(),\n            _phantom: PhantomData,\n            cf: column,\n        }\n    }\n\n    /// Returns the CF of the DBColumn, used to build queries.\n    pub(crate) fn 
cf(&self) -> Result<Arc<BoundColumnFamily<'_>>, InternalStorageError> {\n        self.rocksdb\n            .cf_handle(self.cf)\n            .ok_or(InternalStorageError::InvalidColumnFamily(self.cf))\n    }\n}\n\nimpl<K, V> DBColumn<K, V>\nwhere\n    K: DeserializeOwned + Serialize + std::fmt::Debug,\n    V: DeserializeOwned + Serialize + std::fmt::Debug,\n{\n    pub(crate) fn property_int_value(\n        &self,\n        property: impl CStrLike,\n    ) -> Result<u64, InternalStorageError> {\n        self.rocksdb\n            .property_int_value_cf(&self.cf()?, property)?\n            .ok_or(InternalStorageError::UnexpectedDBState(\n                \"Property not found\",\n            ))\n    }\n\n    /// Insert a record into the storage by passing a Key and a Value.\n    ///\n    /// Key are fixed length bincode serialized.\n    pub(crate) fn insert(&self, key: &K, value: &V) -> Result<(), InternalStorageError> {\n        let cf = self.cf()?;\n\n        let key_buf = be_fix_int_ser(key)?;\n\n        let value_buf = bincode::serialize(value)?;\n\n        self.rocksdb.put_cf(&cf, key_buf, value_buf)?;\n\n        Ok(())\n    }\n\n    /// Delete a record from the storage by passing a Key\n    ///\n    /// Key are fixed length bincode serialized.\n    pub(crate) fn delete(&self, key: &K) -> Result<(), InternalStorageError> {\n        let key_buf = be_fix_int_ser(key)?;\n\n        self.rocksdb.delete_cf(&self.cf()?, key_buf)?;\n\n        Ok(())\n    }\n\n    /// Get a record from the storage by passing a Key\n    ///\n    /// Key are fixed length bincode serialized.\n    pub(crate) fn get(&self, key: &K) -> Result<Option<V>, InternalStorageError> {\n        let key_buf = be_fix_int_ser(key)?;\n\n        self.rocksdb\n            .get_pinned_cf(&self.cf()?, key_buf)?\n            .map_or(Ok(None), |v| {\n                bincode::deserialize::<V>(&v)\n                    .map(|r| Some(r))\n                    .map_err(|_| InternalStorageError::UnableToDeserializeValue)\n    
        })\n    }\n\n    pub(crate) fn multi_insert(\n        &self,\n        key_value_pairs: impl IntoIterator<Item = (K, V)>,\n    ) -> Result<(), InternalStorageError> {\n        let batch = self.batch();\n\n        batch.insert_batch(self, key_value_pairs)?.write()\n    }\n\n    pub(crate) fn multi_get(&self, keys: &[K]) -> Result<Vec<Option<V>>, InternalStorageError> {\n        let keys: Result<Vec<_>, InternalStorageError> =\n            keys.iter().map(|k| be_fix_int_ser(k)).collect();\n\n        let results: Result<Vec<_>, InternalStorageError> = self\n            .rocksdb\n            .batched_multi_get_cf_opt(&self.cf()?, keys?, false, &ReadOptions::default())\n            .into_iter()\n            .map(|r| r.map_err(InternalStorageError::RocksDBError))\n            .collect();\n\n        results?\n            .into_iter()\n            .map(|e| match e {\n                Some(v) => bincode::deserialize(&v)\n                    .map_err(InternalStorageError::Bincode)\n                    .map(|v| Some(v)),\n                None => Ok(None),\n            })\n            .collect()\n    }\n\n    #[allow(unused)]\n    pub(crate) fn merge(&self, key: &K, value: V) -> Result<(), InternalStorageError> {\n        let key_buf = be_fix_int_ser(key)?;\n        let value_buf = bincode::serialize(&value)?;\n\n        Ok(self.rocksdb.merge_cf(&self.cf()?, key_buf, value_buf)?)\n    }\n\n    pub(crate) fn batch(&self) -> DBBatch {\n        DBBatch::new(&self.rocksdb)\n    }\n}\n\npub(crate) struct DBBatch {\n    rocksdb: Arc<DBWithThreadMode<MultiThreaded>>,\n    batch: WriteBatch,\n}\n\nimpl DBBatch {\n    fn new(rocksdb: &Arc<DBWithThreadMode<MultiThreaded>>) -> Self {\n        Self {\n            rocksdb: rocksdb.clone(),\n            batch: WriteBatch::default(),\n        }\n    }\n\n    pub(crate) fn insert_batch<K, V, Key, Value>(\n        mut self,\n        db: &DBColumn<K, V>,\n        values: impl IntoIterator<Item = (Key, Value)>,\n    ) -> Result<Self, 
InternalStorageError>\n    where\n        K: Serialize + std::fmt::Debug,\n        V: Serialize + std::fmt::Debug,\n        Key: Borrow<K>,\n        Value: Borrow<V>,\n    {\n        check_cross_batch(&self.rocksdb, &db.rocksdb)?;\n\n        values\n            .into_iter()\n            .try_for_each::<_, Result<(), InternalStorageError>>(|(k, v)| {\n                let key_buffer = be_fix_int_ser(k.borrow())?;\n                let value_buffer = bincode::serialize(v.borrow())?;\n                self.batch.put_cf(&db.cf()?, key_buffer, value_buffer);\n                Ok(())\n            })?;\n\n        Ok(self)\n    }\n\n    pub(crate) fn write(self) -> Result<(), InternalStorageError> {\n        self.rocksdb.write(self.batch)?;\n\n        Ok(())\n    }\n}\n\nimpl<'a, K, V> Map<'a, K, V> for DBColumn<K, V>\nwhere\n    K: Serialize + DeserializeOwned,\n    V: Serialize + DeserializeOwned,\n{\n    type Iterator = ColumnIterator<'a, K, V>;\n\n    fn iter(&'a self) -> Result<Self::Iterator, InternalStorageError> {\n        let mut raw_iterator = self.rocksdb.raw_iterator_cf(&self.cf()?);\n        raw_iterator.seek_to_first();\n\n        Ok(ColumnIterator::new(raw_iterator))\n    }\n\n    fn iter_at<I: Serialize>(&'a self, index: &I) -> Result<Self::Iterator, InternalStorageError> {\n        let mut raw_iterator = self.rocksdb.raw_iterator_cf(&self.cf()?);\n\n        raw_iterator.seek(be_fix_int_ser(index)?);\n        Ok(ColumnIterator::new(raw_iterator))\n    }\n\n    fn iter_with_mode(\n        &'a self,\n        mode: IteratorMode<'_>,\n    ) -> Result<Self::Iterator, InternalStorageError> {\n        let mut raw_iterator = self.rocksdb.raw_iterator_cf(&self.cf()?);\n\n        let direction = match mode {\n            IteratorMode::Start => {\n                raw_iterator.seek_to_first();\n                Direction::Forward\n            }\n            IteratorMode::End => {\n                raw_iterator.seek_to_last();\n                Direction::Forward\n            
}\n            _ => unimplemented!(),\n        };\n\n        Ok(ColumnIterator::new_with_direction(raw_iterator, direction))\n    }\n\n    fn prefix_iter<P: Serialize>(\n        &'a self,\n        prefix: &P,\n    ) -> Result<Self::Iterator, InternalStorageError> {\n        let iterator = self\n            .rocksdb\n            .prefix_iterator_cf(&self.cf()?, be_fix_int_ser(prefix)?)\n            .into();\n\n        Ok(ColumnIterator::new(iterator))\n    }\n\n    fn prefix_iter_at<P: Serialize, I: Serialize>(\n        &'a self,\n        prefix: &P,\n        index: &I,\n    ) -> Result<Self::Iterator, InternalStorageError> {\n        let mut iterator: DBRawIteratorWithThreadMode<_> = self\n            .rocksdb\n            .prefix_iterator_cf(&self.cf()?, be_fix_int_ser(prefix)?)\n            .into();\n\n        iterator.seek(be_fix_int_ser(index)?);\n        Ok(ColumnIterator::new(iterator))\n    }\n}\n\n/// Serialize a value using a fix length serialize and a big endian endianness\npub(crate) fn be_fix_int_ser<S>(t: &S) -> Result<Vec<u8>, InternalStorageError>\nwhere\n    S: Serialize + ?Sized,\n{\n    Ok(bincode::DefaultOptions::new()\n        .with_big_endian()\n        .with_fixint_encoding()\n        .serialize(t)?)\n}\n\nfn check_cross_batch(base: &RocksDB, current: &RocksDB) -> Result<(), InternalStorageError> {\n    if !Arc::ptr_eq(base, current) {\n        return Err(InternalStorageError::ConcurrentDBBatchDetected);\n    }\n\n    Ok(())\n}\n"
  },
  {
    "path": "crates/topos-tce-storage/src/rocks/iterator.rs",
    "content": "use std::marker::PhantomData;\n\nuse bincode::Options;\nuse rocksdb::{DBRawIteratorWithThreadMode, DBWithThreadMode, Direction, MultiThreaded};\nuse serde::de::DeserializeOwned;\n\npub struct ColumnIterator<'a, K, V> {\n    iterator: DBRawIteratorWithThreadMode<'a, DBWithThreadMode<MultiThreaded>>,\n    direction: Direction,\n    _phantom: PhantomData<(K, V)>,\n}\n\nimpl<'a, K, V> ColumnIterator<'a, K, V> {\n    /// Creates a new ColumnIterator base on a DBRawIteratorWithThreadMode\n    pub fn new(iterator: DBRawIteratorWithThreadMode<'a, DBWithThreadMode<MultiThreaded>>) -> Self {\n        Self::new_with_direction(iterator, Direction::Forward)\n    }\n\n    pub fn new_with_direction(\n        iterator: DBRawIteratorWithThreadMode<'a, DBWithThreadMode<MultiThreaded>>,\n        direction: Direction,\n    ) -> Self {\n        Self {\n            iterator,\n            direction,\n            _phantom: PhantomData,\n        }\n    }\n}\n\nimpl<'a, K, V> Iterator for ColumnIterator<'a, K, V>\nwhere\n    K: DeserializeOwned,\n    V: DeserializeOwned,\n{\n    type Item = (K, V);\n\n    fn next(&mut self) -> Option<Self::Item> {\n        if self.iterator.valid() {\n            let config = bincode::DefaultOptions::new()\n                .with_big_endian()\n                .with_fixint_encoding();\n\n            let key = self.iterator.key().and_then(|k| config.deserialize(k).ok());\n            let value = self\n                .iterator\n                .value()\n                .and_then(|v| bincode::deserialize(v).ok());\n\n            match self.direction {\n                Direction::Forward => self.iterator.next(),\n                Direction::Reverse => self.iterator.prev(),\n            }\n\n            key.and_then(|k| value.map(|v| (k, v)))\n        } else {\n            None\n        }\n    }\n}\n"
  },
  {
    "path": "crates/topos-tce-storage/src/rocks/map.rs",
    "content": "use rocksdb::IteratorMode;\nuse serde::{de::DeserializeOwned, Serialize};\n\nuse crate::errors::InternalStorageError;\n\npub trait Map<'a, K, V>\nwhere\n    K: Serialize + DeserializeOwned + ?Sized,\n    V: Serialize + DeserializeOwned,\n{\n    type Iterator: Iterator<Item = (K, V)>;\n\n    /// Returns an Iterator over the whole CF\n    fn iter(&'a self) -> Result<Self::Iterator, InternalStorageError>;\n\n    /// Returns an Iterator over the CF starting from index\n    fn iter_at<I: Serialize>(&'a self, index: &I) -> Result<Self::Iterator, InternalStorageError>;\n\n    /// Returns an Iterator over the whole CF with mode configured\n    #[allow(dead_code)]\n    fn iter_with_mode(\n        &'a self,\n        mode: IteratorMode<'_>,\n    ) -> Result<Self::Iterator, InternalStorageError>;\n\n    /// Returns a prefixed Iterator over the CF\n    fn prefix_iter<P: Serialize>(\n        &'a self,\n        prefix: &P,\n    ) -> Result<Self::Iterator, InternalStorageError>;\n\n    /// Returns a prefixed Iterator over the CF starting from index\n    #[allow(dead_code)]\n    fn prefix_iter_at<P: Serialize, I: Serialize>(\n        &'a self,\n        prefix: &P,\n        index: &I,\n    ) -> Result<Self::Iterator, InternalStorageError>;\n}\n"
  },
  {
    "path": "crates/topos-tce-storage/src/rocks/types.rs",
    "content": "use serde::{Deserialize, Serialize};\n\nuse crate::SubnetId;\n\n#[derive(Debug, Serialize, Deserialize)]\npub(crate) struct TargetSourceListKey(\n    // Target subnet id\n    pub(crate) SubnetId,\n    // Source subnet id\n    pub(crate) SubnetId,\n);\n"
  },
  {
    "path": "crates/topos-tce-storage/src/rocks.rs",
    "content": "use self::db::RocksDB;\n\npub(crate) mod constants;\npub(crate) mod db;\npub(crate) mod db_column;\npub(crate) mod iterator;\npub(crate) mod map;\npub(crate) mod types;\n\npub(crate) use types::*;\n"
  },
  {
    "path": "crates/topos-tce-storage/src/store.rs",
    "content": "use std::collections::HashMap;\n\nuse async_trait::async_trait;\nuse topos_core::{\n    types::{stream::CertificateSourceStreamPosition, CertificateDelivered},\n    uci::{CertificateId, SubnetId},\n};\n\nuse crate::{\n    errors::StorageError, CertificatePositions, CertificateTargetStreamPosition, SourceHead,\n};\n\n/// This trait exposes common methods between\n/// [`ValidatorStore`](struct@super::validator::ValidatorStore) and\n/// [`FullNodeStore`](struct@super::fullnode::FullNodeStore) to write data.\n///\n/// All methods are `async` to allow the implementation to deal with write concurrency.\n#[async_trait]\npub trait WriteStore: Send {\n    /// Insert a [`CertificateDelivered`] in the storage. Returns its positions\n    /// in the source and target streams.\n    ///\n    /// The [`ValidatorStore`](struct@super::validator::ValidatorStore) implementation\n    /// checks for a [`PendingCertificateId`](type@super::PendingCertificateId) and remove it if\n    /// the certificate is successfully inserted.\n    async fn insert_certificate_delivered(\n        &self,\n        certificate: &CertificateDelivered,\n    ) -> Result<CertificatePositions, StorageError>;\n\n    /// Insert multiple [`CertificateDelivered`] in the storage.\n    ///\n    /// See [`insert_certificate_delivered`](fn@WriteStore::insert_certificate_delivered) for more\n    /// details\n    async fn insert_certificates_delivered(\n        &self,\n        certificates: &[CertificateDelivered],\n    ) -> Result<(), StorageError>;\n}\n\n/// This trait exposes common methods between\n/// [`ValidatorStore`](struct@super::validator::ValidatorStore) and\n/// [`FullNodeStore`](struct@super::fullnode::FullNodeStore) to read data.\npub trait ReadStore: Send {\n    /// Returns the number of certificates delivered\n    fn count_certificates_delivered(&self) -> Result<u64, StorageError>;\n\n    /// Try to get a SourceHead of a subnet\n    ///\n    /// Returns `Ok(None)` if the subnet is not found, 
meaning that no certificate are currently\n    /// delivered for this particular subnet.\n    fn get_source_head(&self, subnet_id: &SubnetId) -> Result<Option<SourceHead>, StorageError>;\n\n    /// Try to get a [`CertificateDelivered`]\n    ///\n    /// Returns `Ok(None)` if the certificate is not found, meaning that the certificate is either\n    /// inexisting or not yet delivered.\n    fn get_certificate(\n        &self,\n        certificate_id: &CertificateId,\n    ) -> Result<Option<CertificateDelivered>, StorageError>;\n\n    /// Try to get multiple [`CertificateDelivered`] at once.\n    ///\n    /// See [`get_certificate`](fn@ReadStore::get_certificate)\n    fn get_certificates(\n        &self,\n        certificate_ids: &[CertificateId],\n    ) -> Result<Vec<Option<CertificateDelivered>>, StorageError>;\n\n    /// Try to return the latest delivered position for a source subnet\n    fn last_delivered_position_for_subnet(\n        &self,\n        subnet_id: &SubnetId,\n    ) -> Result<Option<CertificateSourceStreamPosition>, StorageError>;\n\n    /// Returns the local checkpoint\n    ///\n    /// A `Checkpoint` is the representation of the state of delivery, it is a list of [`SubnetId`]\n    /// with the associated [`SourceHead`]\n    fn get_checkpoint(&self) -> Result<HashMap<SubnetId, SourceHead>, StorageError>;\n\n    /// Returns the certificates delivered by a source subnet from a position.\n    fn get_source_stream_certificates_from_position(\n        &self,\n        from: CertificateSourceStreamPosition,\n        limit: usize,\n    ) -> Result<Vec<(CertificateDelivered, CertificateSourceStreamPosition)>, StorageError>;\n\n    /// Returns the certificates delivered to a target subnet from a position.\n    fn get_target_stream_certificates_from_position(\n        &self,\n        position: CertificateTargetStreamPosition,\n        limit: usize,\n    ) -> Result<Vec<(CertificateDelivered, CertificateTargetStreamPosition)>, StorageError>;\n\n    /// Returns 
the list of source subnets that delivered certificates to a particular target subnet\n    fn get_target_source_subnet_list(\n        &self,\n        target_subnet_id: &SubnetId,\n    ) -> Result<Vec<SubnetId>, StorageError>;\n}\n"
  },
  {
    "path": "crates/topos-tce-storage/src/tests/checkpoints.rs",
    "content": "use std::{collections::HashMap, sync::Arc};\n\nuse rstest::rstest;\nuse topos_core::uci::SubnetId;\nuse topos_test_sdk::{\n    certificates::create_certificate_chain,\n    constants::{SOURCE_SUBNET_ID_1, SOURCE_SUBNET_ID_2, TARGET_SUBNET_ID_1},\n};\n\nuse super::support::store;\nuse crate::{\n    store::{ReadStore, WriteStore},\n    validator::ValidatorStore,\n};\n\n#[rstest]\n#[tokio::test]\nasync fn get_checkpoint_for_two_subnets(store: Arc<ValidatorStore>) {\n    let certificates_a = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 32);\n    let certificates_b = create_certificate_chain(SOURCE_SUBNET_ID_2, &[TARGET_SUBNET_ID_1], 24);\n\n    for cert in certificates_a {\n        _ = store.insert_certificate_delivered(&cert).await;\n    }\n\n    for cert in certificates_b {\n        _ = store.insert_certificate_delivered(&cert).await;\n    }\n\n    let checkpoint = store\n        .get_checkpoint()\n        .unwrap()\n        .into_iter()\n        .map(|(subnet, value)| (subnet, *value.position))\n        .collect::<HashMap<SubnetId, u64>>();\n\n    assert_eq!(checkpoint.len(), 2);\n    assert_eq!(*checkpoint.get(&SOURCE_SUBNET_ID_1).unwrap(), 31);\n    assert_eq!(*checkpoint.get(&SOURCE_SUBNET_ID_2).unwrap(), 23);\n}\n\n#[rstest]\n#[tokio::test]\nasync fn get_checkpoint_diff_with_no_input(store: Arc<ValidatorStore>) {\n    let certificates_a = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 32);\n    let certificates_b = create_certificate_chain(SOURCE_SUBNET_ID_2, &[TARGET_SUBNET_ID_1], 24);\n\n    for cert in certificates_a {\n        _ = store.insert_certificate_delivered(&cert).await;\n    }\n\n    for cert in certificates_b {\n        _ = store.insert_certificate_delivered(&cert).await;\n    }\n\n    let checkpoint = store\n        .get_checkpoint_diff(&[], 100)\n        .unwrap()\n        .into_iter()\n        .map(|(subnet, proofs)| {\n            (\n                subnet,\n                proofs\n     
               .iter()\n                    .map(|proof| *proof.delivery_position.position)\n                    .collect::<Vec<_>>(),\n            )\n        })\n        .collect::<HashMap<SubnetId, _>>();\n\n    assert_eq!(checkpoint.len(), 2);\n    assert_eq!(\n        *checkpoint.get(&SOURCE_SUBNET_ID_1).unwrap(),\n        (0..=31).collect::<Vec<_>>()\n    );\n    assert_eq!(\n        *checkpoint.get(&SOURCE_SUBNET_ID_2).unwrap(),\n        (0..=23).collect::<Vec<_>>()\n    );\n}\n\n#[rstest]\n#[tokio::test]\nasync fn get_checkpoint_diff_with_input(store: Arc<ValidatorStore>) {\n    let certificates_a = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 32);\n    let certificates_b = create_certificate_chain(SOURCE_SUBNET_ID_2, &[TARGET_SUBNET_ID_1], 24);\n\n    let checkpoint = certificates_a.get(20).unwrap().proof_of_delivery.clone();\n    assert_eq!(*checkpoint.delivery_position.position, 20);\n\n    for cert in certificates_a {\n        _ = store.insert_certificate_delivered(&cert).await;\n    }\n\n    for cert in certificates_b {\n        _ = store.insert_certificate_delivered(&cert).await;\n    }\n\n    let checkpoint = store\n        .get_checkpoint_diff(&[checkpoint], 100)\n        .unwrap()\n        .into_iter()\n        .map(|(subnet, proofs)| {\n            (\n                subnet,\n                proofs\n                    .iter()\n                    .map(|proof| *proof.delivery_position.position)\n                    .collect::<Vec<_>>(),\n            )\n        })\n        .collect::<HashMap<SubnetId, _>>();\n\n    assert_eq!(checkpoint.len(), 2);\n    assert_eq!(\n        *checkpoint.get(&SOURCE_SUBNET_ID_1).unwrap(),\n        (21..=31).collect::<Vec<_>>()\n    );\n    assert_eq!(\n        *checkpoint.get(&SOURCE_SUBNET_ID_2).unwrap(),\n        (0..=23).collect::<Vec<_>>()\n    );\n}\n"
  },
  {
    "path": "crates/topos-tce-storage/src/tests/db_columns.rs",
    "content": "use rstest::rstest;\nuse test_log::test;\nuse topos_core::types::stream::CertificateSourceStreamPosition;\nuse topos_core::uci::Certificate;\nuse topos_test_sdk::certificates::create_certificate_at_position;\nuse topos_test_sdk::constants::SOURCE_SUBNET_ID_1;\n\nuse crate::rocks::map::Map;\nuse crate::tests::{PREV_CERTIFICATE_ID, SOURCE_STORAGE_SUBNET_ID};\nuse crate::types::{CertificatesColumn, PendingCertificatesColumn, StreamsColumn};\nuse crate::Position;\n\nuse super::support::columns::{certificates_column, pending_column, source_streams_column};\n\n#[rstest]\n#[test(tokio::test)]\nasync fn can_persist_a_pending_certificate(pending_column: PendingCertificatesColumn) {\n    let certificate =\n        Certificate::new_with_default_fields(PREV_CERTIFICATE_ID, SOURCE_SUBNET_ID_1, &[]).unwrap();\n\n    assert!(pending_column.insert(&0, &certificate).is_ok());\n    assert_eq!(pending_column.get(&0).unwrap(), Some(certificate));\n}\n\n#[rstest]\n#[test(tokio::test)]\nasync fn can_persist_a_delivered_certificate(certificates_column: CertificatesColumn) {\n    let certificate =\n        Certificate::new_with_default_fields(PREV_CERTIFICATE_ID, SOURCE_SUBNET_ID_1, &Vec::new())\n            .unwrap();\n\n    let certificate = create_certificate_at_position(Position::ZERO, certificate);\n    assert!(certificates_column\n        .insert(&certificate.certificate.id, &certificate)\n        .is_ok());\n    assert_eq!(\n        certificates_column\n            .get(&certificate.certificate.id)\n            .unwrap(),\n        Some(certificate)\n    );\n}\n\n#[rstest]\n#[test(tokio::test)]\nasync fn delivered_certificate_position_are_incremented(\n    certificates_column: CertificatesColumn,\n    source_streams_column: StreamsColumn,\n) {\n    let certificate =\n        Certificate::new_with_default_fields(PREV_CERTIFICATE_ID, SOURCE_SUBNET_ID_1, &[]).unwrap();\n\n    let certificate = create_certificate_at_position(Position::ZERO, certificate);\n    
assert!(certificates_column\n        .insert(&certificate.certificate.id, &certificate)\n        .is_ok());\n    assert!(source_streams_column\n        .insert(\n            &CertificateSourceStreamPosition::new(SOURCE_STORAGE_SUBNET_ID, Position::ZERO),\n            &certificate.certificate.id\n        )\n        .is_ok());\n}\n\n#[rstest]\n#[test(tokio::test)]\nasync fn position_can_be_fetch_for_one_subnet(source_streams_column: StreamsColumn) {\n    let certificate =\n        Certificate::new_with_default_fields(PREV_CERTIFICATE_ID, SOURCE_SUBNET_ID_1, &[]).unwrap();\n\n    assert!(source_streams_column\n        .insert(\n            &CertificateSourceStreamPosition::new(SOURCE_STORAGE_SUBNET_ID, Position::ZERO),\n            &certificate.id\n        )\n        .is_ok());\n\n    assert!(matches!(\n        source_streams_column\n            .prefix_iter(&SOURCE_SUBNET_ID_1)\n            .unwrap()\n            .last(),\n        Some((\n            CertificateSourceStreamPosition {\n                position: Position::ZERO,\n                ..\n            },\n            _\n        ))\n    ));\n\n    let certificate =\n        Certificate::new_with_default_fields(PREV_CERTIFICATE_ID, SOURCE_SUBNET_ID_1, &[]).unwrap();\n\n    assert!(source_streams_column\n        .insert(\n            &CertificateSourceStreamPosition::new(SOURCE_STORAGE_SUBNET_ID, 1),\n            &certificate.id\n        )\n        .is_ok());\n\n    let expected_position: Position = 1.into();\n\n    assert!(matches!(\n        source_streams_column\n            .prefix_iter(&SOURCE_SUBNET_ID_1)\n            .unwrap()\n            .last(),\n        Some((\n            CertificateSourceStreamPosition {\n                position,\n                ..\n            },\n            _\n        )) if expected_position == position\n    ));\n}\n\n#[test(tokio::test)]\n#[ignore = \"not yet implemented\"]\nasync fn position_can_be_fetch_for_multiple_subnets() {}\n\n#[test(tokio::test)]\n#[ignore = \"not yet 
implemented\"]\nasync fn position_can_be_fetch_for_all_subnets() {}\n"
  },
  {
    "path": "crates/topos-tce-storage/src/tests/mod.rs",
    "content": "use rstest::rstest;\nuse std::sync::Arc;\nuse test_log::test;\nuse topos_core::{\n    types::{\n        stream::{CertificateSourceStreamPosition, CertificateTargetStreamPosition, Position},\n        CertificateDelivered, ProofOfDelivery,\n    },\n    uci::{Certificate, SubnetId},\n};\n\nuse crate::{\n    errors::StorageError,\n    rocks::map::Map,\n    store::{ReadStore, WriteStore},\n    validator::ValidatorStore,\n};\n\nuse self::support::store;\n\nuse topos_test_sdk::certificates::create_certificate_chain;\nuse topos_test_sdk::constants::*;\n\nmod checkpoints;\nmod db_columns;\nmod pending_certificates;\nmod position;\nmod rocks;\npub(crate) mod support;\n\nconst SOURCE_STORAGE_SUBNET_ID: SubnetId = SOURCE_SUBNET_ID_1;\nconst TARGET_STORAGE_SUBNET_ID_1: SubnetId = TARGET_SUBNET_ID_1;\nconst TARGET_STORAGE_SUBNET_ID_2: SubnetId = TARGET_SUBNET_ID_2;\n\n#[rstest]\n#[tokio::test]\nasync fn can_persist_a_pending_certificate(store: Arc<ValidatorStore>) {\n    let certificate =\n        Certificate::new_with_default_fields(PREV_CERTIFICATE_ID, SOURCE_SUBNET_ID_1, &[]).unwrap();\n\n    assert!(store.insert_pending_certificate(&certificate).await.is_ok());\n}\n\n#[rstest]\n#[test(tokio::test)]\nasync fn can_persist_a_delivered_certificate(store: Arc<ValidatorStore>) {\n    let certificate = Certificate::new_with_default_fields(\n        PREV_CERTIFICATE_ID,\n        SOURCE_SUBNET_ID_1,\n        &[TARGET_SUBNET_ID_1],\n    )\n    .unwrap();\n    let certificate_id = certificate.id;\n    let certificate = CertificateDelivered {\n        certificate,\n        proof_of_delivery: ProofOfDelivery {\n            delivery_position: CertificateSourceStreamPosition::new(\n                SOURCE_SUBNET_ID_1,\n                Position::ZERO,\n            ),\n            readies: vec![],\n            threshold: 0,\n            certificate_id,\n        },\n    };\n\n    store\n        .insert_certificate_delivered(&certificate)\n        .await\n        .unwrap();\n\n  
  let certificates_table = store.fullnode_store.perpetual_tables.certificates.clone();\n    let streams_table = store.fullnode_store.perpetual_tables.streams.clone();\n    let targets_streams_table = store.fullnode_store.index_tables.target_streams.clone();\n\n    assert!(certificates_table.get(&certificate.certificate.id).is_ok());\n\n    let stream_element = streams_table\n        .prefix_iter(&SOURCE_SUBNET_ID_1)\n        .unwrap()\n        .last()\n        .unwrap();\n\n    assert_eq!(stream_element.0.position, Position::ZERO);\n\n    let stream_element = targets_streams_table\n        .prefix_iter::<(SubnetId, SubnetId)>(&(\n            TARGET_STORAGE_SUBNET_ID_1,\n            SOURCE_STORAGE_SUBNET_ID,\n        ))\n        .unwrap()\n        .last()\n        .unwrap();\n\n    assert_eq!(stream_element.0.position, Position::ZERO);\n    assert_eq!(stream_element.1, certificate.certificate.id);\n}\n\n#[rstest]\n#[test(tokio::test)]\nasync fn cannot_persist_a_delivered_certificate_twice(store: Arc<ValidatorStore>) {\n    let certificate = Certificate::new_with_default_fields(\n        PREV_CERTIFICATE_ID,\n        SOURCE_SUBNET_ID_1,\n        &[TARGET_SUBNET_ID_1],\n    )\n    .unwrap();\n\n    let certificate_id = certificate.id;\n    let certificate = CertificateDelivered {\n        certificate,\n        proof_of_delivery: ProofOfDelivery {\n            delivery_position: CertificateSourceStreamPosition::new(\n                SOURCE_SUBNET_ID_1,\n                Position::ZERO,\n            ),\n            readies: vec![],\n            threshold: 0,\n            certificate_id,\n        },\n    };\n\n    store\n        .insert_certificate_delivered(&certificate)\n        .await\n        .unwrap();\n\n    let result = store.insert_certificate_delivered(&certificate).await;\n\n    assert!(result.is_err());\n    assert!(matches!(\n        result,\n        Err(StorageError::InternalStorage(\n            crate::errors::InternalStorageError::CertificateAlreadyExists\n 
       ))\n    ));\n}\n\n#[rstest]\n#[test(tokio::test)]\nasync fn cannot_persist_a_delivered_certificate_at_same_position(store: Arc<ValidatorStore>) {\n    let certificate = Certificate::new_with_default_fields(\n        PREV_CERTIFICATE_ID,\n        SOURCE_SUBNET_ID_1,\n        &[TARGET_SUBNET_ID_1],\n    )\n    .unwrap();\n\n    let certificate_2 = Certificate::new_with_default_fields(\n        PREV_CERTIFICATE_ID,\n        SOURCE_SUBNET_ID_1,\n        &[TARGET_SUBNET_ID_1, TARGET_SUBNET_ID_2],\n    )\n    .unwrap();\n\n    let certificate_id = certificate.id;\n    let certificate_id_2 = certificate_2.id;\n\n    assert_ne!(certificate_id, certificate_id_2);\n\n    let certificate = CertificateDelivered {\n        certificate,\n        proof_of_delivery: ProofOfDelivery {\n            delivery_position: CertificateSourceStreamPosition::new(\n                SOURCE_SUBNET_ID_1,\n                Position::ZERO,\n            ),\n            readies: vec![],\n            threshold: 0,\n            certificate_id,\n        },\n    };\n\n    let certificate_2 = CertificateDelivered {\n        certificate: certificate_2,\n        proof_of_delivery: ProofOfDelivery {\n            delivery_position: CertificateSourceStreamPosition::new(\n                SOURCE_SUBNET_ID_1,\n                Position::ZERO,\n            ),\n            readies: vec![],\n            threshold: 0,\n            certificate_id: certificate_id_2,\n        },\n    };\n\n    store\n        .insert_certificate_delivered(&certificate)\n        .await\n        .unwrap();\n\n    let result = store.insert_certificate_delivered(&certificate_2).await;\n\n    assert!(result.is_err());\n    assert!(matches!(\n        result,\n        Err(StorageError::InternalStorage(\n            crate::errors::InternalStorageError::CertificateAlreadyExistsAtPosition(_, _)\n        ))\n    ));\n}\n\n#[rstest]\n#[test(tokio::test)]\nasync fn delivered_certificate_are_added_to_target_stream(store: Arc<ValidatorStore>) {\n  
  let certificates_column = store.fullnode_store.perpetual_tables.certificates.clone();\n    let source_streams_column = store.fullnode_store.perpetual_tables.streams.clone();\n    let target_streams_column = store.fullnode_store.index_tables.target_streams.clone();\n\n    target_streams_column\n        .insert(\n            &CertificateTargetStreamPosition::new(\n                TARGET_STORAGE_SUBNET_ID_1,\n                SOURCE_STORAGE_SUBNET_ID,\n                Position::ZERO,\n            ),\n            &CERTIFICATE_ID_1,\n        )\n        .unwrap();\n\n    let certificate = Certificate::new_with_default_fields(\n        CERTIFICATE_ID_1,\n        SOURCE_SUBNET_ID_1,\n        &[TARGET_SUBNET_ID_1, TARGET_SUBNET_ID_2],\n    )\n    .unwrap();\n\n    let certificate_id = certificate.id;\n    let certificate = CertificateDelivered {\n        certificate,\n        proof_of_delivery: ProofOfDelivery {\n            delivery_position: CertificateSourceStreamPosition::new(\n                SOURCE_SUBNET_ID_1,\n                Position::ZERO,\n            ),\n            readies: vec![],\n            threshold: 0,\n            certificate_id,\n        },\n    };\n    store\n        .insert_certificate_delivered(&certificate)\n        .await\n        .unwrap();\n\n    assert!(certificates_column.get(&certificate_id).is_ok());\n\n    let stream_element = source_streams_column\n        .prefix_iter(&SOURCE_SUBNET_ID_1)\n        .unwrap()\n        .last()\n        .unwrap();\n\n    assert_eq!(stream_element.0.position, Position::ZERO);\n\n    let stream_element = target_streams_column\n        .prefix_iter(&(&TARGET_STORAGE_SUBNET_ID_1, &SOURCE_STORAGE_SUBNET_ID))\n        .unwrap()\n        .last()\n        .unwrap();\n\n    assert_eq!(*stream_element.0.position, 1);\n\n    let stream_element = target_streams_column\n        .prefix_iter(&(&TARGET_STORAGE_SUBNET_ID_2, &SOURCE_STORAGE_SUBNET_ID))\n        .unwrap()\n        .last()\n        .unwrap();\n\n    
assert_eq!(stream_element.0.position, Position::ZERO);\n}\n\n#[rstest]\n#[test(tokio::test)]\nasync fn pending_certificate_are_removed_during_persist_action(store: Arc<ValidatorStore>) {\n    let pending_column = store.pending_tables.pending_pool.clone();\n\n    let certificate = Certificate::new_with_default_fields(\n        PREV_CERTIFICATE_ID,\n        SOURCE_SUBNET_ID_1,\n        &[TARGET_SUBNET_ID_1],\n    )\n    .unwrap();\n\n    let certificate_id = certificate.id;\n    let pending_id = store\n        .insert_pending_certificate(&certificate)\n        .await\n        .unwrap()\n        .unwrap();\n\n    let certificate = CertificateDelivered {\n        certificate,\n        proof_of_delivery: ProofOfDelivery {\n            certificate_id,\n            delivery_position: CertificateSourceStreamPosition::new(\n                SOURCE_SUBNET_ID_1,\n                Position::ZERO,\n            ),\n            readies: vec![],\n            threshold: 0,\n        },\n    };\n    assert!(pending_column.get(&pending_id).is_ok());\n    store\n        .insert_certificate_delivered(&certificate)\n        .await\n        .unwrap();\n\n    assert!(matches!(pending_column.get(&pending_id), Ok(None)));\n}\n\n#[rstest]\n#[test(tokio::test)]\nasync fn fetch_certificates_for_subnets(store: Arc<ValidatorStore>) {\n    let other_certificate = Certificate::new_with_default_fields(\n        PREV_CERTIFICATE_ID,\n        TARGET_SUBNET_ID_2,\n        &[TARGET_SUBNET_ID_1],\n    )\n    .unwrap();\n\n    let certificate_id = other_certificate.id;\n    let other_certificate = CertificateDelivered {\n        certificate: other_certificate,\n        proof_of_delivery: ProofOfDelivery {\n            certificate_id,\n            delivery_position: CertificateSourceStreamPosition::new(\n                TARGET_SUBNET_ID_2,\n                Position::ZERO,\n            ),\n            readies: vec![],\n            threshold: 0,\n        },\n    };\n\n    store\n        
.insert_certificate_delivered(&other_certificate)\n        .await\n        .unwrap();\n\n    let mut expected_certificates =\n        create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 10)\n            .into_iter()\n            .enumerate()\n            .map(|(index, v)| CertificateDelivered {\n                certificate: v.certificate.clone(),\n                proof_of_delivery: ProofOfDelivery {\n                    certificate_id: v.certificate.id,\n                    delivery_position: CertificateSourceStreamPosition::new(\n                        SOURCE_SUBNET_ID_1,\n                        index as u64,\n                    ),\n                    readies: vec![],\n                    threshold: 0,\n                },\n            })\n            .collect::<Vec<_>>();\n\n    for cert in &expected_certificates {\n        store.insert_certificate_delivered(cert).await.unwrap();\n    }\n\n    let mut certificate_ids = store\n        .get_source_stream_certificates_from_position(\n            CertificateSourceStreamPosition::new(SOURCE_STORAGE_SUBNET_ID, Position::ZERO),\n            5,\n        )\n        .unwrap()\n        .into_iter()\n        .map(|(certificate, _)| certificate.certificate.id)\n        .collect::<Vec<_>>();\n\n    assert_eq!(5, certificate_ids.len());\n\n    let certificate_ids_second = store\n        .get_source_stream_certificates_from_position(\n            CertificateSourceStreamPosition::new(SOURCE_STORAGE_SUBNET_ID, 5),\n            5,\n        )\n        .unwrap()\n        .into_iter()\n        .map(|(certificate, _)| certificate.certificate.id)\n        .collect::<Vec<_>>();\n\n    assert_eq!(5, certificate_ids_second.len());\n\n    certificate_ids.extend(certificate_ids_second.into_iter());\n\n    let certificates = store\n        .get_certificates(&certificate_ids[..])\n        .unwrap()\n        .into_iter()\n        .flatten()\n        .collect::<Vec<_>>();\n\n    assert_eq!(expected_certificates, 
certificates);\n\n    let mut certificate_ids = store\n        .get_target_stream_certificates_from_position(\n            CertificateTargetStreamPosition::new(\n                TARGET_STORAGE_SUBNET_ID_1,\n                SOURCE_STORAGE_SUBNET_ID,\n                Position::ZERO,\n            ),\n            100,\n        )\n        .unwrap()\n        .into_iter()\n        .map(|(c, _)| c.certificate.id)\n        .collect::<Vec<_>>();\n\n    certificate_ids.extend(\n        store\n            .get_target_stream_certificates_from_position(\n                CertificateTargetStreamPosition::new(\n                    TARGET_STORAGE_SUBNET_ID_1,\n                    TARGET_STORAGE_SUBNET_ID_2,\n                    Position::ZERO,\n                ),\n                100,\n            )\n            .unwrap()\n            .into_iter()\n            .map(|(c, _)| c.certificate.id),\n    );\n\n    assert_eq!(11, certificate_ids.len());\n\n    let certificates = store\n        .get_certificates(&certificate_ids[..])\n        .unwrap()\n        .into_iter()\n        .flatten()\n        .collect::<Vec<_>>();\n\n    expected_certificates.push(other_certificate);\n\n    assert_eq!(expected_certificates, certificates);\n}\n\n#[rstest]\n#[test(tokio::test)]\nasync fn pending_certificate_can_be_removed(store: Arc<ValidatorStore>) {\n    let pending_column = store.pending_tables.pending_pool.clone();\n\n    let certificate = Certificate::new_with_default_fields(\n        PREV_CERTIFICATE_ID,\n        SOURCE_SUBNET_ID_1,\n        &[TARGET_SUBNET_ID_1],\n    )\n    .unwrap();\n\n    let pending_id = store\n        .insert_pending_certificate(&certificate)\n        .await\n        .unwrap()\n        .unwrap();\n\n    assert!(pending_column.get(&pending_id).is_ok());\n    store.delete_pending_certificate(&pending_id).unwrap();\n\n    assert!(matches!(pending_column.get(&pending_id), Ok(None)));\n\n    let pending_id = store\n        .insert_pending_certificate(&certificate)\n        
.await\n        .unwrap()\n        .unwrap();\n\n    assert!(matches!(\n        store.insert_pending_certificate(&certificate).await,\n        Err(StorageError::InternalStorage(\n            crate::errors::InternalStorageError::CertificateAlreadyPending\n        ))\n    ));\n\n    assert!(pending_column.get(&pending_id).is_ok());\n    store.delete_pending_certificate(&pending_id).unwrap();\n\n    assert!(pending_column.iter().unwrap().next().is_none());\n}\n\n#[rstest]\n#[test(tokio::test)]\nasync fn get_source_head_for_subnet(store: Arc<ValidatorStore>) {\n    let expected_certificates_for_source_subnet_1: Vec<_> =\n        create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_2], 10);\n\n    store\n        .insert_certificates_delivered(&expected_certificates_for_source_subnet_1[..])\n        .await\n        .unwrap();\n\n    let expected_certificates_for_source_subnet_2 =\n        create_certificate_chain(SOURCE_SUBNET_ID_2, &[TARGET_SUBNET_ID_2], 10);\n\n    store\n        .insert_certificates_delivered(&expected_certificates_for_source_subnet_2[..])\n        .await\n        .unwrap();\n\n    let last_certificate_source_subnet_1 =\n        store.get_source_head(&SOURCE_SUBNET_ID_1).unwrap().unwrap();\n    let last_certificate_source_subnet_2 =\n        store.get_source_head(&SOURCE_SUBNET_ID_2).unwrap().unwrap();\n\n    assert_eq!(\n        expected_certificates_for_source_subnet_1\n            .last()\n            .unwrap()\n            .certificate\n            .id,\n        last_certificate_source_subnet_1.certificate_id\n    );\n    assert_eq!(9, *last_certificate_source_subnet_1.position); //check position\n    assert_eq!(\n        expected_certificates_for_source_subnet_2\n            .last()\n            .unwrap()\n            .certificate\n            .id,\n        last_certificate_source_subnet_2.certificate_id\n    );\n    assert_eq!(9, *last_certificate_source_subnet_2.position); //check position\n\n    let certificate = 
Certificate::new_with_default_fields(\n        expected_certificates_for_source_subnet_1\n            .last()\n            .unwrap()\n            .certificate\n            .id,\n        SOURCE_SUBNET_ID_1,\n        &[TARGET_SUBNET_ID_1],\n    )\n    .unwrap();\n\n    let new_certificate_source_subnet_1 = CertificateDelivered {\n        certificate: certificate.clone(),\n        proof_of_delivery: ProofOfDelivery {\n            certificate_id: certificate.id,\n            delivery_position: CertificateSourceStreamPosition::new(SOURCE_SUBNET_ID_1, 10),\n            readies: vec![],\n            threshold: 0,\n        },\n    };\n\n    store\n        .insert_certificate_delivered(&new_certificate_source_subnet_1)\n        .await\n        .unwrap();\n\n    let last_certificate_subnet_1 = store.get_source_head(&SOURCE_SUBNET_ID_1).unwrap().unwrap();\n\n    assert_eq!(\n        new_certificate_source_subnet_1.certificate.id,\n        last_certificate_subnet_1.certificate_id\n    );\n    assert_eq!(10, *last_certificate_subnet_1.position); //check position\n\n    let other_certificate_2 = Certificate::new_with_default_fields(\n        new_certificate_source_subnet_1.certificate.id,\n        SOURCE_SUBNET_ID_1,\n        &[TARGET_SUBNET_ID_2, TARGET_SUBNET_ID_1],\n    )\n    .unwrap();\n    let other_certificate_2 = CertificateDelivered {\n        certificate: other_certificate_2.clone(),\n        proof_of_delivery: ProofOfDelivery {\n            certificate_id: other_certificate_2.id,\n            delivery_position: CertificateSourceStreamPosition::new(SOURCE_SUBNET_ID_1, 11),\n            readies: vec![],\n            threshold: 0,\n        },\n    };\n\n    store\n        .insert_certificate_delivered(&other_certificate_2)\n        .await\n        .unwrap();\n\n    let last_certificate_subnet_2 = store.get_source_head(&SOURCE_SUBNET_ID_1).unwrap().unwrap();\n    assert_eq!(\n        other_certificate_2.certificate.id,\n        last_certificate_subnet_2.certificate_id\n   
 );\n    assert_eq!(11, *last_certificate_subnet_2.position); //check position\n}\n\n#[rstest]\n#[test(tokio::test)]\nasync fn get_pending_certificates(store: Arc<ValidatorStore>) {\n    let certificates_for_source_subnet_1 =\n        create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_2], 15);\n    let certificates_for_source_subnet_2 =\n        create_certificate_chain(SOURCE_SUBNET_ID_2, &[TARGET_SUBNET_ID_2], 15);\n\n    // Persist the first 10 Cert of each Subnets\n    store\n        .insert_certificates_delivered(&certificates_for_source_subnet_1[..10])\n        .await\n        .unwrap();\n    store\n        .insert_certificates_delivered(&certificates_for_source_subnet_2[..10])\n        .await\n        .unwrap();\n\n    let mut expected_pending_certificates = certificates_for_source_subnet_1[10..]\n        .iter()\n        .enumerate()\n        .map(|(index, certificate)| ((index as u64 + 1), certificate.certificate.clone()))\n        .collect::<Vec<_>>();\n\n    expected_pending_certificates.extend(\n        certificates_for_source_subnet_2[10..]\n            .iter()\n            .enumerate()\n            .map(|(index, certificate)| {\n                (\n                    (index as u64 + 1) + expected_pending_certificates.len() as u64,\n                    certificate.certificate.clone(),\n                )\n            })\n            .collect::<Vec<_>>(),\n    );\n\n    // Add the last 5 cert of each Subnet as pending certificate\n    store\n        .insert_pending_certificates(\n            &certificates_for_source_subnet_1[10..]\n                .iter()\n                .map(|certificate| certificate.certificate.clone())\n                .collect::<Vec<_>>(),\n        )\n        .unwrap();\n\n    store\n        .insert_pending_certificates(\n            &certificates_for_source_subnet_2[10..]\n                .iter()\n                .map(|certificate| certificate.certificate.clone())\n                .collect::<Vec<_>>(),\n        )\n     
   .unwrap();\n\n    let pending_certificates = store.iter_pending_pool().unwrap().collect::<Vec<_>>();\n    assert_eq!(\n        expected_pending_certificates.len(),\n        pending_certificates.len()\n    );\n    assert_eq!(expected_pending_certificates, pending_certificates);\n\n    // Remove some pending certificates, check again\n    let cert_to_remove = expected_pending_certificates.remove(5);\n    store.delete_pending_certificate(&cert_to_remove.0).unwrap();\n\n    let cert_to_remove = expected_pending_certificates.remove(8);\n    store.delete_pending_certificate(&cert_to_remove.0).unwrap();\n\n    let pending_certificates = store.iter_pending_pool().unwrap().collect::<Vec<_>>();\n    assert_eq!(\n        expected_pending_certificates.len(),\n        pending_certificates.len()\n    );\n    assert_eq!(expected_pending_certificates, pending_certificates);\n}\n\n#[rstest]\n#[tokio::test]\nasync fn fetch_source_subnet_certificates_in_order(store: Arc<ValidatorStore>) {\n    let certificates_for_source_subnet_1 =\n        create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_2], 10);\n    // Persist the first 10 Cert of each Subnets\n    store\n        .insert_certificates_delivered(&certificates_for_source_subnet_1[..10])\n        .await\n        .unwrap();\n\n    let res = store\n        .get_source_stream_certificates_from_position(\n            crate::CertificateSourceStreamPosition {\n                subnet_id: SOURCE_SUBNET_ID_1,\n                position: Position::ZERO,\n            },\n            100,\n        )\n        .unwrap();\n\n    let mut prev = PREV_CERTIFICATE_ID;\n\n    for (index, (cert, position)) in res.iter().enumerate() {\n        let cert = &cert.certificate;\n        assert_eq!(cert.prev_id, prev);\n        assert!(matches!(\n            position,\n            CertificateSourceStreamPosition {\n                subnet_id: SOURCE_SUBNET_ID_1,\n                position: current_pos\n            } if **current_pos == index as 
u64\n        ));\n\n        prev = cert.id;\n    }\n}\n"
  },
  {
    "path": "crates/topos-tce-storage/src/tests/pending_certificates.rs",
    "content": "use std::{sync::Arc, time::Duration};\n\nuse rstest::rstest;\nuse topos_core::uci::{Certificate, INITIAL_CERTIFICATE_ID};\nuse topos_test_sdk::{\n    certificates::{create_certificate_at_position, create_certificate_chain},\n    constants::{SOURCE_SUBNET_ID_1, TARGET_SUBNET_ID_1},\n};\n\nuse super::support::store;\nuse crate::{store::WriteStore, validator::ValidatorStore};\n\n#[rstest]\n#[tokio::test]\nasync fn adding_genesis_pending_certificate(store: Arc<ValidatorStore>) {\n    let certificate = Certificate::new_with_default_fields(\n        INITIAL_CERTIFICATE_ID,\n        SOURCE_SUBNET_ID_1,\n        &[TARGET_SUBNET_ID_1],\n    )\n    .unwrap();\n\n    let pending_id = store\n        .insert_pending_certificate(&certificate)\n        .await\n        .unwrap()\n        .unwrap();\n\n    assert_eq!(\n        store.get_pending_certificate(&pending_id).unwrap().unwrap(),\n        certificate\n    );\n\n    assert_eq!(\n        store.get_pending_id(&certificate.id).unwrap().unwrap(),\n        pending_id\n    );\n}\n\n#[rstest]\n#[tokio::test]\nasync fn adding_pending_certificate_with_precedence_check_fail(store: Arc<ValidatorStore>) {\n    let initial_certificate_delivered = create_certificate_at_position::default();\n\n    let certificate = Certificate::new_with_default_fields(\n        initial_certificate_delivered.certificate.id,\n        SOURCE_SUBNET_ID_1,\n        &[TARGET_SUBNET_ID_1],\n    )\n    .unwrap();\n\n    assert!(store\n        .insert_pending_certificate(&certificate)\n        .await\n        .unwrap()\n        .is_none());\n\n    assert!(store.get_pending_id(&certificate.id).unwrap().is_none());\n    assert!(store\n        .pending_tables\n        .precedence_pool\n        .get(&certificate.prev_id)\n        .unwrap()\n        .is_some());\n    store\n        .insert_certificate_delivered(&initial_certificate_delivered)\n        .await\n        .unwrap();\n\n    let pending_id = 
store.get_pending_id(&certificate.id).unwrap().unwrap();\n\n    assert_eq!(\n        store.get_pending_certificate(&pending_id).unwrap().unwrap(),\n        certificate\n    );\n}\n\n#[rstest]\n#[tokio::test]\nasync fn adding_pending_certificate_already_delivered(store: Arc<ValidatorStore>) {\n    let initial_certificate_delivered = create_certificate_at_position::default();\n\n    store\n        .insert_certificate_delivered(&initial_certificate_delivered)\n        .await\n        .unwrap();\n\n    assert!(store\n        .insert_pending_certificate(&initial_certificate_delivered.certificate)\n        .await\n        .is_err());\n}\n\n/// This test is covering a corner case which involves the delivery of a prev certificate\n/// and a child certificate.\n///\n/// The scenario is this one:\n/// - A `prev` certificate (`C1`) has been delivered (by the broadcast) and need to be persisted\n///   The persist method will hold a lock while performing multiple insert/delete to avoid\n///   insert race condition.\n/// - At the same time, another node is sending a certificate (`C2`) which have `C1` as `prev_id`.\n///   `C2` is looking at the storage to find if the `prev_id` `C1` is delivered but find nothing as\n///   the `persist` method is still working at creating the `WriteBatch`. 
It led the node to put\n///   `C2` in the `precedence_pool` waiting for `C1` to be delivered while it is in fact already\n///   delivered.\n///\n/// To avoid that and as a first step, when trying to insert a certificate in the pending pool,\n/// The node will try to acquire a lock guard on the certificate but also on the prev_id.\nmod concurrency {\n    use crate::errors::StorageError;\n\n    use super::*;\n\n    #[rstest]\n    #[tokio::test]\n    async fn adding_pending_certificate_but_prev_fail(store: Arc<ValidatorStore>) {\n        let mut certs = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 2);\n        let cert = certs.pop().unwrap();\n        let parent = certs.pop().unwrap();\n\n        assert!(certs.is_empty());\n\n        // The lock guard simulate the start of the certificate insertion in the table.\n        let lock_guard_certificate = store\n            .fullnode_store\n            .certificate_lock_guard(parent.certificate.id)\n            .await;\n\n        tokio::spawn(async move {\n            tokio::time::sleep(Duration::from_millis(100)).await;\n            // Drop the lock_guard of the prev_id without inserting it\n            drop(lock_guard_certificate);\n        });\n\n        assert!(matches!(\n            store.insert_pending_certificate(&cert.certificate).await,\n            Ok(None)\n        ));\n    }\n\n    #[rstest]\n    #[tokio::test]\n    async fn certificate_in_delivery(store: Arc<ValidatorStore>) {\n        let mut certs = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 1);\n        let cert = certs.pop().unwrap();\n\n        assert!(certs.is_empty());\n\n        // The lock guard simulate the start of the certificate insertion in the table.\n        let lock_guard_subnet = store\n            .fullnode_store\n            .subnet_lock_guard(cert.certificate.source_subnet_id)\n            .await;\n\n        tokio::spawn(async move {\n            
tokio::time::sleep(Duration::from_millis(200)).await;\n            // Drop the lock_guard of the certificate without inserting it\n            drop(lock_guard_subnet);\n        });\n\n        let store_deliver = store.clone();\n        let delivered = cert.clone();\n        tokio::spawn(async move {\n            _ = store_deliver\n                .insert_certificate_delivered(&delivered)\n                .await\n                .unwrap();\n        });\n\n        tokio::time::sleep(Duration::from_millis(100)).await;\n        assert!(matches!(\n            store.insert_pending_certificate(&cert.certificate).await,\n            Err(StorageError::InternalStorage(\n                crate::errors::InternalStorageError::CertificateAlreadyExists\n            ))\n        ));\n    }\n\n    #[rstest]\n    #[tokio::test]\n    async fn prev_certificate_in_delivery(store: Arc<ValidatorStore>) {\n        let mut certs = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 2);\n        let cert = certs.pop().unwrap();\n        let prev = certs.pop().unwrap();\n\n        assert!(certs.is_empty());\n\n        // The lock guard simulate the start of the certificate insertion in the table.\n        let lock_guard_subnet = store\n            .fullnode_store\n            .subnet_lock_guard(cert.certificate.source_subnet_id)\n            .await;\n\n        tokio::spawn(async move {\n            tokio::time::sleep(Duration::from_millis(200)).await;\n            // Drop the lock_guard of the certificate without inserting it\n            drop(lock_guard_subnet);\n        });\n\n        let store_deliver = store.clone();\n        tokio::spawn(async move {\n            _ = store_deliver\n                .insert_certificate_delivered(&prev)\n                .await\n                .unwrap();\n        });\n\n        tokio::time::sleep(Duration::from_millis(100)).await;\n        assert!(matches!(\n            store.insert_pending_certificate(&cert.certificate).await,\n            
Ok(Some(_))\n        ));\n    }\n}\n"
  },
  {
    "path": "crates/topos-tce-storage/src/tests/position.rs",
    "content": "use test_log::test;\n\n#[test(tokio::test)]\n#[ignore = \"not yet implemented\"]\nasync fn position_can_be_fetch_for_multiple_subnets() {}\n\n#[test(tokio::test)]\n#[ignore = \"not yet implemented\"]\nasync fn position_can_be_fetch_for_all_subnets() {}\n"
  },
  {
    "path": "crates/topos-tce-storage/src/tests/rocks.rs",
    "content": "use std::thread;\n\nuse rstest::rstest;\n\nuse crate::rocks::db_column::DBColumn;\nuse crate::tests::support::database_name;\nuse crate::tests::support::rocks_db;\n\n#[cfg(test)]\nuse test_log::test;\n\n#[rstest]\n#[test(tokio::test)]\nasync fn create_batch_multithread(database_name: &'static str) {\n    let db = rocks_db(database_name);\n    let column: DBColumn<String, String> = DBColumn::reopen(&db, \"default\");\n\n    let column_clone = column.clone();\n\n    let batch = column\n        .batch()\n        .insert_batch(\n            &column,\n            [(\"key1\", \"thread_1_value\"), (\"key2\", \"thread_1_value\")]\n                .map(|(k, v)| (k.to_string(), v.to_string())),\n        )\n        .unwrap();\n\n    let join = thread::spawn(move || {\n        let column = column_clone;\n        column\n            .batch()\n            .insert_batch(\n                &column,\n                [(\"key1\", \"thread_2_value\"), (\"key2\", \"thread_2_value\")]\n                    .map(|(k, v)| (k.to_string(), v.to_string())),\n            )\n            .unwrap()\n    });\n\n    batch.write().unwrap();\n\n    assert_eq!(\n        column.get(&\"key1\".to_string()).unwrap().unwrap(),\n        \"thread_1_value\"\n    );\n\n    join.join().unwrap().write().unwrap();\n\n    assert_eq!(\n        column.get(&\"key1\".to_string()).unwrap().unwrap(),\n        \"thread_2_value\"\n    );\n}\n"
  },
  {
    "path": "crates/topos-tce-storage/src/tests/support/columns.rs",
    "content": "use rstest::fixture;\n\nuse crate::rocks::{constants, db_column::DBColumn};\nuse crate::types::{\n    CertificatesColumn, PendingCertificatesColumn, StreamsColumn, TargetSourceListColumn,\n    TargetStreamsColumn,\n};\n\nuse super::database_name;\nuse super::rocks_db;\n\n#[fixture]\npub(crate) fn pending_column(database_name: &'static str) -> PendingCertificatesColumn {\n    DBColumn::reopen(&rocks_db(database_name), constants::PENDING_CERTIFICATES)\n}\n\n#[fixture]\npub(crate) fn certificates_column(database_name: &'static str) -> CertificatesColumn {\n    DBColumn::reopen(&rocks_db(database_name), constants::CERTIFICATES)\n}\n\n#[fixture]\npub(crate) fn source_streams_column(database_name: &'static str) -> StreamsColumn {\n    DBColumn::reopen(&rocks_db(database_name), constants::SOURCE_STREAMS)\n}\n\n#[fixture]\npub(crate) fn target_streams_column(database_name: &'static str) -> TargetStreamsColumn {\n    DBColumn::reopen(&rocks_db(database_name), constants::TARGET_STREAMS)\n}\n\n#[fixture]\npub(crate) fn target_source_list_column(database_name: &'static str) -> TargetSourceListColumn {\n    DBColumn::reopen(&rocks_db(database_name), constants::TARGET_SOURCES)\n}\n"
  },
  {
    "path": "crates/topos-tce-storage/src/tests/support/folder.rs",
    "content": "use std::{\n    fs,\n    path::{Path, PathBuf},\n    thread,\n};\n\nuse rstest::fixture;\n\n#[fixture]\npub(crate) fn random_path() -> Box<PathBuf> {\n    let temp_dir = topos_test_sdk::storage::create_folder(thread::current().name().unwrap());\n    Box::new(temp_dir)\n}\n\npub(crate) fn created_folder(random_path: &Path) {\n    fs::create_dir_all(random_path).unwrap();\n}\n"
  },
  {
    "path": "crates/topos-tce-storage/src/tests/support/mod.rs",
    "content": "use std::{\n    collections::HashMap,\n    path::PathBuf,\n    str::FromStr,\n    sync::{Arc, Mutex},\n    thread,\n};\n\nuse once_cell::sync::Lazy;\nuse rocksdb::Options;\nuse rstest::fixture;\nuse topos_test_sdk::storage::create_folder;\n\nuse crate::{\n    epoch::{EpochValidatorsStore, ValidatorPerEpochStore},\n    fullnode::FullNodeStore,\n    index::IndexTables,\n    rocks::{db::init_db, db::RocksDB},\n    validator::{ValidatorPerpetualTables, ValidatorStore},\n};\n\nuse self::folder::created_folder;\n\npub(crate) mod columns;\npub(crate) mod folder;\n\npub(crate) static DB: Lazy<Mutex<HashMap<&'static str, Arc<RocksDB>>>> =\n    Lazy::new(|| Mutex::new(HashMap::new()));\n\n#[fixture]\npub(crate) fn database_name() -> &'static str {\n    Box::leak(Box::new(\n        topos_test_sdk::storage::create_folder(thread::current().name().unwrap())\n            .to_str()\n            .unwrap()\n            .replace(\"::\", \"_\"),\n    ))\n}\n\n#[fixture]\npub(crate) fn store() -> Arc<ValidatorStore> {\n    let temp_dir = create_folder::default();\n    let perpetual_tables = Arc::new(ValidatorPerpetualTables::open(&temp_dir));\n    let index_tables = Arc::new(IndexTables::open(&temp_dir));\n\n    let participants_store =\n        EpochValidatorsStore::new(&temp_dir).expect(\"Unable to create Participant store\");\n\n    let epoch_store =\n        ValidatorPerEpochStore::new(0, &temp_dir).expect(\"Unable to create Per epoch store\");\n\n    let store = FullNodeStore::open(\n        epoch_store,\n        participants_store,\n        perpetual_tables,\n        index_tables,\n    )\n    .expect(\"Unable to create full node store\");\n\n    ValidatorStore::open(&temp_dir, store).unwrap()\n}\n\n#[fixture]\npub(crate) fn rocks_db(database_name: &'static str) -> Arc<RocksDB> {\n    let mut dbs = DB.lock().unwrap();\n\n    dbs.entry(database_name)\n        .or_insert_with(|| {\n            let path = PathBuf::from_str(database_name).unwrap();\n            
created_folder(&path);\n            let mut options = Options::default();\n            options.create_if_missing(true);\n            options.create_missing_column_families(true);\n\n            Arc::new(init_db(&path, options).unwrap())\n        })\n        .clone()\n}\n"
  },
  {
    "path": "crates/topos-tce-storage/src/types.rs",
    "content": "use topos_core::{\n    api::grpc::checkpoints::SourceStreamPosition,\n    types::{\n        stream::{CertificateSourceStreamPosition, CertificateTargetStreamPosition, Position},\n        CertificateDelivered, Ready, Signature,\n    },\n    uci::{Certificate, CertificateId},\n};\n\nuse crate::{\n    rocks::{db_column::DBColumn, TargetSourceListKey},\n    CertificatePositions, PendingCertificateId,\n};\n\npub type Echo = String;\n\npub type CertificateSequenceNumber = u64;\npub type EpochId = u64;\npub type Validators = Vec<String>;\n\n/// Column that keeps certificates that are not yet delivered\npub(crate) type PendingCertificatesColumn = DBColumn<u64, Certificate>;\n/// Column that keeps list of all certificates retrievable by their id\npub(crate) type CertificatesColumn = DBColumn<CertificateId, CertificateDelivered>;\n/// Column that keeps list of certificates received from particular subnet and\n/// maps (source subnet id, source certificate position) to certificate id\npub(crate) type StreamsColumn = DBColumn<CertificateSourceStreamPosition, CertificateId>;\n/// Column that keeps list of certificates that are delivered to target subnet,\n/// and maps their target (target subnet, source subnet and position/count per source subnet)\n/// to certificate id\npub(crate) type TargetStreamsColumn = DBColumn<CertificateTargetStreamPosition, CertificateId>;\n/// Keeps position for particular target subnet id <- source subnet id column in TargetStreamsColumn\npub(crate) type TargetSourceListColumn = DBColumn<TargetSourceListKey, Position>;\n\n#[derive(Debug, Clone)]\npub enum PendingResult {\n    AlreadyDelivered,\n    AlreadyPending,\n    AwaitPrecedence,\n    InPending(PendingCertificateId),\n}\n\n#[derive(Debug, Clone)]\npub struct CertificateDeliveredWithPositions(pub CertificateDelivered, pub CertificatePositions);\n\n#[allow(unused)]\npub struct EpochSummary {\n    epoch_id: EpochId,\n    start_checkpoint: VerifiedCheckpointSummary,\n    
end_checkpoint: Option<VerifiedCheckpointSummary>,\n}\n\n#[allow(unused)]\npub struct CheckpointSummary {\n    epoch: EpochId,\n    sequence_number: usize,\n    checkpoint_data: Vec<SourceStreamPosition>,\n}\n#[allow(unused)]\npub struct VerifiedCheckpointSummary(CheckpointSummary, ValidatorQuorumSignatureInfo);\n\n#[allow(unused)]\npub struct ValidatorQuorumSignatureInfo {\n    epoch: EpochId,\n    signature: [u8; 32],\n}\n\n#[allow(unused)]\npub struct BroadcastState {\n    echoes: Vec<(Echo, Signature)>,\n    readies: Vec<(Ready, Signature)>,\n    delivered: bool,\n}\n"
  },
  {
    "path": "crates/topos-tce-storage/src/validator/mod.rs",
    "content": "//! Validator's context store and storage\n//!\n//! The [`ValidatorStore`] is responsible for managing the various kind of data that are required by the\n//! TCE network in order to broadcast certificates. It is composed of two main parts:\n//!\n//! - a [`FullNodeStore`]\n//! - a [`ValidatorPendingTables`]\n//!\n//! ## Responsibilities\n//!\n//! This store is used in places where the [`FullNodeStore`] is not enough, it allows to access the\n//! different pending pools and to manage them but also to access the [`FullNodeStore`] in order to\n//! persist or update [`Certificate`] or `streams`.\n//!\n//! Pending pools and their behavior are described in the [`ValidatorPendingTables`] documentation.\n//!\nuse std::{\n    collections::HashMap,\n    path::Path,\n    sync::{atomic::Ordering, Arc},\n};\n\nuse async_trait::async_trait;\n\nuse rocksdb::properties::ESTIMATE_NUM_KEYS;\nuse topos_core::{\n    types::{\n        stream::{CertificateSourceStreamPosition, Position},\n        CertificateDelivered, ProofOfDelivery,\n    },\n    uci::{Certificate, CertificateId, SubnetId, INITIAL_CERTIFICATE_ID},\n};\nuse topos_metrics::{STORAGE_PENDING_POOL_COUNT, STORAGE_PRECEDENCE_POOL_COUNT};\nuse tracing::{debug, error, info, instrument, warn};\n\nuse crate::{\n    errors::{InternalStorageError, StorageError},\n    fullnode::FullNodeStore,\n    rocks::map::Map,\n    store::{ReadStore, WriteStore},\n    CertificatePositions, CertificateTargetStreamPosition, PendingCertificateId, SourceHead,\n};\n\npub use self::tables::ValidatorPendingTables;\npub use self::tables::ValidatorPerpetualTables;\n\nmod tables;\n\n/// Store to manage Validator data\n///\n/// The [`ValidatorStore`] is composed of a [`FullNodeStore`] and a [`ValidatorPendingTables`].\n///\n/// As the [`FullNodeStore`] is responsible of keeping and managing data that are persistent,\n/// the [`ValidatorStore`] is delegating to it many of the [`WriteStore`] and [`ReadStore`]\n/// functionality.\n///\n/// The 
key point is that the [`ValidatorStore`] is managing the different pending pools using a [`ValidatorPendingTables`].\n///\n/// Pending pools and how they behave are described in the [`ValidatorPendingTables`] documentation.\n///\npub struct ValidatorStore {\n    pub(crate) pending_tables: ValidatorPendingTables,\n    pub(crate) fullnode_store: Arc<FullNodeStore>,\n}\n\nimpl ValidatorStore {\n    /// Try to create a new instance of [`ValidatorStore`] based on the given path\n    pub fn new(path: &Path) -> Result<Arc<Self>, StorageError> {\n        let fullnode_store = FullNodeStore::new(path)?;\n\n        Self::open(path, fullnode_store)\n    }\n\n    /// Open a [`ValidatorStore`] at the given `path` and using the given [`FullNodeStore`]\n    pub fn open(\n        path: &Path,\n        fullnode_store: Arc<FullNodeStore>,\n    ) -> Result<Arc<Self>, StorageError> {\n        let pending_tables: ValidatorPendingTables = ValidatorPendingTables::open(path);\n\n        let store = Arc::new(Self {\n            pending_tables,\n            fullnode_store,\n        });\n\n        store.pending_tables.pending_pool.rocksdb.compact_range_cf(\n            &store.pending_tables.pending_pool.cf()?,\n            None::<&[u8]>,\n            None::<&[u8]>,\n        );\n        store\n            .pending_tables\n            .precedence_pool\n            .rocksdb\n            .compact_range_cf(\n                &store.pending_tables.precedence_pool.cf()?,\n                None::<&[u8]>,\n                None::<&[u8]>,\n            );\n\n        let pending_count: i64 = store.pending_pool_size()?.try_into().map_err(|error| {\n            error!(\"Failed to convert estimate-num-keys to i64: {}\", error);\n            StorageError::InternalStorage(InternalStorageError::UnexpectedDBState(\n                \"Failed to convert estimate-num-keys to i64\",\n            ))\n        })?;\n\n        let precedence_count: i64 = store.precedence_pool_size()?.try_into().map_err(|error| {\n          
  error!(\"Failed to convert estimate-num-keys to i64: {}\", error);\n            StorageError::InternalStorage(InternalStorageError::UnexpectedDBState(\n                \"Failed to convert estimate-num-keys to i64\",\n            ))\n        })?;\n\n        STORAGE_PENDING_POOL_COUNT.set(pending_count);\n        STORAGE_PRECEDENCE_POOL_COUNT.set(precedence_count);\n\n        Ok(store)\n    }\n\n    /// Returns the [`FullNodeStore`] used by the [`ValidatorStore`]\n    pub fn fullnode_store(&self) -> Arc<FullNodeStore> {\n        self.fullnode_store.clone()\n    }\n\n    /// Returns the number of certificates in the pending pool\n    pub fn pending_pool_size(&self) -> Result<u64, StorageError> {\n        Ok(self\n            .pending_tables\n            .pending_pool\n            .property_int_value(ESTIMATE_NUM_KEYS)?)\n    }\n\n    /// Returns the number of certificates in the precedence pool\n    pub fn precedence_pool_size(&self) -> Result<u64, StorageError> {\n        Ok(self\n            .pending_tables\n            .precedence_pool\n            .property_int_value(ESTIMATE_NUM_KEYS)?)\n    }\n\n    /// Try to return the [`PendingCertificateId`] for a [`CertificateId`]\n    ///\n    /// Return `Ok(None)` if the `certificate_id` is not found.\n    pub fn get_pending_id(\n        &self,\n        certificate_id: &CertificateId,\n    ) -> Result<Option<PendingCertificateId>, StorageError> {\n        Ok(self.pending_tables.pending_pool_index.get(certificate_id)?)\n    }\n\n    /// Try to return the [`Certificate`] for a [`PendingCertificateId`]\n    ///\n    /// Return `Ok(None)` if the `pending_id` is not found.\n    pub fn get_pending_certificate(\n        &self,\n        pending_id: &PendingCertificateId,\n    ) -> Result<Option<Certificate>, StorageError> {\n        Ok(self.pending_tables.pending_pool.get(pending_id)?)\n    }\n\n    /// Returns an iterator over the pending pool\n    ///\n    /// Note: this can be slow on large datasets.\n    #[doc(hidden)]\n    
pub fn iter_pending_pool(\n        &self,\n    ) -> Result<impl Iterator<Item = (PendingCertificateId, Certificate)> + '_, StorageError> {\n        Ok(self.pending_tables.pending_pool.iter()?)\n    }\n\n    /// Returns an iterator over the pending pool starting at a given `PendingCertificateId`\n    ///\n    /// Note: this can be slow on large datasets.\n    #[doc(hidden)]\n    pub fn iter_pending_pool_at(\n        &self,\n        pending_id: &PendingCertificateId,\n    ) -> Result<impl Iterator<Item = (PendingCertificateId, Certificate)> + '_, StorageError> {\n        Ok(self.pending_tables.pending_pool.iter_at(pending_id)?)\n    }\n\n    /// Returns an iterator over the precedence pool\n    ///\n    /// Note: this can be slow on large datasets.\n    #[doc(hidden)]\n    pub fn iter_precedence_pool(\n        &self,\n    ) -> Result<impl Iterator<Item = (CertificateId, Certificate)> + '_, StorageError> {\n        Ok(self.pending_tables.precedence_pool.iter()?)\n    }\n\n    pub fn get_next_pending_certificates(\n        &self,\n        from: &PendingCertificateId,\n        number: usize,\n    ) -> Result<Vec<(PendingCertificateId, Certificate)>, StorageError> {\n        debug!(\n            \"Get next pending certificates from {} (max: {})\",\n            from, number\n        );\n        Ok(self\n            .pending_tables\n            .pending_pool\n            .iter_at(from)?\n            .take(number)\n            .collect())\n    }\n\n    /// Returns the [Certificate] (if any) that is currently in the precedence pool for the given [CertificateId]\n    pub fn check_precedence(\n        &self,\n        certificate_id: &CertificateId,\n    ) -> Result<Option<Certificate>, StorageError> {\n        Ok(self.pending_tables.precedence_pool.get(certificate_id)?)\n    }\n\n    // TODO: Performance issue on this one as we iter over all the pending certificates\n    // We need to improve how we request the pending certificates.\n    pub fn 
get_pending_certificates_for_subnets(\n        &self,\n        subnets: &[SubnetId],\n    ) -> Result<HashMap<SubnetId, (u64, Option<Certificate>)>, StorageError> {\n        let mut result: HashMap<SubnetId, (u64, Option<Certificate>)> = subnets\n            .iter()\n            .enumerate()\n            .map(|(_, s)| (*s, (0, None)))\n            .collect();\n\n        for (_, certificate) in self.pending_tables.pending_pool.iter()? {\n            if !subnets.contains(&certificate.source_subnet_id) {\n                continue;\n            }\n\n            let mut latest_cert = certificate;\n            let entry = result\n                .entry(latest_cert.source_subnet_id)\n                .or_insert((0, None));\n\n            entry.0 += 1;\n            while let Some(certificate) =\n                self.pending_tables.precedence_pool.get(&latest_cert.id)?\n            {\n                latest_cert = certificate;\n                entry.0 += 1;\n            }\n\n            entry.1 = Some(latest_cert);\n        }\n\n        Ok(result)\n    }\n\n    #[cfg(test)]\n    pub(crate) fn insert_pending_certificates(\n        &self,\n        certificates: &[Certificate],\n    ) -> Result<Vec<PendingCertificateId>, StorageError> {\n        let id = self\n            .pending_tables\n            .next_pending_id\n            .fetch_add(certificates.len() as u64, Ordering::Relaxed);\n\n        let mut batch = self.pending_tables.pending_pool.batch();\n\n        let (values, index, ids) = certificates.iter().enumerate().fold(\n            (Vec::new(), Vec::new(), Vec::new()),\n            |(mut values, mut index, mut ids), (idx, certificate)| {\n                let id = id + idx as u64;\n\n                index.push((certificate.id, id));\n                values.push((id, certificate));\n                ids.push(id);\n\n                (values, index, ids)\n            },\n        );\n\n        batch = batch.insert_batch(&self.pending_tables.pending_pool, values)?;\n        
batch = batch.insert_batch(&self.pending_tables.pending_pool_index, index)?;\n\n        batch.write()?;\n\n        STORAGE_PENDING_POOL_COUNT.add(ids.len() as i64);\n\n        Ok(ids)\n    }\n\n    pub async fn insert_pending_certificate(\n        &self,\n        certificate: &Certificate,\n    ) -> Result<Option<PendingCertificateId>, StorageError> {\n        // A lock guard is taken during the insertion of a pending certificate (C1)\n        // to avoid race condition when this certificate C1 is delivered by the network\n        // and in the process of being inserted into the precedence tables.\n        let _certificate_guard = self\n            .fullnode_store\n            .certificate_lock_guard(certificate.id)\n            .await;\n\n        if self.get_certificate(&certificate.id)?.is_some() {\n            debug!(\"Certificate {} is already delivered\", certificate.id);\n            return Err(StorageError::InternalStorage(\n                InternalStorageError::CertificateAlreadyExists,\n            ));\n        }\n\n        if self\n            .pending_tables\n            .pending_pool_index\n            .get(&certificate.id)?\n            .is_some()\n        {\n            debug!(\n                \"Certificate {} is already in the pending pool\",\n                certificate.id\n            );\n            return Err(StorageError::InternalStorage(\n                InternalStorageError::CertificateAlreadyPending,\n            ));\n        }\n\n        // A lock guard is taken during the insertion of a pending certificate\n        // to avoid race condition when a certificate is being added to the\n        // pending pool while its parent is currently being inserted as delivered\n        let _prev_certificate_guard = self\n            .fullnode_store\n            .certificate_lock_guard(certificate.prev_id)\n            .await;\n\n        let prev_delivered = certificate.prev_id == INITIAL_CERTIFICATE_ID\n            || 
self.get_certificate(&certificate.prev_id)?.is_some();\n\n        if prev_delivered {\n            let id = self\n                .pending_tables\n                .next_pending_id\n                .fetch_add(1, Ordering::Relaxed);\n\n            self.pending_tables.pending_pool.insert(&id, certificate)?;\n            self.pending_tables\n                .pending_pool_index\n                .insert(&certificate.id, &id)?;\n\n            STORAGE_PENDING_POOL_COUNT.inc();\n            debug!(\n                \"Certificate {} is now in the pending pool at index: {}\",\n                certificate.id, id\n            );\n            Ok(Some(id))\n        } else {\n            self.pending_tables\n                .precedence_pool\n                .insert(&certificate.prev_id, certificate)?;\n\n            STORAGE_PRECEDENCE_POOL_COUNT.inc();\n            debug!(\n                \"Certificate {} is now in the precedence pool, because the previous certificate \\\n                 {} isn't delivered yet\",\n                certificate.id, certificate.prev_id\n            );\n\n            Ok(None)\n        }\n    }\n\n    #[instrument(skip(self, proofs))]\n    pub fn insert_unverified_proofs(\n        &self,\n        proofs: Vec<ProofOfDelivery>,\n    ) -> Result<Vec<CertificateId>, StorageError> {\n        let certs: Vec<CertificateId> = proofs.iter().map(|proof| proof.certificate_id).collect();\n\n        let unverified: Vec<(CertificateId, ProofOfDelivery)> = proofs\n            .into_iter()\n            .map(|proof| {\n                debug!(\n                    \"Certificate Sync: unverified proof for {} inserted\",\n                    proof.certificate_id\n                );\n                (proof.certificate_id, proof)\n            })\n            .collect();\n\n        self.fullnode_store\n            .perpetual_tables\n            .unverified\n            .multi_insert(unverified)?;\n\n        Ok(certs)\n    }\n\n    #[instrument(skip(self, certificate))]\n    
pub async fn synchronize_certificate(\n        &self,\n        certificate: Certificate,\n    ) -> Result<(), StorageError> {\n        if let Ok(Some(proof_of_delivery)) = self.get_unverified_proof(&certificate.id) {\n            let certificate_id = certificate.id;\n            debug!(\n                \"Certificate Sync: certificate {} is now defined as delivered\",\n                certificate_id\n            );\n            self.insert_certificate_delivered(&CertificateDelivered {\n                certificate,\n                proof_of_delivery,\n            })\n            .await?;\n\n            debug!(\n                \"Certificate Sync: unverified proof has been removed for {}\",\n                certificate_id\n            );\n            self.fullnode_store\n                .perpetual_tables\n                .unverified\n                .delete(&certificate_id)?;\n\n            Ok(())\n        } else {\n            debug!(\"Certificate Sync: Proof not found for {}\", certificate.id);\n            Err(StorageError::InternalStorage(\n                crate::errors::InternalStorageError::InvalidQueryArgument(\"Proof not found\"),\n            ))\n        }\n    }\n\n    pub fn get_unverified_proof(\n        &self,\n        certificate_id: &CertificateId,\n    ) -> Result<Option<ProofOfDelivery>, StorageError> {\n        Ok(self\n            .fullnode_store\n            .perpetual_tables\n            .unverified\n            .get(certificate_id)?)\n    }\n\n    /// Returns the difference between the `from` list of [ProofOfDelivery] and the local head\n    /// checkpoint. 
This is used to define the list of certificates that are missing between the\n    /// `from` and the local head checkpoint.\n    /// The maximum number of [ProofOfDelivery] returned per [SubnetId] is 100.\n    /// If the `from` is missing a local subnet, the list of [ProofOfDelivery] for this subnet will\n    /// start from [Position] `0`.\n    pub fn get_checkpoint_diff(\n        &self,\n        from: &[ProofOfDelivery],\n        limit_per_subnet: usize,\n    ) -> Result<HashMap<SubnetId, Vec<ProofOfDelivery>>, StorageError> {\n        // Parse the from in order to extract the different position per subnets\n        let from_positions: HashMap<SubnetId, &ProofOfDelivery> = from\n            .iter()\n            .map(|v| (v.delivery_position.subnet_id, v))\n            .collect();\n\n        let mut output: HashMap<SubnetId, Vec<ProofOfDelivery>> = HashMap::new();\n\n        // Request the local head checkpoint\n        let subnets: HashMap<SubnetId, Position> = self\n            .fullnode_store\n            .index_tables\n            .source_list\n            .iter()?\n            .map(|(subnet_id, (_, position))| (subnet_id, position))\n            .collect();\n\n        // For every local known subnets we want to iterate and check if there\n        // is a delta between the from_position and our head position.\n        for (subnet, local_position) in subnets {\n            let certs: Vec<_> = if let Some(position) = from_positions.get(&subnet) {\n                if local_position <= position.delivery_position.position {\n                    continue;\n                }\n\n                self.fullnode_store\n                    .perpetual_tables\n                    .streams\n                    .prefix_iter(&(&subnet, &position.delivery_position.position))?\n                    .skip(1)\n                    .take(limit_per_subnet)\n                    .map(|(_, v)| v)\n                    .collect()\n            } else {\n                self.fullnode_store\n   
                 .perpetual_tables\n                    .streams\n                    .prefix_iter(&(&subnet, Position::ZERO))?\n                    .take(limit_per_subnet)\n                    .map(|(_, v)| v)\n                    .collect()\n            };\n\n            let proofs: Vec<_> = self\n                .fullnode_store\n                .get_certificates(&certs)?\n                .into_iter()\n                .filter_map(|v| v.map(|c| c.proof_of_delivery))\n                .collect();\n\n            info!(\n                \"Certificate Sync: distance between from and head for {} subnet is {}\",\n                subnet,\n                proofs.len()\n            );\n\n            if let Some(old_value) = output.insert(subnet, proofs) {\n                error!(\n                    \"Certificate Sync: This should not happen, we are overwriting a value during \\\n                     sync of {subnet}. Overwriting {}\",\n                    old_value.len()\n                );\n            }\n        }\n\n        Ok(output)\n    }\n\n    #[cfg(test)]\n    pub(crate) fn delete_pending_certificate(\n        &self,\n        pending_id: &PendingCertificateId,\n    ) -> Result<Certificate, StorageError> {\n        if let Some(certificate) = self.pending_tables.pending_pool.get(pending_id)? 
{\n            self.pending_tables.pending_pool.delete(pending_id)?;\n            self.pending_tables\n                .pending_pool_index\n                .delete(&certificate.id)?;\n\n            STORAGE_PENDING_POOL_COUNT.dec();\n            Ok(certificate)\n        } else {\n            Err(StorageError::InternalStorage(\n                crate::errors::InternalStorageError::InvalidQueryArgument(\n                    \"No certificate for pending_id\",\n                ),\n            ))\n        }\n    }\n}\nimpl ReadStore for ValidatorStore {\n    fn count_certificates_delivered(&self) -> Result<u64, StorageError> {\n        self.fullnode_store.count_certificates_delivered()\n    }\n\n    fn get_source_head(&self, subnet_id: &SubnetId) -> Result<Option<SourceHead>, StorageError> {\n        self.fullnode_store.get_source_head(subnet_id)\n    }\n\n    fn get_certificate(\n        &self,\n        certificate_id: &CertificateId,\n    ) -> Result<Option<CertificateDelivered>, StorageError> {\n        self.fullnode_store.get_certificate(certificate_id)\n    }\n\n    fn get_certificates(\n        &self,\n        certificate_ids: &[CertificateId],\n    ) -> Result<Vec<Option<CertificateDelivered>>, StorageError> {\n        self.fullnode_store.get_certificates(certificate_ids)\n    }\n\n    fn last_delivered_position_for_subnet(\n        &self,\n        subnet_id: &SubnetId,\n    ) -> Result<Option<CertificateSourceStreamPosition>, StorageError> {\n        Ok(self\n            .fullnode_store\n            .index_tables\n            .source_list\n            .get(subnet_id)?\n            .map(|(_, position)| CertificateSourceStreamPosition {\n                subnet_id: *subnet_id,\n                position,\n            }))\n    }\n\n    fn get_checkpoint(&self) -> Result<HashMap<SubnetId, SourceHead>, StorageError> {\n        self.fullnode_store.get_checkpoint()\n    }\n\n    fn get_source_stream_certificates_from_position(\n        &self,\n        from: 
CertificateSourceStreamPosition,\n        limit: usize,\n    ) -> Result<Vec<(CertificateDelivered, CertificateSourceStreamPosition)>, StorageError> {\n        self.fullnode_store\n            .get_source_stream_certificates_from_position(from, limit)\n    }\n\n    fn get_target_stream_certificates_from_position(\n        &self,\n        position: CertificateTargetStreamPosition,\n        limit: usize,\n    ) -> Result<Vec<(CertificateDelivered, CertificateTargetStreamPosition)>, StorageError> {\n        self.fullnode_store\n            .get_target_stream_certificates_from_position(position, limit)\n    }\n\n    fn get_target_source_subnet_list(\n        &self,\n        target_subnet_id: &SubnetId,\n    ) -> Result<Vec<SubnetId>, StorageError> {\n        self.fullnode_store\n            .get_target_source_subnet_list(target_subnet_id)\n    }\n}\n\n#[async_trait]\nimpl WriteStore for ValidatorStore {\n    async fn insert_certificate_delivered(\n        &self,\n        certificate: &CertificateDelivered,\n    ) -> Result<CertificatePositions, StorageError> {\n        let position = self\n            .fullnode_store\n            .insert_certificate_delivered(certificate)\n            .await?;\n\n        if let Ok(Some(pending_id)) = self\n            .pending_tables\n            .pending_pool_index\n            .get(&certificate.certificate.id)\n        {\n            _ = self.pending_tables.pending_pool.delete(&pending_id);\n            _ = self\n                .pending_tables\n                .pending_pool_index\n                .delete(&certificate.certificate.id);\n\n            STORAGE_PENDING_POOL_COUNT.dec();\n        }\n\n        if let Ok(Some(next_certificate)) = self\n            .pending_tables\n            .precedence_pool\n            .get(&certificate.certificate.id)\n        {\n            debug!(\n                \"Delivered certificate {} unlocks {} for broadcast\",\n                certificate.certificate.id, next_certificate.id\n            );\n   
         self.insert_pending_certificate(&next_certificate).await?;\n            self.pending_tables\n                .precedence_pool\n                .delete(&certificate.certificate.id)?;\n\n            STORAGE_PRECEDENCE_POOL_COUNT.dec();\n            STORAGE_PENDING_POOL_COUNT.inc();\n        }\n\n        Ok(position)\n    }\n\n    async fn insert_certificates_delivered(\n        &self,\n        certificates: &[CertificateDelivered],\n    ) -> Result<(), StorageError> {\n        self.fullnode_store\n            .insert_certificates_delivered(certificates)\n            .await\n    }\n}\n"
  },
  {
    "path": "crates/topos-tce-storage/src/validator/tables.rs",
    "content": "use std::{\n    fs::create_dir_all,\n    path::Path,\n    sync::atomic::{AtomicU64, Ordering},\n};\n\nuse bincode::Options;\nuse rocksdb::ColumnFamilyDescriptor;\nuse topos_core::{\n    types::ProofOfDelivery,\n    uci::{Certificate, CertificateId},\n};\nuse tracing::warn;\n\nuse crate::{\n    constant::cfs,\n    rocks::{\n        constants,\n        db::{default_options, init_with_cfs},\n        db_column::DBColumn,\n    },\n    types::{CertificatesColumn, EpochId, EpochSummary, PendingCertificatesColumn, StreamsColumn},\n    PendingCertificateId,\n};\n\n/// Pending data used by Validator\n///\n/// It contains data that is not yet delivered.\n///\n/// When a [`Certificate`] is received, it can either be added to the pending\n/// pool or to the precedence pool.\n///\n/// Prior to be inserted in either of the pending or precedence pools, a [`Certificate`]\n/// needs to be validated. A validated certificate means that the proof of the certificate\n/// has be verified using FROST.\n///\n/// ## Pending pool\n///\n/// The pending pool stores certificates that are ready to be broadcast.\n/// A [`Certificate`] is ready to be broadcast when it has been validated and its previous [`Certificate`] is\n/// already delivered.\n///\n/// The ordering inside the pending pool is a FIFO queue, each [`Certificate`] in the pool gets\n/// assigned to a unique [`PendingCertificateId`](type@crate::PendingCertificateId).\n///\n/// ## Precedence pool\n///\n/// The precedence pool stores certificates that are not yet ready to be broadcast.\n/// Typically waiting for its previous [`Certificate`] to be delivered.\n/// However, the [`Certificate`] is already validated.\n///\n/// When a [`Certificate`] is delivered, the [`ValidatorStore`](struct@super::ValidatorStore) will\n/// check for any child [`Certificate`] in the precedence pool waiting to be promoted to the\n/// pending pool in order to be broadcast.\n///\npub struct ValidatorPendingTables {\n    pub(crate) 
next_pending_id: AtomicU64,\n    pub(crate) pending_pool: PendingCertificatesColumn,\n    pub(crate) pending_pool_index: DBColumn<CertificateId, PendingCertificateId>,\n    pub(crate) precedence_pool: DBColumn<CertificateId, Certificate>,\n}\n\nimpl ValidatorPendingTables {\n    /// Open the [`ValidatorPendingTables`] at the given path.\n    pub fn open(path: &Path) -> Self {\n        let path = path.join(\"pending\");\n        if !path.exists() {\n            warn!(\"Path {:?} does not exist, creating it\", path);\n            create_dir_all(&path).expect(\"Cannot create ValidatorPendingTables directory\");\n        }\n        let cfs = vec![\n            ColumnFamilyDescriptor::new(cfs::PENDING_POOL, default_options()),\n            ColumnFamilyDescriptor::new(cfs::PENDING_POOL_INDEX, default_options()),\n            ColumnFamilyDescriptor::new(cfs::PRECEDENCE_POOL, default_options()),\n        ];\n\n        let db = init_with_cfs(&path, default_options(), cfs)\n            .unwrap_or_else(|_| panic!(\"Cannot open DB at {:?}\", path));\n        let pending_pool = DBColumn::reopen(&db, cfs::PENDING_POOL);\n        let next_pending_id = {\n            let cf = pending_pool\n                .rocksdb\n                .cf_handle(cfs::PENDING_POOL)\n                .expect(\"Cannot get cf handle for pending pool\");\n            let mut pending_iterator = pending_pool.rocksdb.raw_iterator_cf(&cf);\n\n            pending_iterator.seek_to_last();\n            if pending_iterator.valid() {\n                AtomicU64::new(\n                    pending_iterator\n                        .key()\n                        .map(|key| {\n                            bincode::DefaultOptions::new()\n                                .with_big_endian()\n                                .with_fixint_encoding()\n                                .deserialize(key)\n                                .unwrap_or(0)\n                        })\n                        .unwrap_or(0),\n               
 )\n            } else {\n                AtomicU64::new(0)\n            }\n        };\n\n        next_pending_id.fetch_add(1, Ordering::Relaxed);\n\n        Self {\n            next_pending_id,\n            pending_pool,\n            pending_pool_index: DBColumn::reopen(&db, cfs::PENDING_POOL_INDEX),\n            precedence_pool: DBColumn::reopen(&db, cfs::PRECEDENCE_POOL),\n        }\n    }\n}\n\n/// Data that shouldn't be purged at all.\n// TODO: TP-774: Rename and move to FullNode domain\npub struct ValidatorPerpetualTables {\n    pub(crate) certificates: CertificatesColumn,\n    pub(crate) streams: StreamsColumn,\n    #[allow(unused)]\n    epoch_chain: DBColumn<EpochId, EpochSummary>,\n    pub(crate) unverified: DBColumn<CertificateId, ProofOfDelivery>,\n}\n\nimpl ValidatorPerpetualTables {\n    pub fn open(path: &Path) -> Self {\n        let path = path.join(\"perpetual\");\n        if !path.exists() {\n            warn!(\"Path {:?} does not exist, creating it\", path);\n            create_dir_all(&path).expect(\"Cannot create ValidatorPerpetualTables directory\");\n        }\n        let mut options_stream = default_options();\n        options_stream.set_prefix_extractor(rocksdb::SliceTransform::create_fixed_prefix(\n            constants::SOURCE_STREAMS_PREFIX_SIZE,\n        ));\n\n        let cfs = vec![\n            ColumnFamilyDescriptor::new(cfs::CERTIFICATES, default_options()),\n            ColumnFamilyDescriptor::new(cfs::STREAMS, options_stream),\n            ColumnFamilyDescriptor::new(cfs::EPOCH_CHAIN, default_options()),\n            ColumnFamilyDescriptor::new(cfs::UNVERIFIED, default_options()),\n        ];\n\n        let db = init_with_cfs(&path, default_options(), cfs).unwrap_or_else(|e| {\n            panic!(\"Cannot open DB at {:?} => error {:?}\", path, e);\n        });\n\n        Self {\n            certificates: DBColumn::reopen(&db, cfs::CERTIFICATES),\n            streams: DBColumn::reopen(&db, cfs::STREAMS),\n            epoch_chain: 
DBColumn::reopen(&db, cfs::EPOCH_CHAIN),\n            unverified: DBColumn::reopen(&db, cfs::UNVERIFIED),\n        }\n    }\n}\n"
  },
  {
    "path": "crates/topos-tce-synchronizer/Cargo.toml",
    "content": "[package]\nname = \"topos-tce-synchronizer\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n[lints]\nworkspace = true\n\n[dependencies]\nasync-trait.workspace = true\nfutures.workspace = true\nthiserror.workspace = true\ntokio = { workspace = true, features = [\"full\"] }\ntokio-stream.workspace = true\ntokio-util.workspace = true\ntonic.workspace = true\ntracing-subscriber = { workspace = true, features = [\"env-filter\", \"json\", \"ansi\", \"fmt\"] }\ntracing.workspace = true\nuuid = { workspace = true, features = [\"v4\", \"serde\"] }\n\ntopos-core = { workspace = true, features = [\"api\"] }\ntopos-config = { path = \"../topos-config/\" }\ntopos-p2p = { path = \"../topos-p2p\" }\ntopos-tce-gatekeeper = { path = \"../topos-tce-gatekeeper/\" }\ntopos-tce-storage = { path = \"../topos-tce-storage/\" }\n\n[dev-dependencies]\nlibp2p.workspace = true\nmockall = \"0.11\"\nasync-trait.workspace = true\ntopos-test-sdk = { path = \"../topos-test-sdk/\" }\nrstest.workspace = true\n\ntest-log.workspace = true\n\nenv_logger.workspace = true\n"
  },
  {
    "path": "crates/topos-tce-synchronizer/src/builder.rs",
    "content": "use std::{future::IntoFuture, sync::Arc};\n\nuse tokio::{spawn, sync::mpsc};\nuse tokio_stream::wrappers::ReceiverStream;\nuse tokio_util::sync::CancellationToken;\nuse topos_p2p::NetworkClient;\nuse topos_tce_storage::validator::ValidatorStore;\nuse tracing::Instrument;\n\nuse crate::{\n    checkpoints_collector::{CheckpointSynchronizer, CheckpointsCollectorError},\n    Synchronizer, SynchronizerError, SynchronizerEvent,\n};\nuse topos_config::tce::synchronization::SynchronizationConfig;\n\npub struct SynchronizerBuilder {\n    network_client: Option<NetworkClient>,\n    store: Option<Arc<ValidatorStore>>,\n    config: SynchronizationConfig,\n    /// Size of the channel producing events (default: 100)\n    event_channel_size: usize,\n    /// CancellationToken used to trigger shutdown of the Synchronizer\n    shutdown: Option<CancellationToken>,\n}\n\nimpl Default for SynchronizerBuilder {\n    fn default() -> Self {\n        Self {\n            network_client: None,\n            store: None,\n            config: SynchronizationConfig::default(),\n            event_channel_size: 100,\n            shutdown: None,\n        }\n    }\n}\n\nimpl SynchronizerBuilder {\n    pub fn build(\n        mut self,\n    ) -> Result<(Synchronizer, ReceiverStream<SynchronizerEvent>), SynchronizerError> {\n        let shutdown = if let Some(shutdown) = self.shutdown.take() {\n            shutdown\n        } else {\n            return Err(SynchronizerError::CheckpointsCollectorError(\n                CheckpointsCollectorError::NoStore,\n            ))?;\n        };\n        let (events, events_recv) = mpsc::channel(self.event_channel_size);\n        let (sync_events, checkpoints_collector_stream) = mpsc::channel(self.event_channel_size);\n\n        let checkpoints_collector_stream = ReceiverStream::new(checkpoints_collector_stream);\n\n        spawn(\n            CheckpointSynchronizer {\n                config: self.config,\n                network: if let 
Some(network) = self.network_client {\n                    network\n                } else {\n                    return Err(SynchronizerError::CheckpointsCollectorError(\n                        CheckpointsCollectorError::NoNetworkClient,\n                    ))?;\n                },\n\n                store: if let Some(store) = self.store {\n                    store\n                } else {\n                    return Err(SynchronizerError::CheckpointsCollectorError(\n                        CheckpointsCollectorError::NoStore,\n                    ))?;\n                },\n                current_request_id: None,\n                shutdown: shutdown.child_token(),\n                events: sync_events,\n            }\n            .into_future()\n            .in_current_span(),\n        );\n\n        Ok((\n            Synchronizer {\n                shutdown,\n                events,\n                checkpoints_collector_stream,\n            },\n            ReceiverStream::new(events_recv),\n        ))\n    }\n}\n\nimpl SynchronizerBuilder {\n    pub fn with_store(mut self, store: Arc<ValidatorStore>) -> Self {\n        self.store = Some(store);\n\n        self\n    }\n\n    pub fn with_network_client(mut self, network_client: NetworkClient) -> Self {\n        self.network_client = Some(network_client);\n\n        self\n    }\n\n    pub fn with_config(mut self, config: SynchronizationConfig) -> Self {\n        self.config = config;\n\n        self\n    }\n\n    pub fn with_shutdown(mut self, shutdown: CancellationToken) -> Self {\n        self.shutdown = Some(shutdown);\n\n        self\n    }\n}\n"
  },
  {
    "path": "crates/topos-tce-synchronizer/src/checkpoints_collector/error.rs",
    "content": "use thiserror::Error;\nuse tokio::sync::oneshot::error::RecvError;\n\n#[derive(Error, Debug)]\npub enum CheckpointsCollectorError {\n    #[error(\"Unable to start the CheckpointsCollector\")]\n    UnableToStart,\n\n    #[error(\"Unable to start the CheckpointsCollector: No gatekeeper client provided\")]\n    NoGatekeeperClient,\n\n    #[error(\"Unable to start the CheckpointsCollector: No network client provided\")]\n    NoNetworkClient,\n\n    #[error(\"Error while dealing with Start command: already starting\")]\n    AlreadyStarting,\n\n    #[error(\"Error while trying to fetch random peers\")]\n    UnableToFetchRandomPeer,\n\n    #[error(transparent)]\n    OneshotCommunicationChannel(#[from] RecvError),\n\n    #[error(\"Unable to start the CheckpointsCollector: No store provided\")]\n    NoStore,\n}\n"
  },
  {
    "path": "crates/topos-tce-synchronizer/src/checkpoints_collector/mod.rs",
    "content": "use std::{\n    collections::{HashMap, HashSet},\n    future::IntoFuture,\n    str::FromStr,\n    sync::Arc,\n};\n\nuse futures::{future::BoxFuture, FutureExt};\nuse tokio::sync::mpsc;\nuse tokio_util::sync::CancellationToken;\nuse tonic::Status;\nuse topos_core::{\n    api::grpc::{\n        self,\n        shared::v1::Uuid as APIUuid,\n        tce::v1::{\n            synchronizer_service_client::SynchronizerServiceClient,\n            synchronizer_service_server::SynchronizerServiceServer, CheckpointRequest,\n            CheckpointResponse, FetchCertificatesRequest,\n        },\n    },\n    errors::GrpcParsingError,\n    types::ProofOfDelivery,\n    uci::{Certificate, CertificateId, SubnetId},\n};\n\nuse topos_config::tce::synchronization::SynchronizationConfig;\nuse topos_p2p::{error::P2PError, NetworkClient, PeerId};\nuse topos_tce_storage::{errors::StorageError, store::ReadStore, validator::ValidatorStore};\nuse tracing::{debug, error, info, warn};\nuse uuid::Uuid;\n\nmod error;\n#[cfg(test)]\nmod tests;\n\npub use error::CheckpointsCollectorError;\n\nuse crate::SynchronizerService;\n\npub struct CheckpointSynchronizer {\n    pub(crate) config: SynchronizationConfig,\n\n    pub(crate) network: NetworkClient,\n    #[allow(unused)]\n    pub(crate) store: Arc<ValidatorStore>,\n\n    pub(crate) current_request_id: Option<APIUuid>,\n\n    pub(crate) shutdown: CancellationToken,\n\n    #[allow(dead_code)]\n    pub(crate) events: mpsc::Sender<CheckpointsCollectorEvent>,\n}\n\nimpl IntoFuture for CheckpointSynchronizer {\n    type Output = Result<(), CheckpointsCollectorError>;\n\n    type IntoFuture = BoxFuture<'static, Self::Output>;\n\n    fn into_future(mut self) -> Self::IntoFuture {\n        async move {\n            let mut interval = tokio::time::interval(tokio::time::Duration::from_secs(\n                self.config.interval_seconds,\n            ));\n\n            loop {\n                tokio::select! 
{\n                    _tick = interval.tick() => {\n                        // On every tick, checking if there is a pending synchronization\n                        // If there is, skip\n                        // If there is not,\n                        //  1. Ask a random peer for the diff between local and its latest checkpoint\n                        //  2. Validate the PoD diff, if fail, go back to 1\n                        //  3. Based on the diff, check if we already have some of the certs\n                        //      - Fetch every missing certs from one peer\n                        //      - Each certs triggers a precedence check\n                        if self.current_request_id.is_none() {\n                            if let Err(error) = self.initiate_request().await {\n                                warn!(\"Unsuccessful sync due to: {}\", error);\n                            }\n                        }\n                    }\n\n                    _ = self.shutdown.cancelled() => { break; }\n\n                }\n            }\n\n            Ok(())\n        }\n        .boxed()\n    }\n}\n\n#[derive(Debug, thiserror::Error)]\nenum SyncError {\n    #[error(\"Unable to fetch target peer from network layer\")]\n    UnableToFetchTargetPeer,\n\n    #[error(\"Unable to parse subnet id\")]\n    // TODO: Check if needed after full merge of grpc over p2p\n    #[allow(unused)]\n    UnableToParseSubnetId,\n\n    #[error(transparent)]\n    GrpcParsingError(#[from] GrpcParsingError),\n\n    #[error(transparent)]\n    CertificateConversion(#[from] topos_core::api::grpc::shared::v1_conversions_certificate::Error),\n\n    #[error(transparent)]\n    SubnetConversion(#[from] topos_core::api::grpc::shared::v1_conversions_subnet::Error),\n\n    #[error(transparent)]\n    Store(#[from] StorageError),\n\n    #[error(transparent)]\n    Network(#[from] P2PError),\n\n    #[error(transparent)]\n    Grpc(#[from] Status),\n}\n\nimpl CheckpointSynchronizer {\n    async fn 
ask_for_checkpoint(\n        &self,\n        peer: PeerId,\n    ) -> Result<HashMap<SubnetId, Vec<ProofOfDelivery>>, SyncError> {\n        let request_id = Uuid::new_v4();\n\n        let checkpoint: Vec<grpc::tce::v1::ProofOfDelivery> = {\n            let certificate_ids = self\n                .store\n                .get_checkpoint()?\n                .values()\n                .map(|head| head.certificate_id)\n                .collect::<Vec<_>>();\n\n            self.store\n                .get_certificates(&certificate_ids[..])?\n                .into_iter()\n                .filter_map(|value| {\n                    value.map(|delivered_certificate| delivered_certificate.proof_of_delivery)\n                })\n                .map(Into::into)\n                .collect()\n        };\n\n        debug!(\n            \"Asking {} for latest checkpoint (request_id: {}), with local checkpoint: {:?}\",\n            peer, request_id, checkpoint\n        );\n\n        let req = CheckpointRequest {\n            request_id: Some(request_id.into()),\n            checkpoint,\n            limit_per_subnet: self\n                .config\n                .limit_per_subnet\n                .try_into()\n                .unwrap_or(SynchronizationConfig::LIMIT_PER_SUBNET as u64),\n        };\n\n        let mut client: SynchronizerServiceClient<_> = self\n            .network\n            .new_grpc_client::<SynchronizerServiceClient<_>, SynchronizerServiceServer<SynchronizerService>>(peer)\n            .await?;\n\n        let response: CheckpointResponse = client.fetch_checkpoint(req).await?.into_inner();\n\n        let diff = response\n            .checkpoint_diff\n            .into_iter()\n            .map(|v| {\n                let subnet =\n                    SubnetId::from_str(&v.key[..]).map_err(|_| SyncError::UnableToParseSubnetId)?;\n\n                let proofs = v\n                    .value\n                    .into_iter()\n                    .map(TryInto::try_into)\n 
                   .collect::<Result<Vec<_>, _>>()?;\n                Ok::<_, SyncError>((subnet, proofs))\n            })\n            .collect::<Result<HashMap<_, _>, _>>()?;\n\n        Ok(diff)\n    }\n\n    fn insert_unverified_proofs(\n        &self,\n        diff: HashMap<SubnetId, Vec<ProofOfDelivery>>,\n    ) -> Result<Vec<Vec<CertificateId>>, SyncError> {\n        let mut certs: HashSet<CertificateId> = HashSet::new();\n        for (subnet, proofs) in diff {\n            let len = proofs.len();\n            let unverified_certs = self.store.insert_unverified_proofs(proofs)?;\n\n            debug!(\n                \"Persist {} unverified proof of delivery for {}\",\n                len, subnet\n            );\n            certs.extend(&unverified_certs[..]);\n        }\n\n        // Chunk certs\n        let mut chunked_certs: Vec<Vec<CertificateId>> = vec![];\n\n        let certs = certs.into_iter().collect::<Vec<_>>();\n\n        for certs in certs.chunks(10) {\n            chunked_certs.push(certs.to_vec());\n        }\n\n        Ok(chunked_certs)\n    }\n\n    async fn fetch_certificates(\n        &self,\n        certificate_ids: Vec<CertificateId>,\n    ) -> Result<Vec<Certificate>, SyncError> {\n        let target_peer = self\n            .network\n            .random_known_peer()\n            .await\n            .map_err(|_| SyncError::UnableToFetchTargetPeer)?;\n\n        let request_id: Option<APIUuid> = Some(Uuid::new_v4().into());\n        let req = FetchCertificatesRequest {\n            request_id,\n            certificates: certificate_ids\n                .iter()\n                .map(|cert| (*cert.as_array()).into())\n                .collect(),\n        };\n\n        debug!(\n            \"Ask {} for certificates payload: {:?}\",\n            target_peer, certificate_ids\n        );\n        let mut client: SynchronizerServiceClient<_> = self\n            .network\n            .new_grpc_client::<SynchronizerServiceClient<_>, 
SynchronizerServiceServer<SynchronizerService>>(target_peer)\n            .await?;\n\n        let response = client.fetch_certificates(req).await?.into_inner();\n\n        let certificates: Result<Vec<Certificate>, _> = response\n            .certificates\n            .into_iter()\n            .map(TryInto::try_into)\n            .collect();\n\n        Ok(certificates?)\n    }\n\n    async fn initiate_request(&mut self) -> Result<(), SyncError> {\n        //  1. Ask a random peer for the diff between local and its latest checkpoint\n        let target_peer = self\n            .network\n            .random_known_peer()\n            .await\n            .map_err(|_| SyncError::UnableToFetchTargetPeer)?;\n\n        let diff = self.ask_for_checkpoint(target_peer).await?;\n\n        let certificates_to_catchup = self.insert_unverified_proofs(diff)?;\n        info!(\"Certificates to catchup: {}\", certificates_to_catchup.len());\n\n        for certificates in certificates_to_catchup {\n            let certificates = self.fetch_certificates(certificates).await?;\n\n            // TODO: verify every certificates\n            for certificate in certificates {\n                let store = self.store.clone();\n                tokio::spawn(async move {\n                    // Validate\n                    // Check precedence\n                    let certificate_id = certificate.id;\n                    match store.synchronize_certificate(certificate).await {\n                        Ok(_) => debug!(\"Certificate {} synchronized\", certificate_id),\n                        Err(StorageError::InternalStorage(topos_tce_storage::errors::InternalStorageError::CertificateAlreadyExists)) => {}\n                        Err(e) => error!(\"Failed to sync because of: {:?}\", e),\n                    }\n                });\n            }\n        }\n        Ok(())\n    }\n}\n\npub enum CheckpointsCollectorEvent {}\n"
  },
  {
    "path": "crates/topos-tce-synchronizer/src/checkpoints_collector/tests/integration.rs",
    "content": "use std::time::Duration;\n\nuse rstest::rstest;\nuse test_log::test;\nuse topos_core::{\n    api::grpc::tce::v1::{\n        synchronizer_service_client::SynchronizerServiceClient,\n        synchronizer_service_server::SynchronizerServiceServer, FetchCertificatesRequest,\n    },\n    types::CertificateDelivered,\n};\n\nuse topos_test_sdk::{\n    certificates::create_certificate_chain,\n    tce::{create_network, NodeConfig},\n};\nuse uuid::Uuid;\n\nuse crate::SynchronizerService;\n\n#[rstest]\n#[test(tokio::test)]\n#[timeout(Duration::from_secs(5))]\nasync fn network_test() {\n    let subnet = topos_test_sdk::constants::SOURCE_SUBNET_ID_1;\n    let certificates: Vec<CertificateDelivered> =\n        create_certificate_chain(subnet, &[topos_test_sdk::constants::TARGET_SUBNET_ID_1], 1);\n\n    let boot_node = NodeConfig::from_seed(1);\n    let cluster = create_network(5, &certificates[..]).await;\n    let boot_node = cluster\n        .get(&boot_node.keypair.public().to_peer_id())\n        .unwrap()\n        .node_config\n        .clone();\n\n    let cfg = NodeConfig {\n        seed: 6,\n        minimum_cluster_size: 1,\n        ..Default::default()\n    };\n\n    let (client, _, _) = cfg\n        .bootstrap(&[cfg.clone(), boot_node.clone()], None)\n        .await\n        .unwrap();\n    use topos_core::api::grpc::shared::v1::Uuid as APIUuid;\n\n    let peer = boot_node.keypair.public().to_peer_id();\n\n    let mut client: SynchronizerServiceClient<_> = client\n        .new_grpc_client::<SynchronizerServiceClient<_>, SynchronizerServiceServer<SynchronizerService>>(\n            peer,\n        )\n        .await\n        .unwrap();\n\n    let request_id: APIUuid = Uuid::new_v4().into();\n    let req = FetchCertificatesRequest {\n        request_id: Some(request_id),\n        certificates: certificates\n            .clone()\n            .into_iter()\n            .map(|c| c.certificate.id.into())\n            .collect(),\n    };\n\n    let res = 
client.fetch_certificates(req).await.unwrap().into_inner();\n\n    let expected = certificates\n        .into_iter()\n        .map(|c| c.certificate.into())\n        .collect::<Vec<topos_core::api::grpc::uci::v1::Certificate>>();\n\n    assert_eq!(res.certificates, expected);\n}\n"
  },
  {
    "path": "crates/topos-tce-synchronizer/src/checkpoints_collector/tests.rs",
    "content": "use std::time::Duration;\n\nuse rstest::rstest;\nuse topos_core::{\n    api::grpc::tce::v1::{\n        synchronizer_service_client::SynchronizerServiceClient,\n        synchronizer_service_server::SynchronizerServiceServer, CheckpointMapFieldEntry,\n        CheckpointRequest, CheckpointResponse, FetchCertificatesRequest,\n    },\n    types::CertificateDelivered,\n};\n\nuse topos_p2p::GrpcRouter;\nuse topos_test_sdk::{\n    certificates::create_certificate_chain,\n    storage::{create_fullnode_store, create_validator_store},\n    tce::{create_network, NodeConfig},\n};\n\nuse uuid::Uuid;\n\nuse crate::SynchronizerService;\n\nmod integration;\n\n#[test]\nfn encode() {\n    use topos_core::api::grpc::shared::v1::Uuid as APIUuid;\n    let request_id: APIUuid = Uuid::new_v4().into();\n    let req = CheckpointRequest {\n        request_id: Some(request_id),\n        checkpoint: vec![],\n        limit_per_subnet: 100,\n    };\n\n    let x: Vec<u8> = req.clone().into();\n    let y: CheckpointRequest = x.try_into().unwrap();\n    assert_eq!(y, req);\n\n    let subnet = topos_test_sdk::constants::SOURCE_SUBNET_ID_1;\n    let certificates: Vec<CertificateDelivered> =\n        create_certificate_chain(subnet, &[topos_test_sdk::constants::TARGET_SUBNET_ID_1], 1);\n\n    let cert = certificates.first().cloned().unwrap();\n    let request_id: APIUuid = Uuid::new_v4().into();\n    let req = CheckpointResponse {\n        request_id: Some(request_id),\n        checkpoint_diff: vec![CheckpointMapFieldEntry {\n            key: subnet.to_string(),\n            value: vec![cert.proof_of_delivery.into()],\n        }],\n    };\n\n    let x: Vec<u8> = req.clone().into();\n    let y: CheckpointResponse = x.try_into().unwrap();\n    assert_eq!(y, req);\n}\n\n#[rstest]\n#[test_log::test(tokio::test)]\n#[timeout(Duration::from_secs(10))]\nasync fn check_fetch_certificates() {\n    let subnet = topos_test_sdk::constants::SOURCE_SUBNET_ID_1;\n    let certificates: 
Vec<CertificateDelivered> =\n        create_certificate_chain(subnet, &[topos_test_sdk::constants::TARGET_SUBNET_ID_1], 1);\n\n    let boot_node = NodeConfig::from_seed(1);\n    let cluster = create_network(5, &certificates[..]).await;\n    let boot_node = cluster\n        .get(&boot_node.keypair.public().to_peer_id())\n        .unwrap()\n        .node_config\n        .clone();\n\n    let cfg = NodeConfig {\n        seed: 6,\n        minimum_cluster_size: 3,\n        ..Default::default()\n    };\n\n    let fullnode_store = create_fullnode_store(&[]).await;\n    let validator_store =\n        create_validator_store(&[], futures::future::ready(fullnode_store.clone())).await;\n\n    let router = GrpcRouter::new(tonic::transport::Server::builder()).add_service(\n        SynchronizerServiceServer::new(SynchronizerService {\n            validator_store: validator_store.clone(),\n        }),\n    );\n\n    let (client, _, _) = cfg\n        .bootstrap(&[cfg.clone(), boot_node.clone()], Some(router))\n        .await\n        .unwrap();\n\n    use topos_core::api::grpc::shared::v1::Uuid as APIUuid;\n\n    let request_id: APIUuid = Uuid::new_v4().into();\n    let req = FetchCertificatesRequest {\n        request_id: Some(request_id),\n        certificates: certificates\n            .clone()\n            .into_iter()\n            .map(|c| c.certificate.id.into())\n            .collect(),\n    };\n\n    let mut client: SynchronizerServiceClient<_> = client\n        .new_grpc_client::<SynchronizerServiceClient<_>, SynchronizerServiceServer<SynchronizerService>>(boot_node.keypair.public().to_peer_id())\n        .await\n        .unwrap();\n\n    let res = client.fetch_certificates(req).await;\n    assert!(res.is_ok());\n    let res = res.unwrap().into_inner();\n\n    let expected = certificates\n        .into_iter()\n        .map(|c| c.certificate.into())\n        .collect::<Vec<topos_core::api::grpc::uci::v1::Certificate>>();\n\n    assert_eq!(res.certificates, 
expected);\n}\n\n#[test]\nfn sync_unordered_certificates() {}\n\n#[test]\nfn sync_conflicting_certificate() {}\n\n#[test]\nfn fetch_certificate_failure() {}\n\n#[test]\nfn missing_certificate_for_pod() {}\n"
  },
  {
    "path": "crates/topos-tce-synchronizer/src/lib.rs",
    "content": "use std::{cmp::max, future::IntoFuture, sync::Arc};\n\nuse builder::SynchronizerBuilder;\nuse checkpoints_collector::{CheckpointsCollectorError, CheckpointsCollectorEvent};\nuse futures::{future::BoxFuture, FutureExt};\nuse thiserror::Error;\nuse tokio::sync::{\n    mpsc,\n    oneshot::{self, error::RecvError},\n};\nuse tokio_stream::StreamExt;\n\nmod builder;\nmod checkpoints_collector;\n\nuse tokio_stream::wrappers::ReceiverStream;\nuse tokio_util::sync::CancellationToken;\nuse tonic::{Request, Response, Status};\nuse topos_config::tce::synchronization::SynchronizationConfig;\nuse topos_core::{\n    api::grpc::{\n        shared::v1::positions::SourceStreamPosition,\n        tce::v1::{\n            synchronizer_service_server::SynchronizerService as GrpcSynchronizerService,\n            CheckpointMapFieldEntry, CheckpointRequest, CheckpointResponse,\n            FetchCertificatesRequest, FetchCertificatesResponse, ProofOfDelivery, SignedReady,\n        },\n    },\n    uci::CertificateId,\n};\nuse topos_tce_storage::{store::ReadStore, validator::ValidatorStore};\nuse tracing::{debug, error, info, trace, warn};\nuse uuid::Uuid;\n\npub struct Synchronizer {\n    pub(crate) shutdown: CancellationToken,\n    #[allow(dead_code)]\n    pub(crate) events: mpsc::Sender<SynchronizerEvent>,\n\n    pub(crate) checkpoints_collector_stream: ReceiverStream<CheckpointsCollectorEvent>,\n}\n\nimpl IntoFuture for Synchronizer {\n    type Output = Result<(), SynchronizerError>;\n\n    type IntoFuture = BoxFuture<'static, Self::Output>;\n\n    fn into_future(mut self) -> Self::IntoFuture {\n        async move {\n            let shutdowned: Option<SynchronizerError> = loop {\n                tokio::select! 
{\n                    _ = self.shutdown.cancelled() => {\n                        break None\n                    }\n\n                    _checkpoint_event = self.checkpoints_collector_stream.next() => {}\n                }\n            };\n\n            if let Some(_error) = shutdowned {\n                warn!(\"Shutting down Synchronizer due to error...\");\n            } else {\n                info!(\"Shutting down Synchronizer...\");\n            }\n\n            Ok(())\n        }\n        .boxed()\n    }\n}\n\nimpl Synchronizer {\n    pub fn builder() -> SynchronizerBuilder {\n        SynchronizerBuilder::default()\n    }\n}\n\n#[derive(Error, Debug)]\npub enum SynchronizerError {\n    #[error(\"Error while dealing with CheckpointsCollector: {0}\")]\n    CheckpointsCollectorError(#[from] CheckpointsCollectorError),\n\n    #[error(\"Error while dealing with Start command: unable to start\")]\n    UnableToStart,\n\n    #[error(\"Error while dealing with Start command: already starting\")]\n    AlreadyStarting,\n\n    #[error(\"Error while dealing with state locking: unable to lock status\")]\n    UnableToLockStatus,\n\n    #[error(transparent)]\n    OneshotCommunicationChannel(#[from] RecvError),\n\n    #[error(\"Unable to execute shutdown on the Synchronizer: {0}\")]\n    ShutdownCommunication(mpsc::error::SendError<oneshot::Sender<()>>),\n\n    #[error(\"No network protocol receiver set\")]\n    NoProtocolReceiver,\n}\n\npub enum SynchronizerEvent {}\n\n#[derive(Clone)]\npub struct SynchronizerService {\n    pub validator_store: Arc<ValidatorStore>,\n}\n\n#[async_trait::async_trait]\nimpl GrpcSynchronizerService for SynchronizerService {\n    async fn fetch_certificates(\n        &self,\n        request: Request<FetchCertificatesRequest>,\n    ) -> Result<Response<FetchCertificatesResponse>, Status> {\n        let request = request.into_inner();\n        let certificate_ids: Vec<CertificateId> = request\n            .certificates\n            .into_iter()\n 
           .map(|c| c.try_into())\n            .collect::<Result<Vec<_>, _>>()\n            .map_err(|_| Status::invalid_argument(\"Unable to parse certificates\"))?;\n\n        let response =\n            if let Ok(certs) = self.validator_store.get_certificates(&certificate_ids[..]) {\n                let certs: Vec<_> = certs\n                    .into_iter()\n                    .filter_map(|v| v.map(|c| c.certificate.into()))\n                    .collect::<Vec<_>>();\n\n                FetchCertificatesResponse {\n                    request_id: request.request_id,\n                    certificates: certs,\n                }\n            } else {\n                FetchCertificatesResponse {\n                    request_id: request.request_id,\n                    certificates: vec![],\n                }\n            };\n        Ok(Response::new(response))\n    }\n\n    async fn fetch_checkpoint(\n        &self,\n        request: Request<CheckpointRequest>,\n    ) -> Result<Response<CheckpointResponse>, Status> {\n        let request = request.into_inner();\n        let id = request\n            .request_id\n            .map(|id| id.into())\n            .unwrap_or(Uuid::new_v4());\n        debug!(\"Received request for checkpoint (request_id: {})\", id);\n\n        let limit_per_subnet: usize = max(\n            request\n                .limit_per_subnet\n                .try_into()\n                .unwrap_or(SynchronizationConfig::LIMIT_PER_SUBNET),\n            SynchronizationConfig::LIMIT_PER_SUBNET,\n        );\n\n        let res: Result<Vec<_>, _> = request\n            .checkpoint\n            .into_iter()\n            .map(|v| v.try_into())\n            .collect();\n\n        let res = match res {\n            Err(error) => {\n                error!(\"Invalid checkpoint for request {}: {}\", id, error);\n                return Err(Status::invalid_argument(\"Invalid checkpoint\"));\n            }\n            Ok(value) => value,\n        };\n\n        
debug!(\"Request {} contains {} proof_of_delivery\", id, res.len());\n        trace!(\"Request {} contains {:?}\", id, res);\n        let diff = match self\n            .validator_store\n            .get_checkpoint_diff(&res, limit_per_subnet)\n        {\n            Ok(diff) => {\n                debug!(\n                    \"Fetched checkpoint diff from storage for request {}, got {:?}\",\n                    id, diff\n                );\n                diff.into_iter()\n                    .map(|(key, value)| {\n                        let v: Vec<_> = value\n                            .into_iter()\n                            .map(|v| ProofOfDelivery {\n                                delivery_position: Some(SourceStreamPosition {\n                                    source_subnet_id: Some(v.delivery_position.subnet_id.into()),\n                                    position: *v.delivery_position.position,\n                                    certificate_id: Some(v.certificate_id.into()),\n                                }),\n                                readies: v\n                                    .readies\n                                    .into_iter()\n                                    .map(|(ready, signature)| SignedReady { ready, signature })\n                                    .collect(),\n                                threshold: v.threshold,\n                            })\n                            .collect();\n                        CheckpointMapFieldEntry {\n                            key: key.to_string(),\n                            value: v,\n                        }\n                    })\n                    .collect()\n            }\n            Err(error) => {\n                error!(\n                    \"Error while fetching checkpoint diff for request {}: {}\",\n                    id, error\n                );\n                Vec::new()\n            }\n        };\n\n        debug!(\n            \"Responding to request 
{} with checkpoint diff containing {:?}\",\n            id,\n            diff.iter()\n                .map(|v| (v.key.clone(), v.value.len()))\n                .collect::<Vec<_>>()\n        );\n\n        let response = CheckpointResponse {\n            request_id: request.request_id,\n            checkpoint_diff: diff,\n        };\n\n        Ok(Response::new(response))\n    }\n}\n"
  },
  {
    "path": "crates/topos-telemetry/Cargo.toml",
    "content": "[package]\nname = \"topos-telemetry\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n[lints]\nworkspace = true\n\n[dependencies]\nopentelemetry.workspace = true\nopentelemetry_sdk = { workspace = true, features = [\"rt-tokio\"] }\ntracing-opentelemetry.workspace = true\ntracing.workspace = true\ntonic.workspace = true\n\ntracing-subscriber = { optional = true, workspace = true, features = [\"env-filter\", \"json\", \"ansi\", \"fmt\"] }\nopentelemetry-otlp = { optional = true, workspace = true, features = [\"grpc-tonic\", \"metrics\", \"tls-roots\"] }\n\nserde = { workspace = true, features = [\"derive\", \"std\"] }\n\n[features]\ntracing = [\"tracing-subscriber\", \"opentelemetry-otlp\"]\n"
  },
  {
    "path": "crates/topos-telemetry/src/lib.rs",
    "content": "use std::{collections::HashMap, str::FromStr};\n\nuse ::tracing::warn;\nuse opentelemetry::{\n    global,\n    propagation::{Extractor, Injector},\n    Context,\n};\nuse serde::{Deserialize, Serialize};\nuse tonic::metadata::MetadataKey;\n\n#[cfg(feature = \"tracing\")]\npub mod tracing;\n\npub struct TonicMetaInjector<'a>(pub &'a mut tonic::metadata::MetadataMap);\npub struct TonicMetaExtractor<'a>(pub &'a tonic::metadata::MetadataMap);\n\nimpl<'a> TonicMetaExtractor<'a> {\n    pub fn extract(&self) -> opentelemetry::Context {\n        global::get_text_map_propagator(|propagator| propagator.extract(self))\n    }\n}\n\nimpl<'a> TonicMetaInjector<'a> {\n    pub fn inject(&mut self, context: &Context) {\n        global::get_text_map_propagator(|propagator| {\n            propagator.inject_context(context, self);\n        })\n    }\n}\n\nimpl<'a> Injector for TonicMetaInjector<'a> {\n    /// Set a key and value in the MetadataMap.  Does nothing if the key or value are not valid inputs\n    fn set(&mut self, key: &str, value: String) {\n        if let Ok(key) = MetadataKey::from_str(key) {\n            if let Ok(val) = value.parse() {\n                self.0.insert(key, val);\n            } else {\n                warn!(\"Invalid value: {}\", value);\n            }\n        } else {\n            warn!(\"Invalid key: {}\", key);\n        }\n    }\n}\n\nimpl<'a> Extractor for TonicMetaExtractor<'a> {\n    fn get(&self, key: &str) -> Option<&str> {\n        self.0.get(key).and_then(|v| v.to_str().ok())\n    }\n\n    fn keys(&self) -> Vec<&str> {\n        self.0\n            .keys()\n            .map(|k| match k {\n                tonic::metadata::KeyRef::Ascii(k) => k.as_str(),\n                tonic::metadata::KeyRef::Binary(k) => k.as_str(),\n            })\n            .collect()\n    }\n}\n\n#[derive(Default, Debug, Clone, Serialize, Deserialize)]\npub struct PropagationContext {\n    context: HashMap<String, String>,\n}\n\nimpl PropagationContext {\n  
  pub fn inject(context: &Context) -> Self {\n        global::get_text_map_propagator(|propagator| {\n            let mut propagation_context = PropagationContext::default();\n            propagator.inject_context(context, &mut propagation_context);\n            propagation_context\n        })\n    }\n\n    pub fn extract(&self) -> opentelemetry::Context {\n        global::get_text_map_propagator(|propagator| propagator.extract(self))\n    }\n}\n\nimpl Injector for PropagationContext {\n    fn set(&mut self, key: &str, value: String) {\n        self.context.insert(key.to_string(), value);\n    }\n}\n\nimpl Extractor for PropagationContext {\n    fn get(&self, key: &str) -> Option<&str> {\n        self.context.get(key).map(|s| s.as_ref())\n    }\n\n    fn keys(&self) -> Vec<&str> {\n        self.context.keys().map(|k| k.as_ref()).collect()\n    }\n}\n"
  },
  {
    "path": "crates/topos-telemetry/src/tracing.rs",
    "content": "use opentelemetry::trace::TracerProvider;\nuse opentelemetry::{global, KeyValue};\nuse opentelemetry_otlp::{SpanExporterBuilder, WithExportConfig};\nuse opentelemetry_sdk::trace::{BatchConfigBuilder, BatchSpanProcessor, SpanLimits};\nuse opentelemetry_sdk::{propagation::TraceContextPropagator, trace::Sampler, Resource};\nuse std::time::Duration;\nuse tracing::Level;\nuse tracing_subscriber::util::TryInitError;\nuse tracing_subscriber::{\n    prelude::__tracing_subscriber_SubscriberExt, util::SubscriberInitExt, EnvFilter, Layer,\n};\n\nfn verbose_to_level(verbose: u8) -> Level {\n    match verbose {\n        0 => Level::ERROR,\n        1 => Level::WARN,\n        2 => Level::INFO,\n        3 => Level::DEBUG,\n        _ => Level::TRACE,\n    }\n}\n\nfn build_resources(otlp_service_name: String, version: &'static str) -> Vec<KeyValue> {\n    let mut resources = Vec::new();\n\n    resources.push(KeyValue::new(\"service.name\", otlp_service_name));\n    resources.push(KeyValue::new(\"service.version\", version));\n\n    let custom_resources: Vec<_> = std::env::var(\"TOPOS_OTLP_TAGS\")\n        .unwrap_or_default()\n        .split(',')\n        // NOTE: limit to 10 tags to avoid exploit\n        .take(10)\n        .filter_map(|tag_raw| {\n            let mut v = tag_raw.splitn(2, '=');\n            match (v.next(), v.next()) {\n                (Some(key), Some(value)) if !key.trim().is_empty() && !value.trim().is_empty() => {\n                    Some(KeyValue::new(\n                        key.trim().to_string(),\n                        value.trim().to_string(),\n                    ))\n                }\n                _ => None,\n            }\n        })\n        .collect();\n\n    resources.extend(custom_resources);\n\n    resources\n}\n\nfn create_filter(verbose: u8) -> EnvFilter {\n    if verbose > 0 {\n        EnvFilter::try_new(format!(\"warn,topos={}\", verbose_to_level(verbose).as_str())).unwrap()\n    } else {\n        
EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new(\"warn,topos=info\"))\n    }\n}\n\n// Setup tracing\n// If otlp agent and otlp service name are provided, opentelemetry collection will be used\npub fn setup_tracing(\n    verbose: u8,\n    no_color: bool,\n    otlp_agent: Option<String>,\n    otlp_service_name: Option<String>,\n    version: &'static str,\n) -> Result<(), TryInitError> {\n    let mut layers = Vec::new();\n\n    let ansi = !no_color;\n\n    layers.push(\n        match std::env::var(\"TOPOS_LOG_FORMAT\")\n            .map(|f| f.to_lowercase())\n            .as_ref()\n            .map(|s| s.as_str())\n        {\n            Ok(\"json\") => tracing_subscriber::fmt::layer()\n                .json()\n                .with_ansi(ansi)\n                .with_filter(create_filter(verbose))\n                .boxed(),\n            Ok(\"pretty\") => tracing_subscriber::fmt::layer()\n                .pretty()\n                .with_ansi(ansi)\n                .with_filter(create_filter(verbose))\n                .boxed(),\n            _ => tracing_subscriber::fmt::layer()\n                .compact()\n                .with_ansi(ansi)\n                .with_filter(create_filter(verbose))\n                .boxed(),\n        },\n    );\n\n    // Setup instrumentation if both otlp agent and otlp service name are provided as arguments\n    if let (Some(otlp_agent), Some(otlp_service_name)) = (otlp_agent, otlp_service_name) {\n        let resources = build_resources(otlp_service_name, version);\n\n        let mut trace_config = opentelemetry_sdk::trace::config();\n\n        trace_config = trace_config.with_sampler(Sampler::AlwaysOn);\n        trace_config = trace_config.with_max_events_per_span(\n            match std::env::var(\"OTLP_MAX_EVENTS_PER_SPAN\") {\n                Ok(v) => v\n                    .parse::<u32>()\n                    .unwrap_or(SpanLimits::default().max_events_per_span),\n                _ => 
SpanLimits::default().max_events_per_span,\n            },\n        );\n        trace_config = trace_config.with_max_attributes_per_span(\n            match std::env::var(\"OTLP_MAX_ATTRIBUTES_PER_SPAN\") {\n                Ok(v) => v\n                    .parse::<u32>()\n                    .unwrap_or(SpanLimits::default().max_attributes_per_span),\n                _ => SpanLimits::default().max_attributes_per_span,\n            },\n        );\n        trace_config =\n            trace_config.with_max_links_per_span(match std::env::var(\"OTLP_MAX_LINK_PER_SPAN\") {\n                Ok(v) => v\n                    .parse::<u32>()\n                    .unwrap_or(SpanLimits::default().max_links_per_span),\n                _ => SpanLimits::default().max_links_per_span,\n            });\n        trace_config = trace_config.with_max_attributes_per_event(\n            match std::env::var(\"OTLP_MAX_ATTRIBUTES_PER_EVENT\") {\n                Ok(v) => v\n                    .parse::<u32>()\n                    .unwrap_or(SpanLimits::default().max_attributes_per_event),\n                _ => SpanLimits::default().max_attributes_per_event,\n            },\n        );\n\n        trace_config = trace_config.with_max_attributes_per_link(\n            match std::env::var(\"OTLP_MAX_ATTRIBUTES_PER_LINK\") {\n                Ok(v) => v\n                    .parse::<u32>()\n                    .unwrap_or(SpanLimits::default().max_attributes_per_link),\n                _ => SpanLimits::default().max_attributes_per_link,\n            },\n        );\n\n        trace_config = trace_config.with_resource(Resource::new(resources));\n\n        let exporter = opentelemetry_otlp::new_exporter()\n            .tonic()\n            .with_endpoint(otlp_agent);\n\n        let batch_processor_config = BatchConfigBuilder::default()\n            .with_scheduled_delay(match std::env::var(\"OTLP_BATCH_SCHEDULED_DELAY\") {\n                Ok(v) => 
Duration::from_millis(v.parse::<u64>().unwrap_or(5_000)),\n                _ => Duration::from_millis(5_000),\n            })\n            .with_max_queue_size(match std::env::var(\"OTLP_BATCH_MAX_QUEUE_SIZE\") {\n                Ok(v) => v.parse::<usize>().unwrap_or(2048),\n                _ => 2048,\n            })\n            .with_max_export_batch_size(match std::env::var(\"OTLP_BATCH_MAX_EXPORTER_BATCH_SIZE\") {\n                Ok(v) => v.parse::<usize>().unwrap_or(512),\n                _ => 512,\n            })\n            .with_max_export_timeout(match std::env::var(\"OTLP_BATCH_EXPORT_TIMEOUT\") {\n                Ok(v) => Duration::from_millis(v.parse::<u64>().unwrap_or(30_000)),\n                _ => Duration::from_millis(30_000),\n            })\n            .with_max_concurrent_exports(\n                match std::env::var(\"OTLP_BATCH_MAX_CONCURRENT_EXPORTS\") {\n                    Ok(v) => v.parse::<usize>().unwrap_or(1),\n                    _ => 1,\n                },\n            );\n\n        let span_exporter: SpanExporterBuilder = exporter.into();\n        let mut provider_builder = opentelemetry_sdk::trace::TracerProvider::builder()\n            .with_span_processor(\n                BatchSpanProcessor::builder(\n                    span_exporter.build_span_exporter().unwrap(),\n                    opentelemetry_sdk::runtime::Tokio,\n                )\n                .with_batch_config(batch_processor_config.build())\n                .build(),\n            );\n\n        provider_builder = provider_builder.with_config(trace_config);\n        let provider = provider_builder.build();\n\n        let tracer = provider.versioned_tracer(\n            \"opentelemetry-otlp\",\n            Some(env!(\"CARGO_PKG_VERSION\")),\n            None::<&str>,\n            None,\n        );\n\n        let _ = global::set_tracer_provider(provider);\n\n        layers.push(\n            tracing_opentelemetry::layer()\n                .with_tracer(tracer)\n      
          .with_filter(create_filter(verbose))\n                .boxed(),\n        );\n\n        opentelemetry::global::set_text_map_propagator(TraceContextPropagator::new());\n\n        global::set_text_map_propagator(TraceContextPropagator::new());\n    }\n\n    tracing_subscriber::registry().with(layers).try_init()?;\n\n    Ok(())\n}\n"
  },
  {
    "path": "crates/topos-test-sdk/Cargo.toml",
    "content": "[package]\nname = \"topos-test-sdk\"\nversion = \"0.1.0\"\nedition = \"2021\"\nbuild = \"build.rs\"\n\n[lints]\nworkspace = true\n\n[dependencies]\ntopos-core = { workspace = true, features = [\"uci\", \"api\"] }\ntopos-crypto = { path = \"../topos-crypto/\" }\ntopos-config = { path = \"../topos-config/\" }\ntopos-p2p = { path = \"../topos-p2p/\" }\ntopos-tce = { path = \"../topos-tce/\" }\ntopos-tce-api = { path = \"../topos-tce-api/\" }\ntopos-tce-broadcast = { path = \"../topos-tce-broadcast/\" }\ntopos-tce-gatekeeper = { path = \"../topos-tce-gatekeeper/\" }\ntopos-tce-storage = { path = \"../topos-tce-storage/\" }\ntopos-tce-synchronizer = { path = \"../topos-tce-synchronizer/\" }\n\nhex.workspace = true\nethers.workspace = true\nasync-trait.workspace = true\nfutures.workspace = true\nlazy_static = { version = \"1.4.0\" }\nlibp2p = { workspace = true, features = [\"macros\"] }\nproc_macro_sdk = { path = \"./proc_macro_sdk/\" }\nrand.workspace = true\nrstest.workspace = true\ntokio-stream.workspace = true\nprost.workspace = true\ntonic = { workspace = true, default-features = false, features = [\n    \"prost\",\n    \"codegen\",\n    \"transport\",\n] }\n\ntower.workspace = true\ntokio-util.workspace = true\ntokio.workspace = true\ntracing.workspace = true\nasync-stream.workspace = true\n\n[build-dependencies]\ntonic-build.workspace = true\n"
  },
  {
    "path": "crates/topos-test-sdk/build.rs",
    "content": "use std::{env, path::PathBuf, str::FromStr};\n\nfn main() {\n    let mut path = PathBuf::from_str(\n        &env::var(\"CARGO_MANIFEST_DIR\").expect(\"unable to build du to missing CARGO_MANIFEST_DIR\"),\n    )\n    .expect(\"Unable to build PathBuf for topos-test-sdk\");\n\n    path.push(\"./../../target/tmp/\");\n    let path = path.as_path();\n    println!(\n        \"cargo:rustc-env=TOPOS_TEST_SDK_TMP={}\",\n        path.to_str().unwrap()\n    );\n\n    let path = PathBuf::from(\"./src/grpc/behaviour/\");\n\n    tonic_build::configure()\n        .out_dir(path)\n        .compile(\n            &[\n                \"./proto/behaviour/helloworld.proto\",\n                \"./proto/behaviour/noop.proto\",\n            ],\n            &[\"proto/\"],\n        )\n        .unwrap();\n}\n"
  },
  {
    "path": "crates/topos-test-sdk/proc_macro_sdk/Cargo.toml",
    "content": "[package]\nname = \"proc_macro_sdk\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n[lints]\nworkspace = true\n\n[lib]\nproc-macro = true\n\n[dependencies]\nsyn = \"1.0\"\nquote = \"1.0\"\n"
  },
  {
    "path": "crates/topos-test-sdk/proc_macro_sdk/src/lib.rs",
    "content": "use proc_macro::TokenStream;\nuse quote::format_ident;\nuse quote::quote;\nuse syn::parse_macro_input;\nuse syn::Expr;\nuse syn::ExprLit;\nuse syn::ExprRange;\nuse syn::Lit;\n\n#[proc_macro]\npub fn generate_certificate_ids(input: TokenStream) -> TokenStream {\n    let range: ExprRange = parse_macro_input!(input as ExprRange);\n\n    let range = parse_range(range);\n\n    let mut quotes = Vec::new();\n    for i in range {\n        let certificate_name = format_ident!(\"CERTIFICATE_ID_{}\", i);\n        quotes.push(quote! {\n            pub const #certificate_name: ::topos_core::uci::CertificateId = ::topos_core::uci::CertificateId::from_array([#i; ::topos_core::uci::CERTIFICATE_ID_LENGTH]);\n        });\n    }\n\n    TokenStream::from(quote! { #(#quotes)* })\n}\n\n#[proc_macro]\npub fn generate_source_subnet_ids(input: TokenStream) -> TokenStream {\n    generate_subnet_ids(\"SOURCE\", input)\n}\n\n#[proc_macro]\npub fn generate_target_subnet_ids(input: TokenStream) -> TokenStream {\n    generate_subnet_ids(\"TARGET\", input)\n}\n\nfn generate_subnet_ids(subnet_type: &str, input: TokenStream) -> TokenStream {\n    let range: ExprRange = parse_macro_input!(input as ExprRange);\n\n    let range = parse_range(range);\n\n    let mut quotes = Vec::new();\n    for (index, i) in range.enumerate() {\n        let source_subnet_name = format_ident!(\"{}_SUBNET_ID_{}\", subnet_type, index + 1);\n        quotes.push(quote! {\n            pub const #source_subnet_name: ::topos_core::uci::SubnetId = ::topos_core::uci::SubnetId::from_array([#i; ::topos_core::uci::SUBNET_ID_LENGTH]);\n        });\n    }\n\n    TokenStream::from(quote! 
{ #(#quotes)* })\n}\n\nfn parse_range(range: ExprRange) -> std::ops::Range<u8> {\n    let from: u8 = if let Expr::Lit(ExprLit {\n        lit: Lit::Int(value),\n        ..\n    }) = *range\n        .from\n        .expect(\"topos_test_sdk: generate cert/subnet, from input isn't valid\")\n    {\n        value\n            .base10_parse()\n            .expect(\"topos_test_sdk: generate cert/subnet, unable to parse from int\")\n    } else {\n        panic!(\"topos_test_sdk: generate cert/subnet, unable to parse from input\");\n    };\n\n    let to: u8 = if let Expr::Lit(ExprLit {\n        lit: Lit::Int(value),\n        ..\n    }) = *range\n        .to\n        .expect(\"topos_test_sdk: generate cert/subnet, to input isn't valid\")\n    {\n        value\n            .base10_parse()\n            .expect(\"topos_test_sdk: generate cert/subnet, unable to parse to int\")\n    } else {\n        panic!(\"topos_test_sdk: generate cert/subnet, unable to parse to input\");\n    };\n\n    match range.limits {\n        syn::RangeLimits::HalfOpen(_) => from..to,\n        syn::RangeLimits::Closed(_) => from..(to + 1),\n    }\n}\n"
  },
  {
    "path": "crates/topos-test-sdk/proto/behaviour/helloworld.proto",
    "content": "syntax = \"proto3\";\n\npackage helloworld;\n\n// The greeting service definition.\nservice Greeter {\n  // Sends a greeting\n  rpc SayHello (HelloRequest) returns (HelloReply) {}\n\n  // Send a greeting with a delay\n  rpc SayHelloWithDelay(HelloWithDelayRequest) returns (HelloReply) {}\n}\n\n// The request message containing the user's name.\nmessage HelloRequest {\n  string name = 1;\n}\n\n// The request message containing the user's name and the delay.\nmessage HelloWithDelayRequest {\n  string name = 1;\n  uint64 delay_in_seconds = 2;\n}\n\n// The response message containing the greetings\nmessage HelloReply {\n  string message = 1;\n}\n"
  },
  {
    "path": "crates/topos-test-sdk/proto/behaviour/noop.proto",
    "content": "syntax = \"proto3\";\n\npackage noop;\n\n// The greeting service definition.\nservice Noop {\n  // Trigger nothing\n  rpc do_nothing (NoopRequest) returns (NoopResponse) {}\n}\n\nmessage NoopRequest {\n}\n\nmessage NoopResponse {\n}\n"
  },
  {
    "path": "crates/topos-test-sdk/src/certificates/mod.rs",
    "content": "use rstest::*;\nuse std::collections::HashMap;\n\nuse topos_core::{\n    types::{\n        stream::CertificateSourceStreamPosition, stream::Position, CertificateDelivered,\n        ProofOfDelivery,\n    },\n    uci::{Certificate, CertificateId, SubnetId, INITIAL_CERTIFICATE_ID},\n};\n\nuse crate::constants::PREV_CERTIFICATE_ID;\nuse crate::constants::SOURCE_SUBNET_ID_1;\nuse crate::constants::TARGET_SUBNET_ID_1;\n\n#[fixture]\npub fn create_certificate(\n    #[default(SOURCE_SUBNET_ID_1)] source_subnet: SubnetId,\n    #[default(&[TARGET_SUBNET_ID_1])] target_subnets: &[SubnetId],\n    #[default(None)] previous_certificate_id: Option<CertificateId>,\n) -> Certificate {\n    Certificate::new_with_default_fields(\n        previous_certificate_id.unwrap_or(INITIAL_CERTIFICATE_ID),\n        source_subnet,\n        target_subnets,\n    )\n    .unwrap()\n}\n\n#[fixture]\npub fn create_certificate_at_position(\n    #[default(Position::ZERO)] position: Position,\n    create_certificate: Certificate,\n) -> CertificateDelivered {\n    let certificate_id = create_certificate.id;\n    let subnet_id = create_certificate.source_subnet_id;\n\n    CertificateDelivered {\n        certificate: create_certificate,\n        proof_of_delivery: ProofOfDelivery {\n            certificate_id,\n            delivery_position: CertificateSourceStreamPosition {\n                subnet_id,\n                position,\n            },\n            readies: vec![],\n            threshold: 0,\n        },\n    }\n}\n\n#[fixture]\npub fn create_certificate_chain(\n    #[default(SOURCE_SUBNET_ID_1)] source_subnet: topos_core::uci::SubnetId,\n    #[default(&[TARGET_SUBNET_ID_1])] target_subnets: &[topos_core::uci::SubnetId],\n    #[default(1)] number: usize,\n) -> Vec<CertificateDelivered> {\n    let mut certificates = Vec::new();\n    let mut parent = None;\n\n    for i in 0..number {\n        let cert = Certificate::new_with_default_fields(\n            
parent.take().unwrap_or(*PREV_CERTIFICATE_ID.as_array()),\n            source_subnet,\n            target_subnets,\n        )\n        .unwrap();\n        parent = Some(*cert.id.as_array());\n        let id = cert.id;\n        certificates.push(CertificateDelivered {\n            certificate: cert,\n            proof_of_delivery: ProofOfDelivery {\n                certificate_id: id,\n                delivery_position: CertificateSourceStreamPosition {\n                    subnet_id: source_subnet,\n                    position: i.try_into().unwrap(),\n                },\n                readies: Vec::new(),\n                threshold: 0,\n            },\n        });\n    }\n\n    certificates\n}\n\n/// Generate and assign nb_cert number of certificates to existing subnets\n/// Could be different number of certificates per subnet\npub fn create_certificate_chains(\n    subnets: &[SubnetId],\n    number_of_certificates_per_subnet: usize,\n) -> HashMap<SubnetId, Vec<CertificateDelivered>> {\n    let mut result = HashMap::new();\n\n    subnets.iter().for_each(|subnet| {\n        let targets = subnets\n            .iter()\n            .filter(|sub| *sub != subnet)\n            .copied()\n            .collect::<Vec<_>>();\n\n        let certs =\n            create_certificate_chain(*subnet, targets.as_ref(), number_of_certificates_per_subnet);\n        result.entry(*subnet).or_insert(certs);\n    });\n\n    result\n}\n"
  },
  {
    "path": "crates/topos-test-sdk/src/crypto.rs",
    "content": "use std::{str::FromStr, sync::Arc};\n\nuse rstest::fixture;\nuse topos_crypto::messages::MessageSigner;\n\n#[fixture(key = \"122f3ae6ade1fd136b292cea4f6243c7811160352c8821528547a1fe7c459daf\")]\npub fn message_signer(key: &str) -> Arc<MessageSigner> {\n    Arc::new(MessageSigner::from_str(key).unwrap())\n}\n"
  },
  {
    "path": "crates/topos-test-sdk/src/grpc/behaviour/helloworld.rs",
    "content": "/// The request message containing the user's name.\n#[allow(clippy::derive_partial_eq_without_eq)]\n#[derive(Clone, PartialEq, ::prost::Message)]\npub struct HelloRequest {\n    #[prost(string, tag = \"1\")]\n    pub name: ::prost::alloc::string::String,\n}\n/// The request message containing the user's name and the delay.\n#[allow(clippy::derive_partial_eq_without_eq)]\n#[derive(Clone, PartialEq, ::prost::Message)]\npub struct HelloWithDelayRequest {\n    #[prost(string, tag = \"1\")]\n    pub name: ::prost::alloc::string::String,\n    #[prost(uint64, tag = \"2\")]\n    pub delay_in_seconds: u64,\n}\n/// The response message containing the greetings\n#[allow(clippy::derive_partial_eq_without_eq)]\n#[derive(Clone, PartialEq, ::prost::Message)]\npub struct HelloReply {\n    #[prost(string, tag = \"1\")]\n    pub message: ::prost::alloc::string::String,\n}\n/// Generated client implementations.\npub mod greeter_client {\n    #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]\n    use tonic::codegen::*;\n    use tonic::codegen::http::Uri;\n    /// The greeting service definition.\n    #[derive(Debug, Clone)]\n    pub struct GreeterClient<T> {\n        inner: tonic::client::Grpc<T>,\n    }\n    impl GreeterClient<tonic::transport::Channel> {\n        /// Attempt to create a new client by connecting to a given endpoint.\n        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>\n        where\n            D: TryInto<tonic::transport::Endpoint>,\n            D::Error: Into<StdError>,\n        {\n            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;\n            Ok(Self::new(conn))\n        }\n    }\n    impl<T> GreeterClient<T>\n    where\n        T: tonic::client::GrpcService<tonic::body::BoxBody>,\n        T::Error: Into<StdError>,\n        T::ResponseBody: Body<Data = Bytes> + Send + 'static,\n        <T::ResponseBody as Body>::Error: Into<StdError> + Send,\n    {\n        pub 
fn new(inner: T) -> Self {\n            let inner = tonic::client::Grpc::new(inner);\n            Self { inner }\n        }\n        pub fn with_origin(inner: T, origin: Uri) -> Self {\n            let inner = tonic::client::Grpc::with_origin(inner, origin);\n            Self { inner }\n        }\n        pub fn with_interceptor<F>(\n            inner: T,\n            interceptor: F,\n        ) -> GreeterClient<InterceptedService<T, F>>\n        where\n            F: tonic::service::Interceptor,\n            T::ResponseBody: Default,\n            T: tonic::codegen::Service<\n                http::Request<tonic::body::BoxBody>,\n                Response = http::Response<\n                    <T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody,\n                >,\n            >,\n            <T as tonic::codegen::Service<\n                http::Request<tonic::body::BoxBody>,\n            >>::Error: Into<StdError> + Send + Sync,\n        {\n            GreeterClient::new(InterceptedService::new(inner, interceptor))\n        }\n        /// Compress requests with the given encoding.\n        ///\n        /// This requires the server to support it otherwise it might respond with an\n        /// error.\n        #[must_use]\n        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {\n            self.inner = self.inner.send_compressed(encoding);\n            self\n        }\n        /// Enable decompressing responses.\n        #[must_use]\n        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {\n            self.inner = self.inner.accept_compressed(encoding);\n            self\n        }\n        /// Limits the maximum size of a decoded message.\n        ///\n        /// Default: `4MB`\n        #[must_use]\n        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {\n            self.inner = self.inner.max_decoding_message_size(limit);\n            self\n        }\n        /// Limits the 
maximum size of an encoded message.\n        ///\n        /// Default: `usize::MAX`\n        #[must_use]\n        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {\n            self.inner = self.inner.max_encoding_message_size(limit);\n            self\n        }\n        /// Sends a greeting\n        pub async fn say_hello(\n            &mut self,\n            request: impl tonic::IntoRequest<super::HelloRequest>,\n        ) -> std::result::Result<tonic::Response<super::HelloReply>, tonic::Status> {\n            self.inner\n                .ready()\n                .await\n                .map_err(|e| {\n                    tonic::Status::new(\n                        tonic::Code::Unknown,\n                        format!(\"Service was not ready: {}\", e.into()),\n                    )\n                })?;\n            let codec = tonic::codec::ProstCodec::default();\n            let path = http::uri::PathAndQuery::from_static(\n                \"/helloworld.Greeter/SayHello\",\n            );\n            let mut req = request.into_request();\n            req.extensions_mut()\n                .insert(GrpcMethod::new(\"helloworld.Greeter\", \"SayHello\"));\n            self.inner.unary(req, path, codec).await\n        }\n        /// Send a greeting with a delay\n        pub async fn say_hello_with_delay(\n            &mut self,\n            request: impl tonic::IntoRequest<super::HelloWithDelayRequest>,\n        ) -> std::result::Result<tonic::Response<super::HelloReply>, tonic::Status> {\n            self.inner\n                .ready()\n                .await\n                .map_err(|e| {\n                    tonic::Status::new(\n                        tonic::Code::Unknown,\n                        format!(\"Service was not ready: {}\", e.into()),\n                    )\n                })?;\n            let codec = tonic::codec::ProstCodec::default();\n            let path = http::uri::PathAndQuery::from_static(\n                
\"/helloworld.Greeter/SayHelloWithDelay\",\n            );\n            let mut req = request.into_request();\n            req.extensions_mut()\n                .insert(GrpcMethod::new(\"helloworld.Greeter\", \"SayHelloWithDelay\"));\n            self.inner.unary(req, path, codec).await\n        }\n    }\n}\n/// Generated server implementations.\npub mod greeter_server {\n    #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]\n    use tonic::codegen::*;\n    /// Generated trait containing gRPC methods that should be implemented for use with GreeterServer.\n    #[async_trait]\n    pub trait Greeter: Send + Sync + 'static {\n        /// Sends a greeting\n        async fn say_hello(\n            &self,\n            request: tonic::Request<super::HelloRequest>,\n        ) -> std::result::Result<tonic::Response<super::HelloReply>, tonic::Status>;\n        /// Send a greeting with a delay\n        async fn say_hello_with_delay(\n            &self,\n            request: tonic::Request<super::HelloWithDelayRequest>,\n        ) -> std::result::Result<tonic::Response<super::HelloReply>, tonic::Status>;\n    }\n    /// The greeting service definition.\n    #[derive(Debug)]\n    pub struct GreeterServer<T: Greeter> {\n        inner: _Inner<T>,\n        accept_compression_encodings: EnabledCompressionEncodings,\n        send_compression_encodings: EnabledCompressionEncodings,\n        max_decoding_message_size: Option<usize>,\n        max_encoding_message_size: Option<usize>,\n    }\n    struct _Inner<T>(Arc<T>);\n    impl<T: Greeter> GreeterServer<T> {\n        pub fn new(inner: T) -> Self {\n            Self::from_arc(Arc::new(inner))\n        }\n        pub fn from_arc(inner: Arc<T>) -> Self {\n            let inner = _Inner(inner);\n            Self {\n                inner,\n                accept_compression_encodings: Default::default(),\n                send_compression_encodings: Default::default(),\n                max_decoding_message_size: 
None,\n                max_encoding_message_size: None,\n            }\n        }\n        pub fn with_interceptor<F>(\n            inner: T,\n            interceptor: F,\n        ) -> InterceptedService<Self, F>\n        where\n            F: tonic::service::Interceptor,\n        {\n            InterceptedService::new(Self::new(inner), interceptor)\n        }\n        /// Enable decompressing requests with the given encoding.\n        #[must_use]\n        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {\n            self.accept_compression_encodings.enable(encoding);\n            self\n        }\n        /// Compress responses with the given encoding, if the client supports it.\n        #[must_use]\n        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {\n            self.send_compression_encodings.enable(encoding);\n            self\n        }\n        /// Limits the maximum size of a decoded message.\n        ///\n        /// Default: `4MB`\n        #[must_use]\n        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {\n            self.max_decoding_message_size = Some(limit);\n            self\n        }\n        /// Limits the maximum size of an encoded message.\n        ///\n        /// Default: `usize::MAX`\n        #[must_use]\n        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {\n            self.max_encoding_message_size = Some(limit);\n            self\n        }\n    }\n    impl<T, B> tonic::codegen::Service<http::Request<B>> for GreeterServer<T>\n    where\n        T: Greeter,\n        B: Body + Send + 'static,\n        B::Error: Into<StdError> + Send + 'static,\n    {\n        type Response = http::Response<tonic::body::BoxBody>;\n        type Error = std::convert::Infallible;\n        type Future = BoxFuture<Self::Response, Self::Error>;\n        fn poll_ready(\n            &mut self,\n            _cx: &mut Context<'_>,\n        ) -> 
Poll<std::result::Result<(), Self::Error>> {\n            Poll::Ready(Ok(()))\n        }\n        fn call(&mut self, req: http::Request<B>) -> Self::Future {\n            let inner = self.inner.clone();\n            match req.uri().path() {\n                \"/helloworld.Greeter/SayHello\" => {\n                    #[allow(non_camel_case_types)]\n                    struct SayHelloSvc<T: Greeter>(pub Arc<T>);\n                    impl<T: Greeter> tonic::server::UnaryService<super::HelloRequest>\n                    for SayHelloSvc<T> {\n                        type Response = super::HelloReply;\n                        type Future = BoxFuture<\n                            tonic::Response<Self::Response>,\n                            tonic::Status,\n                        >;\n                        fn call(\n                            &mut self,\n                            request: tonic::Request<super::HelloRequest>,\n                        ) -> Self::Future {\n                            let inner = Arc::clone(&self.0);\n                            let fut = async move {\n                                <T as Greeter>::say_hello(&inner, request).await\n                            };\n                            Box::pin(fut)\n                        }\n                    }\n                    let accept_compression_encodings = self.accept_compression_encodings;\n                    let send_compression_encodings = self.send_compression_encodings;\n                    let max_decoding_message_size = self.max_decoding_message_size;\n                    let max_encoding_message_size = self.max_encoding_message_size;\n                    let inner = self.inner.clone();\n                    let fut = async move {\n                        let inner = inner.0;\n                        let method = SayHelloSvc(inner);\n                        let codec = tonic::codec::ProstCodec::default();\n                        let mut grpc = tonic::server::Grpc::new(codec)\n   
                         .apply_compression_config(\n                                accept_compression_encodings,\n                                send_compression_encodings,\n                            )\n                            .apply_max_message_size_config(\n                                max_decoding_message_size,\n                                max_encoding_message_size,\n                            );\n                        let res = grpc.unary(method, req).await;\n                        Ok(res)\n                    };\n                    Box::pin(fut)\n                }\n                \"/helloworld.Greeter/SayHelloWithDelay\" => {\n                    #[allow(non_camel_case_types)]\n                    struct SayHelloWithDelaySvc<T: Greeter>(pub Arc<T>);\n                    impl<\n                        T: Greeter,\n                    > tonic::server::UnaryService<super::HelloWithDelayRequest>\n                    for SayHelloWithDelaySvc<T> {\n                        type Response = super::HelloReply;\n                        type Future = BoxFuture<\n                            tonic::Response<Self::Response>,\n                            tonic::Status,\n                        >;\n                        fn call(\n                            &mut self,\n                            request: tonic::Request<super::HelloWithDelayRequest>,\n                        ) -> Self::Future {\n                            let inner = Arc::clone(&self.0);\n                            let fut = async move {\n                                <T as Greeter>::say_hello_with_delay(&inner, request).await\n                            };\n                            Box::pin(fut)\n                        }\n                    }\n                    let accept_compression_encodings = self.accept_compression_encodings;\n                    let send_compression_encodings = self.send_compression_encodings;\n                    let max_decoding_message_size = 
self.max_decoding_message_size;\n                    let max_encoding_message_size = self.max_encoding_message_size;\n                    let inner = self.inner.clone();\n                    let fut = async move {\n                        let inner = inner.0;\n                        let method = SayHelloWithDelaySvc(inner);\n                        let codec = tonic::codec::ProstCodec::default();\n                        let mut grpc = tonic::server::Grpc::new(codec)\n                            .apply_compression_config(\n                                accept_compression_encodings,\n                                send_compression_encodings,\n                            )\n                            .apply_max_message_size_config(\n                                max_decoding_message_size,\n                                max_encoding_message_size,\n                            );\n                        let res = grpc.unary(method, req).await;\n                        Ok(res)\n                    };\n                    Box::pin(fut)\n                }\n                _ => {\n                    Box::pin(async move {\n                        Ok(\n                            http::Response::builder()\n                                .status(200)\n                                .header(\"grpc-status\", \"12\")\n                                .header(\"content-type\", \"application/grpc\")\n                                .body(empty_body())\n                                .unwrap(),\n                        )\n                    })\n                }\n            }\n        }\n    }\n    impl<T: Greeter> Clone for GreeterServer<T> {\n        fn clone(&self) -> Self {\n            let inner = self.inner.clone();\n            Self {\n                inner,\n                accept_compression_encodings: self.accept_compression_encodings,\n                send_compression_encodings: self.send_compression_encodings,\n                max_decoding_message_size: 
self.max_decoding_message_size,\n                max_encoding_message_size: self.max_encoding_message_size,\n            }\n        }\n    }\n    impl<T: Greeter> Clone for _Inner<T> {\n        fn clone(&self) -> Self {\n            Self(Arc::clone(&self.0))\n        }\n    }\n    impl<T: std::fmt::Debug> std::fmt::Debug for _Inner<T> {\n        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n            write!(f, \"{:?}\", self.0)\n        }\n    }\n    impl<T: Greeter> tonic::server::NamedService for GreeterServer<T> {\n        const NAME: &'static str = \"helloworld.Greeter\";\n    }\n}\n"
  },
  {
    "path": "crates/topos-test-sdk/src/grpc/behaviour/noop.rs",
    "content": "#[allow(clippy::derive_partial_eq_without_eq)]\n#[derive(Clone, PartialEq, ::prost::Message)]\npub struct NoopRequest {}\n#[allow(clippy::derive_partial_eq_without_eq)]\n#[derive(Clone, PartialEq, ::prost::Message)]\npub struct NoopResponse {}\n/// Generated client implementations.\npub mod noop_client {\n    #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]\n    use tonic::codegen::*;\n    use tonic::codegen::http::Uri;\n    /// The greeting service definition.\n    #[derive(Debug, Clone)]\n    pub struct NoopClient<T> {\n        inner: tonic::client::Grpc<T>,\n    }\n    impl NoopClient<tonic::transport::Channel> {\n        /// Attempt to create a new client by connecting to a given endpoint.\n        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>\n        where\n            D: TryInto<tonic::transport::Endpoint>,\n            D::Error: Into<StdError>,\n        {\n            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;\n            Ok(Self::new(conn))\n        }\n    }\n    impl<T> NoopClient<T>\n    where\n        T: tonic::client::GrpcService<tonic::body::BoxBody>,\n        T::Error: Into<StdError>,\n        T::ResponseBody: Body<Data = Bytes> + Send + 'static,\n        <T::ResponseBody as Body>::Error: Into<StdError> + Send,\n    {\n        pub fn new(inner: T) -> Self {\n            let inner = tonic::client::Grpc::new(inner);\n            Self { inner }\n        }\n        pub fn with_origin(inner: T, origin: Uri) -> Self {\n            let inner = tonic::client::Grpc::with_origin(inner, origin);\n            Self { inner }\n        }\n        pub fn with_interceptor<F>(\n            inner: T,\n            interceptor: F,\n        ) -> NoopClient<InterceptedService<T, F>>\n        where\n            F: tonic::service::Interceptor,\n            T::ResponseBody: Default,\n            T: tonic::codegen::Service<\n                http::Request<tonic::body::BoxBody>,\n     
           Response = http::Response<\n                    <T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody,\n                >,\n            >,\n            <T as tonic::codegen::Service<\n                http::Request<tonic::body::BoxBody>,\n            >>::Error: Into<StdError> + Send + Sync,\n        {\n            NoopClient::new(InterceptedService::new(inner, interceptor))\n        }\n        /// Compress requests with the given encoding.\n        ///\n        /// This requires the server to support it otherwise it might respond with an\n        /// error.\n        #[must_use]\n        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {\n            self.inner = self.inner.send_compressed(encoding);\n            self\n        }\n        /// Enable decompressing responses.\n        #[must_use]\n        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {\n            self.inner = self.inner.accept_compressed(encoding);\n            self\n        }\n        /// Limits the maximum size of a decoded message.\n        ///\n        /// Default: `4MB`\n        #[must_use]\n        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {\n            self.inner = self.inner.max_decoding_message_size(limit);\n            self\n        }\n        /// Limits the maximum size of an encoded message.\n        ///\n        /// Default: `usize::MAX`\n        #[must_use]\n        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {\n            self.inner = self.inner.max_encoding_message_size(limit);\n            self\n        }\n        /// Trigger nothing\n        pub async fn do_nothing(\n            &mut self,\n            request: impl tonic::IntoRequest<super::NoopRequest>,\n        ) -> std::result::Result<tonic::Response<super::NoopResponse>, tonic::Status> {\n            self.inner\n                .ready()\n                .await\n                .map_err(|e| {\n               
     tonic::Status::new(\n                        tonic::Code::Unknown,\n                        format!(\"Service was not ready: {}\", e.into()),\n                    )\n                })?;\n            let codec = tonic::codec::ProstCodec::default();\n            let path = http::uri::PathAndQuery::from_static(\"/noop.Noop/do_nothing\");\n            let mut req = request.into_request();\n            req.extensions_mut().insert(GrpcMethod::new(\"noop.Noop\", \"do_nothing\"));\n            self.inner.unary(req, path, codec).await\n        }\n    }\n}\n/// Generated server implementations.\npub mod noop_server {\n    #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]\n    use tonic::codegen::*;\n    /// Generated trait containing gRPC methods that should be implemented for use with NoopServer.\n    #[async_trait]\n    pub trait Noop: Send + Sync + 'static {\n        /// Trigger nothing\n        async fn do_nothing(\n            &self,\n            request: tonic::Request<super::NoopRequest>,\n        ) -> std::result::Result<tonic::Response<super::NoopResponse>, tonic::Status>;\n    }\n    /// The greeting service definition.\n    #[derive(Debug)]\n    pub struct NoopServer<T: Noop> {\n        inner: _Inner<T>,\n        accept_compression_encodings: EnabledCompressionEncodings,\n        send_compression_encodings: EnabledCompressionEncodings,\n        max_decoding_message_size: Option<usize>,\n        max_encoding_message_size: Option<usize>,\n    }\n    struct _Inner<T>(Arc<T>);\n    impl<T: Noop> NoopServer<T> {\n        pub fn new(inner: T) -> Self {\n            Self::from_arc(Arc::new(inner))\n        }\n        pub fn from_arc(inner: Arc<T>) -> Self {\n            let inner = _Inner(inner);\n            Self {\n                inner,\n                accept_compression_encodings: Default::default(),\n                send_compression_encodings: Default::default(),\n                max_decoding_message_size: None,\n                
max_encoding_message_size: None,\n            }\n        }\n        pub fn with_interceptor<F>(\n            inner: T,\n            interceptor: F,\n        ) -> InterceptedService<Self, F>\n        where\n            F: tonic::service::Interceptor,\n        {\n            InterceptedService::new(Self::new(inner), interceptor)\n        }\n        /// Enable decompressing requests with the given encoding.\n        #[must_use]\n        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {\n            self.accept_compression_encodings.enable(encoding);\n            self\n        }\n        /// Compress responses with the given encoding, if the client supports it.\n        #[must_use]\n        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {\n            self.send_compression_encodings.enable(encoding);\n            self\n        }\n        /// Limits the maximum size of a decoded message.\n        ///\n        /// Default: `4MB`\n        #[must_use]\n        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {\n            self.max_decoding_message_size = Some(limit);\n            self\n        }\n        /// Limits the maximum size of an encoded message.\n        ///\n        /// Default: `usize::MAX`\n        #[must_use]\n        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {\n            self.max_encoding_message_size = Some(limit);\n            self\n        }\n    }\n    impl<T, B> tonic::codegen::Service<http::Request<B>> for NoopServer<T>\n    where\n        T: Noop,\n        B: Body + Send + 'static,\n        B::Error: Into<StdError> + Send + 'static,\n    {\n        type Response = http::Response<tonic::body::BoxBody>;\n        type Error = std::convert::Infallible;\n        type Future = BoxFuture<Self::Response, Self::Error>;\n        fn poll_ready(\n            &mut self,\n            _cx: &mut Context<'_>,\n        ) -> Poll<std::result::Result<(), Self::Error>> {\n          
  Poll::Ready(Ok(()))\n        }\n        fn call(&mut self, req: http::Request<B>) -> Self::Future {\n            let inner = self.inner.clone();\n            match req.uri().path() {\n                \"/noop.Noop/do_nothing\" => {\n                    #[allow(non_camel_case_types)]\n                    struct do_nothingSvc<T: Noop>(pub Arc<T>);\n                    impl<T: Noop> tonic::server::UnaryService<super::NoopRequest>\n                    for do_nothingSvc<T> {\n                        type Response = super::NoopResponse;\n                        type Future = BoxFuture<\n                            tonic::Response<Self::Response>,\n                            tonic::Status,\n                        >;\n                        fn call(\n                            &mut self,\n                            request: tonic::Request<super::NoopRequest>,\n                        ) -> Self::Future {\n                            let inner = Arc::clone(&self.0);\n                            let fut = async move {\n                                <T as Noop>::do_nothing(&inner, request).await\n                            };\n                            Box::pin(fut)\n                        }\n                    }\n                    let accept_compression_encodings = self.accept_compression_encodings;\n                    let send_compression_encodings = self.send_compression_encodings;\n                    let max_decoding_message_size = self.max_decoding_message_size;\n                    let max_encoding_message_size = self.max_encoding_message_size;\n                    let inner = self.inner.clone();\n                    let fut = async move {\n                        let inner = inner.0;\n                        let method = do_nothingSvc(inner);\n                        let codec = tonic::codec::ProstCodec::default();\n                        let mut grpc = tonic::server::Grpc::new(codec)\n                            .apply_compression_config(\n            
                    accept_compression_encodings,\n                                send_compression_encodings,\n                            )\n                            .apply_max_message_size_config(\n                                max_decoding_message_size,\n                                max_encoding_message_size,\n                            );\n                        let res = grpc.unary(method, req).await;\n                        Ok(res)\n                    };\n                    Box::pin(fut)\n                }\n                _ => {\n                    Box::pin(async move {\n                        Ok(\n                            http::Response::builder()\n                                .status(200)\n                                .header(\"grpc-status\", \"12\")\n                                .header(\"content-type\", \"application/grpc\")\n                                .body(empty_body())\n                                .unwrap(),\n                        )\n                    })\n                }\n            }\n        }\n    }\n    impl<T: Noop> Clone for NoopServer<T> {\n        fn clone(&self) -> Self {\n            let inner = self.inner.clone();\n            Self {\n                inner,\n                accept_compression_encodings: self.accept_compression_encodings,\n                send_compression_encodings: self.send_compression_encodings,\n                max_decoding_message_size: self.max_decoding_message_size,\n                max_encoding_message_size: self.max_encoding_message_size,\n            }\n        }\n    }\n    impl<T: Noop> Clone for _Inner<T> {\n        fn clone(&self) -> Self {\n            Self(Arc::clone(&self.0))\n        }\n    }\n    impl<T: std::fmt::Debug> std::fmt::Debug for _Inner<T> {\n        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n            write!(f, \"{:?}\", self.0)\n        }\n    }\n    impl<T: Noop> tonic::server::NamedService for NoopServer<T> {\n        
const NAME: &'static str = \"noop.Noop\";\n    }\n}\n"
  },
  {
    "path": "crates/topos-test-sdk/src/grpc/mod.rs",
    "content": "pub mod behaviour {\n    #[rustfmt::skip]\n    pub mod helloworld;\n\n    #[rustfmt::skip]\n    pub mod noop;\n}\n\npub mod implementations {\n    use std::time::Duration;\n\n    use async_trait::async_trait;\n    use tonic::{Request, Response, Status};\n\n    use super::behaviour::{\n        helloworld::{greeter_server::Greeter, HelloReply, HelloRequest, HelloWithDelayRequest},\n        noop::{noop_server::Noop, NoopRequest, NoopResponse},\n    };\n\n    #[derive(Default)]\n    pub struct DummyServer {}\n\n    #[async_trait]\n    impl Greeter for DummyServer {\n        async fn say_hello(\n            &self,\n            request: Request<HelloRequest>,\n        ) -> Result<Response<HelloReply>, Status> {\n            Ok(Response::new(HelloReply {\n                message: format!(\"Hello {}\", request.into_inner().name),\n            }))\n        }\n\n        async fn say_hello_with_delay(\n            &self,\n            request: Request<HelloWithDelayRequest>,\n        ) -> Result<Response<HelloReply>, Status> {\n            let request = request.into_inner();\n            tokio::time::sleep(Duration::from_secs(request.delay_in_seconds)).await;\n\n            Ok(Response::new(HelloReply {\n                message: format!(\"Hello {}\", request.name),\n            }))\n        }\n    }\n\n    #[derive(Default)]\n    pub struct NoopServer {}\n\n    #[async_trait]\n    impl Noop for NoopServer {\n        async fn do_nothing(\n            &self,\n            _: Request<NoopRequest>,\n        ) -> Result<Response<NoopResponse>, Status> {\n            Ok(Response::new(NoopResponse {}))\n        }\n    }\n}\n"
  },
  {
    "path": "crates/topos-test-sdk/src/lib.rs",
    "content": "pub mod certificates;\n\npub mod crypto;\npub mod networking;\npub mod p2p;\npub mod sequencer;\npub mod storage;\npub mod tce;\n\nuse rand::Rng;\nuse std::{\n    collections::HashSet,\n    net::SocketAddr,\n    path::PathBuf,\n    str::FromStr,\n    sync::Mutex,\n    thread,\n    time::{SystemTime, UNIX_EPOCH},\n};\n\nuse lazy_static::lazy_static;\nuse rstest::fixture;\n\nlazy_static! {\n    pub static ref PORT_MAPPING: Mutex<HashSet<u16>> = Mutex::new(HashSet::new());\n}\n\npub mod grpc;\n\npub mod constants {\n    use proc_macro_sdk::generate_certificate_ids;\n    use proc_macro_sdk::generate_source_subnet_ids;\n    use proc_macro_sdk::generate_target_subnet_ids;\n    use topos_core::uci::CertificateId;\n    use topos_core::uci::CERTIFICATE_ID_LENGTH;\n\n    generate_source_subnet_ids!(100..150);\n    generate_target_subnet_ids!(150..200);\n\n    // Certificate range is 0..100\n    pub const PREV_CERTIFICATE_ID: CertificateId =\n        CertificateId::from_array([0u8; CERTIFICATE_ID_LENGTH]);\n    generate_certificate_ids!(1..100);\n}\n\n#[macro_export]\nmacro_rules! wait_for_event {\n    ($node:expr, matches: $( $pattern:pat_param )|+ $( if $guard: expr )?, $error_msg:expr) => {\n        wait_for_event!($node, matches: $( $pattern )|+ $( if $guard )?, $error_msg, 100);\n    };\n\n    ($node:expr, matches: $( $pattern:pat_param )|+ $( if $guard: expr )?, $error_msg:expr, $timeout:expr) => {\n        let assertion = async {\n            while let Some(event) = $node.await {\n                if matches!(event, $( $pattern )|+ $( if $guard )?) 
{\n                    break;\n                }\n            }\n        };\n\n        if let Err(_) = tokio::time::timeout(std::time::Duration::from_millis($timeout), assertion).await\n        {\n            panic!(\"Timed out waiting ({}ms) for event: {}\", $timeout, $error_msg);\n        }\n    };\n}\n\npub fn get_available_port() -> u16 {\n    get_available_addr().port()\n}\npub fn get_available_addr() -> SocketAddr {\n    let mut port_mapping = PORT_MAPPING.lock().unwrap();\n\n    let mut addr = None;\n    for _ in 0..10 {\n        let new_addr = next_available_port();\n        if port_mapping.insert(new_addr.port()) {\n            addr = Some(new_addr);\n            break;\n        }\n    }\n\n    assert!(addr.is_some(), \"Can't find an available port\");\n    addr.unwrap()\n}\n\nfn next_available_port() -> SocketAddr {\n    // let socket = UdpSocket::bind(\"127.0.0.1:0\").expect(\"Can't find an available port\");\n    // socket.local_addr().unwrap()\n    //\n    use std::net::{TcpListener, TcpStream};\n\n    let host = \"127.0.0.1\";\n    // Request a random available port from the OS\n    let listener = TcpListener::bind((host, 0)).expect(\"Can't bind to an available port\");\n    let addr = listener.local_addr().expect(\"Can't find an available port\");\n\n    // Create and accept a connection (which we'll promptly drop) in order to force the port\n    // into the TIME_WAIT state, ensuring that the port will be reserved from some limited\n    // amount of time (roughly 60s on some Linux systems)\n    let _sender = TcpStream::connect(addr).expect(\"Can't connect to an available port\");\n    let _incoming = listener.accept().expect(\"Can't accept an available port\");\n\n    addr\n}\n\n#[fixture]\nfn folder_name() -> &'static str {\n    Box::leak(Box::new(\n        thread::current().name().unwrap().replace(\"::\", \"_\"),\n    ))\n}\n\n#[fixture]\npub fn create_folder(folder_name: &str) -> PathBuf {\n    let dir = env!(\"TOPOS_TEST_SDK_TMP\");\n    let mut 
temp_dir =\n        std::path::PathBuf::from_str(dir).expect(\"Unable to read CARGO_TARGET_TMPDIR\");\n    let time = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();\n    let mut rng = rand::thread_rng();\n\n    temp_dir.push(format!(\n        \"{}/data_{}_{}\",\n        folder_name,\n        time.as_nanos(),\n        rng.gen::<u64>()\n    ));\n\n    temp_dir\n}\n"
  },
  {
    "path": "crates/topos-test-sdk/src/networking/mod.rs",
    "content": "use std::net::SocketAddr;\nuse std::net::{TcpListener, TcpStream};\n\npub fn get_available_port() -> u16 {\n    get_available_addr().port()\n}\n\npub fn get_available_addr() -> SocketAddr {\n    let host = \"127.0.0.1\";\n\n    let listener = TcpListener::bind((host, 0)).expect(\"Can't bind to an available port\");\n    let addr = listener\n        .local_addr()\n        .expect(\"Can't extract local addr from listener\");\n\n    // Forcing the port into the TIME_WAIT state is necessary to ensure that the port will be\n    // reserved from some limited amount of time (roughly 60s on some Linux systems)\n    let _sender = TcpStream::connect(addr).expect(\"Can't connect to an available port\");\n    let _incoming = listener.accept().expect(\"Can't accept connection\");\n\n    addr\n}\n"
  },
  {
    "path": "crates/topos-test-sdk/src/p2p/mod.rs",
    "content": "use libp2p::{\n    build_multiaddr,\n    identity::{self, Keypair},\n    Multiaddr,\n};\nuse rand::{thread_rng, Rng};\n\nuse crate::networking::get_available_port;\n\npub fn local_peer(peer_index: u8, memory_transport: bool) -> (Keypair, Multiaddr) {\n    let peer_id: Keypair = keypair_from_seed(peer_index);\n    let local_listen_addr = if memory_transport {\n        build_multiaddr![Memory(thread_rng().gen::<u64>())]\n    } else {\n        let port = get_available_port();\n        format!(\n            \"/ip4/127.0.0.1/tcp/{}/p2p/{}\",\n            port,\n            peer_id.public().to_peer_id()\n        )\n        .parse()\n        .unwrap()\n    };\n\n    (peer_id, local_listen_addr)\n}\n\npub fn keypair_from_seed(seed: u8) -> Keypair {\n    let mut bytes = [0u8; 32];\n    bytes[0] = seed;\n\n    identity::Keypair::ed25519_from_bytes(bytes).expect(\"Invalid keypair\")\n}\n"
  },
  {
    "path": "crates/topos-test-sdk/src/sequencer/mod.rs",
    "content": "pub const TEST_VALIDATOR_KEY: &str =\n    \"11eddfae7abe45531b3f18342c8062969323a7131d3043f1a33c40df74803cc7\";\n"
  },
  {
    "path": "crates/topos-test-sdk/src/storage/mod.rs",
    "content": "use rstest::fixture;\nuse std::path::PathBuf;\nuse std::sync::Arc;\n\nuse topos_core::types::CertificateDelivered;\nuse topos_tce_storage::{\n    epoch::EpochValidatorsStore, epoch::ValidatorPerEpochStore, fullnode::FullNodeStore,\n    index::IndexTables, store::WriteStore, validator::ValidatorPerpetualTables,\n    validator::ValidatorStore, StorageClient,\n};\n\nuse crate::folder_name;\n\n#[fixture(certificates = &[])]\npub async fn storage_client(certificates: &[CertificateDelivered]) -> StorageClient {\n    let store = create_validator_store::partial_1(certificates).await;\n\n    StorageClient::new(store)\n}\n\n#[fixture]\npub fn create_folder(folder_name: &str) -> PathBuf {\n    let mut path = crate::create_folder(folder_name);\n\n    path.push(\"rocksdb\");\n\n    path\n}\n\n#[fixture(certificates = &[])]\npub async fn create_validator_store(\n    certificates: &[CertificateDelivered],\n    #[future] create_fullnode_store: Arc<FullNodeStore>,\n) -> Arc<ValidatorStore> {\n    let temp_dir = create_folder::default();\n    let fullnode_store = create_fullnode_store.await;\n\n    let store =\n        ValidatorStore::open(&temp_dir, fullnode_store).expect(\"Unable to create validator store\");\n\n    store\n        .insert_certificates_delivered(certificates)\n        .await\n        .expect(\"Unable to insert predefined certificates\");\n\n    store\n}\n\npub async fn create_validator_store_with_fullnode(\n    fullnode_store: Arc<FullNodeStore>,\n) -> Arc<ValidatorStore> {\n    ValidatorStore::open(&create_folder::default(), fullnode_store)\n        .expect(\"Unable to create validator store\")\n}\n\n#[fixture(certificates = &[])]\npub async fn create_fullnode_store(certificates: &[CertificateDelivered]) -> Arc<FullNodeStore> {\n    let temp_dir = create_folder::default();\n\n    let perpetual_tables = Arc::new(ValidatorPerpetualTables::open(&temp_dir));\n    let index_tables = Arc::new(IndexTables::open(&temp_dir));\n\n    let validators_store =\n 
       EpochValidatorsStore::new(&temp_dir).expect(\"Unable to create EpochValidators store\");\n\n    let epoch_store =\n        ValidatorPerEpochStore::new(0, &temp_dir).expect(\"Unable to create Per epoch store\");\n\n    let store = FullNodeStore::open(\n        epoch_store,\n        validators_store,\n        perpetual_tables,\n        index_tables,\n    )\n    .expect(\"Unable to create full node store\");\n\n    store\n        .insert_certificates_delivered(certificates)\n        .await\n        .unwrap();\n\n    store\n}\n"
  },
  {
    "path": "crates/topos-test-sdk/src/tce/gatekeeper.rs",
    "content": "use std::error::Error;\nuse std::future::IntoFuture;\n\nuse tokio::spawn;\nuse tokio::task::JoinHandle;\n\nuse topos_tce_gatekeeper::GatekeeperClient;\nuse topos_tce_gatekeeper::GatekeeperError;\n\npub async fn create_gatekeeper(\n) -> Result<(GatekeeperClient, JoinHandle<Result<(), GatekeeperError>>), Box<dyn Error>> {\n    let (gatekeeper_client, gatekeeper_runtime) = topos_tce_gatekeeper::Gatekeeper::builder()\n        .await\n        .expect(\"Can't create the Gatekeeper\");\n\n    let gatekeeper_join_handle = spawn(gatekeeper_runtime.into_future());\n    Ok((gatekeeper_client, gatekeeper_join_handle))\n}\n"
  },
  {
    "path": "crates/topos-test-sdk/src/tce/mod.rs",
    "content": "use futures::future::join_all;\nuse futures::Stream;\nuse futures::StreamExt;\nuse libp2p::identity::Keypair;\nuse libp2p::{Multiaddr, PeerId};\nuse rstest::*;\nuse std::collections::{HashMap, HashSet};\nuse std::error::Error;\nuse std::sync::Arc;\nuse tokio::spawn;\nuse tokio::sync::broadcast;\nuse tokio::{sync::mpsc, task::JoinHandle};\nuse tokio_stream::wrappers::BroadcastStream;\nuse tokio_util::sync::CancellationToken;\nuse tonic::transport::Channel;\nuse tonic::Request;\nuse tonic::Response;\nuse tonic::Status;\nuse tracing::Instrument;\n\nuse tonic::transport::server::Router;\nuse tonic::transport::Server;\nuse topos_core::api::grpc::tce::v1::{\n    api_service_client::ApiServiceClient, console_service_client::ConsoleServiceClient,\n    synchronizer_service_server::SynchronizerService as GrpcSynchronizerService,\n    synchronizer_service_server::SynchronizerServiceServer,\n};\nuse topos_core::api::grpc::tce::v1::{\n    CheckpointRequest, CheckpointResponse, FetchCertificatesRequest, FetchCertificatesResponse,\n};\nuse topos_core::api::grpc::tce::v1::{StatusRequest, StatusResponse};\nuse topos_core::types::CertificateDelivered;\nuse topos_core::types::ValidatorId;\nuse topos_core::uci::SubnetId;\nuse topos_crypto::messages::MessageSigner;\nuse topos_p2p::{error::P2PError, Event, GrpcRouter, NetworkClient, Runtime};\nuse topos_tce::{events::Events, AppContext};\nuse topos_tce_storage::StorageClient;\nuse topos_tce_synchronizer::SynchronizerService;\nuse tracing::info;\n\nuse self::gatekeeper::create_gatekeeper;\nuse self::p2p::{bootstrap_network, create_network_worker};\nuse self::protocol::{create_reliable_broadcast_client, create_reliable_broadcast_params};\nuse self::public_api::create_public_api;\nuse self::synchronizer::create_synchronizer;\nuse crate::crypto::message_signer;\nuse crate::p2p::local_peer;\nuse crate::storage::create_fullnode_store;\nuse crate::storage::create_validator_store;\n\npub mod gatekeeper;\npub mod p2p;\npub mod 
protocol;\npub mod public_api;\npub mod synchronizer;\n\n#[derive(Debug)]\npub struct TceContext {\n    pub node_config: NodeConfig,\n    pub event_stream: mpsc::Receiver<Events>,\n    pub peer_id: PeerId, // P2P ID\n    pub api_entrypoint: String,\n    pub api_grpc_client: ApiServiceClient<Channel>, // GRPC Client for this peer (tce node)\n    pub console_grpc_client: ConsoleServiceClient<Channel>, // Console TCE GRPC Client for this peer (tce node)\n    pub runtime_join_handle: JoinHandle<Result<(), P2PError>>,\n    pub app_join_handle: JoinHandle<()>,\n    pub gatekeeper_join_handle: JoinHandle<Result<(), topos_tce_gatekeeper::GatekeeperError>>,\n    pub synchronizer_join_handle: JoinHandle<Result<(), topos_tce_synchronizer::SynchronizerError>>,\n    pub connected_subnets: Option<Vec<SubnetId>>, // Particular subnet clients (topos nodes) connected to this tce node\n    pub shutdown: (CancellationToken, mpsc::Receiver<()>),\n}\n\nimpl Drop for TceContext {\n    fn drop(&mut self) {\n        self.app_join_handle.abort();\n        self.runtime_join_handle.abort();\n        self.gatekeeper_join_handle.abort();\n        self.synchronizer_join_handle.abort();\n    }\n}\n\nimpl TceContext {\n    pub async fn shutdown(&mut self) -> Result<(), Box<dyn std::error::Error>> {\n        info!(\"Context performing shutdown...\");\n\n        self.shutdown.0.cancel();\n        self.shutdown.1.recv().await;\n\n        info!(\"Shutdown finished...\");\n\n        Ok(())\n    }\n}\n\n#[derive(Debug, Clone)]\npub struct NodeConfig {\n    pub seed: u8,\n    pub keypair: Keypair,\n    pub addr: Multiaddr,\n    pub minimum_cluster_size: usize,\n    pub dummy: bool,\n}\n\nimpl Default for NodeConfig {\n    fn default() -> Self {\n        Self::from_seed(1)\n    }\n}\n\nimpl NodeConfig {\n    pub fn standalone() -> Self {\n        Self {\n            dummy: true,\n            ..Default::default()\n        }\n    }\n\n    pub fn memory(seed: u8) -> Self {\n        let (keypair, addr) = 
local_peer(seed, true);\n\n        Self {\n            seed,\n            keypair,\n            addr,\n            minimum_cluster_size: 0,\n            dummy: false,\n        }\n    }\n\n    pub fn from_seed(seed: u8) -> Self {\n        let (keypair, addr) = local_peer(seed, false);\n\n        Self {\n            seed,\n            keypair,\n            addr,\n            minimum_cluster_size: 0,\n            dummy: false,\n        }\n    }\n\n    pub fn peer_id(&self) -> PeerId {\n        self.keypair.public().to_peer_id()\n    }\n\n    pub async fn bootstrap(\n        &self,\n        peers: &[NodeConfig],\n        router: Option<GrpcRouter>,\n    ) -> Result<\n        (\n            NetworkClient,\n            impl Stream<Item = Event> + Unpin + Send,\n            JoinHandle<Result<(), P2PError>>,\n        ),\n        Box<dyn Error>,\n    > {\n        bootstrap_network(\n            self.seed,\n            self.addr.clone(),\n            peers,\n            self.minimum_cluster_size,\n            router,\n            self.dummy,\n        )\n        .await\n    }\n\n    pub async fn create(\n        &self,\n        peers: &[NodeConfig],\n        router: Option<GrpcRouter>,\n    ) -> Result<(NetworkClient, impl Stream<Item = Event>, Runtime), P2PError> {\n        create_network_worker(\n            self.seed,\n            vec![self.addr.clone()],\n            peers,\n            self.minimum_cluster_size,\n            router,\n        )\n        .await\n    }\n}\n\n#[derive(Clone)]\nstruct DummyService {}\n\n#[async_trait::async_trait]\nimpl GrpcSynchronizerService for DummyService {\n    async fn fetch_certificates(\n        &self,\n        _request: Request<FetchCertificatesRequest>,\n    ) -> Result<Response<FetchCertificatesResponse>, Status> {\n        Err(Status::unimplemented(\"fetch_certificates\"))\n    }\n\n    async fn fetch_checkpoint(\n        &self,\n        _request: Request<CheckpointRequest>,\n    ) -> Result<Response<CheckpointResponse>, Status> 
{\n        Err(Status::unimplemented(\"fetch_checkpoint\"))\n    }\n}\n\npub fn create_dummy_router() -> Router {\n    Server::builder().add_service(SynchronizerServiceServer::new(DummyService {}))\n}\n\n#[fixture(\n    config = NodeConfig::default(),\n    peers = &[],\n    certificates = &[],\n    validator_id = ValidatorId::default(),\n    validators = HashSet::default()\n)]\npub async fn start_node(\n    certificates: &[CertificateDelivered],\n    config: NodeConfig,\n    peers: &[NodeConfig],\n    validator_id: ValidatorId,\n    validators: HashSet<ValidatorId>,\n    message_signer: Arc<MessageSigner>,\n) -> TceContext {\n    let is_validator = validators.contains(&validator_id);\n    let peer_id = config.keypair.public().to_peer_id();\n    let fullnode_store = create_fullnode_store(&[]).in_current_span().await;\n    let validator_store =\n        create_validator_store(certificates, futures::future::ready(fullnode_store.clone()))\n            .in_current_span()\n            .await;\n\n    let router = GrpcRouter::new(tonic::transport::Server::builder()).add_service(\n        SynchronizerServiceServer::new(SynchronizerService {\n            validator_store: validator_store.clone(),\n        }),\n    );\n\n    let (network_client, network_stream, runtime_join_handle) = bootstrap_network(\n        config.seed,\n        config.addr.clone(),\n        peers,\n        config.minimum_cluster_size,\n        Some(router),\n        config.dummy,\n    )\n    .in_current_span()\n    .await\n    .expect(\"Unable to bootstrap tce network\");\n\n    let storage_client = StorageClient::new(validator_store.clone());\n    let (sender, receiver) = broadcast::channel(100);\n    let (tce_cli, tce_stream) = create_reliable_broadcast_client(\n        validator_id,\n        validators,\n        message_signer,\n        create_reliable_broadcast_params(peers.len()),\n        validator_store.clone(),\n        sender,\n    )\n    .in_current_span()\n    .await;\n\n    let 
api_storage_client = storage_client.clone();\n\n    let (api_context, api_stream) = create_public_api(\n        futures::future::ready(api_storage_client),\n        receiver.resubscribe(),\n        futures::future::ready(validator_store.clone()),\n    )\n    .in_current_span()\n    .await;\n\n    let (gatekeeper_client, gatekeeper_join_handle) = create_gatekeeper().await.unwrap();\n\n    let (synchronizer_stream, synchronizer_join_handle) = create_synchronizer(\n        gatekeeper_client.clone(),\n        network_client.clone(),\n        validator_store.clone(),\n    )\n    .in_current_span()\n    .await;\n\n    let (app, event_stream) = AppContext::new(\n        is_validator,\n        storage_client,\n        tce_cli,\n        network_client,\n        api_context.client,\n        gatekeeper_client,\n        validator_store,\n        api_context.api_context.unwrap(),\n    );\n\n    let shutdown_token = CancellationToken::new();\n    let shutdown_cloned = shutdown_token.clone();\n\n    let (shutdown_sender, shutdown_receiver) = mpsc::channel(1);\n\n    let app_join_handle = spawn(\n        app.run(\n            network_stream,\n            tce_stream,\n            api_stream,\n            synchronizer_stream,\n            BroadcastStream::new(receiver).filter_map(|v| futures::future::ready(v.ok())),\n            (shutdown_token, shutdown_sender),\n        )\n        .in_current_span(),\n    );\n\n    TceContext {\n        node_config: config,\n        event_stream,\n        peer_id,\n        api_entrypoint: api_context.entrypoint,\n        api_grpc_client: api_context.api_client,\n        console_grpc_client: api_context.console_client,\n        runtime_join_handle,\n        app_join_handle,\n        gatekeeper_join_handle,\n        synchronizer_join_handle,\n        connected_subnets: None,\n        shutdown: (shutdown_cloned, shutdown_receiver),\n    }\n}\n\nfn build_peer_config_pool(peer_number: u8) -> Vec<NodeConfig> {\n    (1..=peer_number)\n        
.map(NodeConfig::from_seed)\n        .map(|mut c| {\n            c.minimum_cluster_size = peer_number as usize / 2;\n            c\n        })\n        .collect()\n}\n\npub async fn start_pool(\n    peer_number: u8,\n    certificates: &[CertificateDelivered],\n) -> HashMap<PeerId, TceContext> {\n    let mut clients = HashMap::new();\n    let peers = build_peer_config_pool(peer_number);\n\n    let mut validators = Vec::new();\n    let mut message_signers = Vec::new();\n\n    for i in 1..=peer_number {\n        let message_signer = Arc::new(MessageSigner::new(&[i; 32]).unwrap());\n        message_signers.push(message_signer.clone());\n\n        let validator_id = ValidatorId::from(message_signer.public_address);\n        validators.push(validator_id);\n    }\n\n    let mut await_peers = Vec::new();\n\n    for (i, config) in peers.iter().enumerate() {\n        let validator_id = validators[i];\n        let signer = message_signers[i].clone();\n        let config_cloned = config.clone();\n        let peers_cloned = peers.clone();\n        let validators_cloned = validators.clone();\n\n        let context = tracing::info_span!(\n            \"start_node\",\n            \"peer_id\" = config_cloned.peer_id().to_string()\n        );\n        let fut = async move {\n            let client = start_node(\n                certificates,\n                config_cloned,\n                &peers_cloned,\n                validator_id,\n                validators_cloned\n                    .into_iter()\n                    .collect::<HashSet<ValidatorId>>(),\n                signer,\n            )\n            .instrument(context)\n            .await;\n\n            (client.peer_id, client)\n        };\n        await_peers.push(fut);\n    }\n\n    for (user_peer_id, client) in join_all(await_peers).await {\n        clients.insert(user_peer_id, client);\n    }\n\n    clients\n}\n\n#[fixture(\n    peer_number = 2,\n    certificates = &[]\n)]\npub async fn create_network(\n    
peer_number: usize,\n    certificates: &[CertificateDelivered],\n) -> HashMap<PeerId, TceContext> {\n    // List of peers (tce nodes) with their context\n    let mut peers_context = start_pool(peer_number as u8, certificates).await;\n\n    // Waiting for new network view\n    let mut await_peers = Vec::new();\n    for (_peer_id, client) in peers_context.iter_mut() {\n        await_peers.push(client.console_grpc_client.status(StatusRequest {}));\n    }\n\n    assert!(!join_all(await_peers)\n        .await\n        .into_iter()\n        .map(|res: Result<Response<StatusResponse>, _>| res\n            .map(|r: tonic::Response<_>| r.into_inner().has_active_sample))\n        .any(|r| r.is_err() || !r.unwrap()));\n\n    tracing::error!(\"GRPC status received and ok\");\n    peers_context\n}\n"
  },
  {
    "path": "crates/topos-test-sdk/src/tce/p2p.rs",
    "content": "use std::error::Error;\n\nuse futures::Stream;\nuse libp2p::Multiaddr;\nuse tokio::{spawn, task::JoinHandle};\nuse tracing::Instrument;\n\nuse crate::p2p::keypair_from_seed;\nuse topos_p2p::{error::P2PError, Event, GrpcContext, GrpcRouter, NetworkClient, Runtime};\n\nuse super::NodeConfig;\n\npub async fn create_network_worker(\n    seed: u8,\n    addr: Vec<Multiaddr>,\n    peers: &[NodeConfig],\n    minimum_cluster_size: usize,\n    router: Option<GrpcRouter>,\n) -> Result<\n    (\n        NetworkClient,\n        impl Stream<Item = Event> + Unpin + Send,\n        Runtime,\n    ),\n    P2PError,\n> {\n    let key = keypair_from_seed(seed);\n    let _peer_id = key.public().to_peer_id();\n\n    let known_peers = if seed == 1 {\n        vec![]\n    } else {\n        peers\n            .iter()\n            .filter_map(|config| {\n                if config.seed == 1 {\n                    Some((config.keypair.public().to_peer_id(), config.addr.clone()))\n                } else {\n                    None\n                }\n            })\n            .collect::<Vec<_>>()\n    };\n    let grpc_context = if let Some(router) = router {\n        GrpcContext::default().with_router(router)\n    } else {\n        GrpcContext::default()\n    };\n\n    topos_p2p::network::builder()\n        .peer_key(key.clone())\n        .known_peers(&known_peers)\n        .public_addresses(addr.clone())\n        .listen_addresses(addr)\n        .minimum_cluster_size(minimum_cluster_size)\n        .grpc_context(grpc_context)\n        .allow_private_ip(true)\n        .build()\n        .in_current_span()\n        .await\n}\n\npub async fn bootstrap_network(\n    seed: u8,\n    addr: Multiaddr,\n    peers: &[NodeConfig],\n    minimum_cluster_size: usize,\n    router: Option<GrpcRouter>,\n    dummy: bool,\n) -> Result<\n    (\n        NetworkClient,\n        impl Stream<Item = Event> + Unpin + Send,\n        JoinHandle<Result<(), P2PError>>,\n    ),\n    Box<dyn Error>,\n> {\n    
let (network_client, mut network_stream, runtime) =\n        create_network_worker(seed, vec![addr], peers, minimum_cluster_size, router)\n            .in_current_span()\n            .await?;\n\n    let runtime_join_handle = if dummy {\n        spawn(runtime.run().in_current_span())\n    } else {\n        runtime\n            .bootstrap(&mut network_stream)\n            .in_current_span()\n            .await?\n    };\n\n    println!(\"Network bootstrap done.\");\n\n    Ok((network_client, network_stream, runtime_join_handle))\n}\n"
  },
  {
    "path": "crates/topos-test-sdk/src/tce/protocol.rs",
    "content": "use futures::Stream;\nuse std::collections::HashSet;\nuse std::sync::Arc;\nuse tokio::sync::broadcast;\nuse topos_config::tce::broadcast::ReliableBroadcastParams;\nuse topos_core::types::ValidatorId;\nuse topos_crypto::messages::MessageSigner;\nuse topos_tce_broadcast::event::ProtocolEvents;\nuse topos_tce_broadcast::{ReliableBroadcastClient, ReliableBroadcastConfig};\nuse topos_tce_storage::types::CertificateDeliveredWithPositions;\nuse topos_tce_storage::validator::ValidatorStore;\nuse tracing::Instrument;\n\npub async fn create_reliable_broadcast_client(\n    validator_id: ValidatorId,\n    validators: HashSet<ValidatorId>,\n    message_signer: Arc<MessageSigner>,\n    tce_params: ReliableBroadcastParams,\n    storage: Arc<ValidatorStore>,\n    sender: broadcast::Sender<CertificateDeliveredWithPositions>,\n) -> (\n    ReliableBroadcastClient,\n    impl Stream<Item = ProtocolEvents> + Unpin,\n) {\n    let config = ReliableBroadcastConfig {\n        tce_params,\n        validator_id,\n        validators,\n        message_signer,\n    };\n\n    ReliableBroadcastClient::new(config, storage, sender)\n        .in_current_span()\n        .await\n}\n\npub fn create_reliable_broadcast_params(number_of_nodes: usize) -> ReliableBroadcastParams {\n    let mut params = ReliableBroadcastParams {\n        ..Default::default()\n    };\n    let f = (number_of_nodes.saturating_sub(1)) / 3;\n\n    params.echo_threshold = 1 + ((number_of_nodes.saturating_add(f)) / 2);\n    params.ready_threshold = 1 + f;\n    params.delivery_threshold = 2 * f + 1;\n\n    params\n}\n"
  },
  {
    "path": "crates/topos-test-sdk/src/tce/public_api.rs",
    "content": "use std::str::FromStr;\nuse std::sync::Arc;\n\nuse futures::Stream;\nuse rstest::*;\nuse tokio::sync::broadcast;\nuse tonic::transport::{channel, Channel};\n\nuse topos_core::api::grpc::tce::v1::{\n    api_service_client::ApiServiceClient, console_service_client::ConsoleServiceClient,\n};\nuse topos_tce_api::RuntimeClient;\nuse topos_tce_api::RuntimeContext;\nuse topos_tce_api::RuntimeEvent;\nuse topos_tce_storage::types::CertificateDeliveredWithPositions;\nuse topos_tce_storage::validator::ValidatorStore;\nuse topos_tce_storage::StorageClient;\nuse tracing::warn;\nuse tracing::Instrument;\n\nuse crate::networking::get_available_addr;\nuse crate::storage::create_validator_store;\nuse crate::storage::storage_client;\nuse crate::PORT_MAPPING;\n\npub struct PublicApiContext {\n    pub entrypoint: String,\n    pub client: RuntimeClient,\n    pub api_client: ApiServiceClient<Channel>,\n    pub console_client: ConsoleServiceClient<Channel>,\n    pub api_context: Option<RuntimeContext>,\n}\n\n#[fixture]\npub fn broadcast_stream() -> broadcast::Receiver<CertificateDeliveredWithPositions> {\n    let (_, r) = broadcast::channel(1000);\n\n    r\n}\n\n#[fixture]\npub async fn create_public_api(\n    #[future] storage_client: StorageClient,\n    broadcast_stream: broadcast::Receiver<CertificateDeliveredWithPositions>,\n    #[future] create_validator_store: Arc<ValidatorStore>,\n) -> (PublicApiContext, impl Stream<Item = RuntimeEvent>) {\n    let storage_client = storage_client.await;\n    let store = create_validator_store.await;\n    let grpc_addr = get_available_addr();\n    let graphql_addr = get_available_addr();\n    let metrics_addr = get_available_addr();\n\n    let api_port = grpc_addr.port();\n\n    let api_endpoint = format!(\"http://0.0.0.0:{api_port}\");\n    warn!(\"API endpoint: {}\", api_endpoint);\n    warn!(\"gRPC endpoint: {}\", grpc_addr);\n    warn!(\"GraphQL endpoint: {}\", graphql_addr);\n    warn!(\"Metrics endpoint: {}\", metrics_addr);\n 
   warn!(\"PORT MAPPING: {:?}\", PORT_MAPPING.lock().unwrap());\n    let (client, stream, ctx) = topos_tce_api::Runtime::builder()\n        .with_broadcast_stream(broadcast_stream)\n        .serve_grpc_addr(grpc_addr)\n        .serve_graphql_addr(graphql_addr)\n        .serve_metrics_addr(metrics_addr)\n        .store(store)\n        .storage(storage_client)\n        .build_and_launch()\n        .in_current_span()\n        .await;\n\n    let api_channel = channel::Endpoint::from_str(&api_endpoint)\n        .unwrap()\n        .connect_lazy();\n\n    let console_channel = channel::Endpoint::from_str(&api_endpoint)\n        .unwrap()\n        .connect_lazy();\n\n    let api_client = ApiServiceClient::new(api_channel);\n    let console_client = ConsoleServiceClient::new(console_channel);\n\n    let context = PublicApiContext {\n        entrypoint: api_endpoint,\n        client,\n        api_client,\n        console_client,\n        api_context: Some(ctx),\n    };\n\n    (context, stream)\n}\n"
  },
  {
    "path": "crates/topos-test-sdk/src/tce/synchronizer.rs",
    "content": "use futures::Stream;\nuse std::future::IntoFuture;\nuse std::sync::Arc;\nuse tokio::{spawn, task::JoinHandle};\nuse tokio_util::sync::CancellationToken;\nuse tracing::Instrument;\n\nuse topos_p2p::NetworkClient;\nuse topos_tce_gatekeeper::GatekeeperClient;\nuse topos_tce_storage::validator::ValidatorStore;\nuse topos_tce_synchronizer::SynchronizerError;\nuse topos_tce_synchronizer::SynchronizerEvent;\n\npub async fn create_synchronizer(\n    _: GatekeeperClient,\n    network_client: NetworkClient,\n    store: Arc<ValidatorStore>,\n) -> (\n    impl Stream<Item = SynchronizerEvent>,\n    JoinHandle<Result<(), SynchronizerError>>,\n) {\n    let shutdown = CancellationToken::new();\n    let (synchronizer_runtime, synchronizer_stream) =\n        topos_tce_synchronizer::Synchronizer::builder()\n            .with_shutdown(shutdown)\n            .with_store(store)\n            .with_network_client(network_client)\n            .build()\n            .expect(\"Can't create the Synchronizer\");\n\n    let synchronizer_join_handle = spawn(synchronizer_runtime.into_future().in_current_span());\n\n    (synchronizer_stream, synchronizer_join_handle)\n}\n"
  },
  {
    "path": "crates/topos-wallet/Cargo.toml",
    "content": "[package]\nname = \"topos-wallet\"\ndescription = \"Key manager\"\nversion = \"0.1.0\"\nedition = \"2021\"\n\n[lints]\nworkspace = true\n\n[dependencies]\nsecp256k1.workspace = true\nbyteorder.workspace = true\nhex.workspace = true\nthiserror.workspace = true\ntracing.workspace = true\n\nkeccak-hash = \"0.10.0\"\neth-keystore = \"0.5.0\"\n\ntopos-crypto.workspace = true\n"
  },
  {
    "path": "crates/topos-wallet/src/error.rs",
    "content": "use thiserror::Error;\n\n#[derive(Debug, Error)]\npub enum Error {\n    #[error(\"Keystore error: {0}\")]\n    KeystoreError(#[from] eth_keystore::KeystoreError),\n\n    #[error(\"Keystore file io error: {0}\")]\n    KeystoreFileError(#[from] std::io::Error),\n\n    #[error(\"Invalid key error: {0}\")]\n    InvalidKeyError(String),\n\n    #[error(\"Elliptic curve error: {0}\")]\n    Secp256k1Error(#[from] secp256k1::Error),\n\n    #[error(\"Invalid signature: {0}\")]\n    InvalidSignature(String),\n}\n"
  },
  {
    "path": "crates/topos-wallet/src/lib.rs",
    "content": "use std::fs;\nuse std::path::PathBuf;\n\npub mod error;\n\n// File tree generated by Polygon Edge\n\n/// Key for the authentication on libp2p (secp256k1)\npub const NETWORK_KEY: &str = \"libp2p/libp2p.key\";\n/// Key for the contracts authentication (secp256k1)\npub const VALIDATOR_KEY: &str = \"consensus/validator.key\";\n/// Key for the IBFT authentication (bls)\npub const VALIDATOR_BLS_KEY: &str = \"consensus/validator-bls.key\";\n\n/// Load from the filesystem\npub fn load_fs_secret(file: PathBuf) -> Option<SecretKey> {\n    match &fs::read_to_string(&file) {\n        Ok(s) => Some(hex::decode(s).unwrap_or_else(|_| panic!(\"decode failure for {}\", s))),\n        Err(e) => panic!(\"Failed at reading {file:?}: {e}\"),\n    }\n}\n\n/// Load from the AWS Secret Manager\n#[allow(dead_code)]\npub fn load_aws_secrets(secrets_config: &str) {\n    println!(\"loading from aws-sm {}\", secrets_config);\n}\n\npub type SecretKey = Vec<u8>;\npub type PublicKey = Vec<u8>;\n\n#[derive(Default, Debug)]\npub struct SecretManager {\n    pub network: Option<SecretKey>,\n    pub validator: Option<SecretKey>,\n    pub validator_bls: Option<SecretKey>,\n}\n\nimpl SecretManager {\n    pub fn from_fs(home_path: PathBuf) -> Self {\n        Self {\n            network: load_fs_secret(home_path.join(NETWORK_KEY)),\n            validator: load_fs_secret(home_path.join(VALIDATOR_KEY)),\n            validator_bls: load_fs_secret(home_path.join(VALIDATOR_BLS_KEY)),\n        }\n    }\n\n    pub fn from_aws(_secrets_config: &str) -> Self {\n        println!(\"loading from aws-sm\");\n        todo!()\n    }\n\n    pub fn validator_pubkey(&self) -> Option<PublicKey> {\n        self.validator\n            .as_ref()\n            .map(|pk| topos_crypto::keys::derive_public_key(pk).unwrap())\n    }\n}\n"
  },
  {
    "path": "docs/.gitignore",
    "content": "book/\nsrc/mdbook-plantuml-img/\n"
  },
  {
    "path": "docs/README.md",
    "content": "# The Topos specification and development internals documentation\n\nThe specification doc is compiled from several source files with [`mdBook`](https://github.com/rust-lang/mdBook).\nTo view it live, locally, from the repo root:\n\nEnsure graphviz and plantuml are installed:\n```sh\nbrew install graphviz plantuml # for macOS\nsudo apt-get install graphviz plantuml # for Ubuntu/Debian\n```\n\nThen install and build the book:\n\n```sh\ncargo install mdbook mdbook-plantuml mdbook-linkcheck mdbook-graphviz\nmdbook serve doc\nopen http://localhost:3000\n```\n"
  },
  {
    "path": "docs/architecture/certificates_collector.md",
    "content": "# CertificatesCollector\n\nThe `CertificatesCollector` is responsible for fetching and validating certificates by gathering the certificate data across peers, verifying it and persist it if everything is ok.\n\n## General design\n\nUpon receiving a `SourceStreamPosition` from the `CheckpointsCollector`, the `CertificatesCollector` will check the difference between the current position that the local node have, and the expected position reported by the `CheckpointsCollector`. The `CertificatesCollector` will then ask the peers of the network to retrieve the list of `CertificateId` to sync, in order. Upon receiving those responses,\n\n<!-- ## Internal design -->\n"
  },
  {
    "path": "docs/architecture/checkpoints_collector.md",
    "content": "# CheckpointsCollector\n\nThe `CheckpointsCollector` is the component that will communicate with other peers to negotiate a network checkpoint to sync with.\n\n## General design\n\nThe `CheckpointsCollector` will use the P2P network to ask for the `Checkpoints` of others peers. The selected peers will be provided by the `Gatekeeper`, returning a chunk of random peers to communicate with.\nThis component asks for Peer's `Checkpoint`. `Peers` will respond with their current `Checkpoint`. This is the responsibility of the `CheckpointsCollector` to communicate with others peers to build and find a network `Checkpoint` that can be used to `sync`.\n\n## Internal design\n\nThe `CheckpointsCollector`, as describe above, has a main goal of negotiating a common checkpoint to start syncing the node.\n\nThe first thing to do for the `CheckpointsCollector` is to contact the `Gatekeeper` for a list of peers to sync with.\nUpon receiving the list of peers, the `CheckpointsCollector` will open connections and send a message to ask for checkpoint, it is pretty straightforward, but it's the first step to define if the peers are trustable to sync.\n\nThe message sent to all peers as the following:\n\n```protobuf\nmessage CheckpointRequest {\n  bool content = 1;\n}\n\nmessage CheckpointResponse {\n  repeatable SourceStreamPosition heads = 1;\n  CheckpointContents content = 2;\n}\n\nmessage CheckpointContents {\n  map<CertificateId, Certificate> content = 1;\n  int32 count = 2;\n}\n```\n\n```\nSEND to   peer1: CheckpointRequest { content: false }\nRECV from peer1: CheckpointResponse { heads: [SourceStreamPosition { subnet_id: \"0x0a\", certificate_id: \"0xba\", position: 10}, ..], content: CheckpointContents { content: { .. 
}, count: 10 } }\n\n```\n\nUpon receiving the response of every peer, the `CheckpointsCollector` will gather every `CheckpointResponse` and analyze them in order to decide if it can trust the peers that respond.\nFor all subnets contained in the `CheckpointResponse` of every peer, the `CheckpointsCollector` will ask the local storage for the current node `SourceStreamPosition` head.\nFor every `SourceStreamPosition` that is lower than our current head, we don't have to sync.\n\n### TTL on CheckpointRequest\n\nIf a peer does not respond following a TTL on the request, this peer will be tagged as byzantine.\nIf the `CheckpointsCollector` finds itself having fewer responses than expected regarding a preconfigured threshold, it can decide to dump every response and ask for a new set of peers from the `Gatekeeper`. (Informing the `Gatekeeper` with the list of peers that didn't respond in time).\n\n### Selecting the smallest Position across responses per subnet\n\nBecause of the distributed and asynchronous delivery of certificates during the broadcast, some peers of our set of sync peers can be late or in advance compared to others. In order to sync and have a consistent view of the network, the node needs to detect this pattern and choose the smallest position in the responses in order to ask for the `SourceStreamPosition` for that subnet at that position.\n\n```\nResponses:\n  RECV from peer1: CheckpointResponse { heads: [SourceStreamPosition { subnet_id: \"0x0a\", certificate_id: \"0xba\", position: 10}, ..], .. }\n  RECV from peer2: CheckpointResponse { heads: [SourceStreamPosition { subnet_id: \"0x0a\", certificate_id: \"0xba\", position: 10}, ..], .. }\n  RECV from peer3: CheckpointResponse { heads: [SourceStreamPosition { subnet_id: \"0x0a\", certificate_id: \"0xef\", position: 9}, ..], .. 
}\n\nProblem:  Received different position for the same subnet\nFallback: Requesting `SourceStreamPosition` for the position 9 to check for consistency\n```\n\n```\nResponses:\n  RECV from peer1: SourceStreamPositionResponse { subnet_id: \"0x0a\", certificate_id: \"0xef\", position: 9}\n  RECV from peer2: SourceStreamPositionResponse { subnet_id: \"0x0a\", certificate_id: \"0xef\", position: 9}\n  RECV from peer3: SourceStreamPositionResponse { subnet_id: \"0x0a\", certificate_id: \"0xef\", position: 9}\n\nResult: This set of peers is in sync and we have a consistent point in the stream to sync. Every peer has the same certificate_id and position.\n```\n\n### Inconsistent CheckpointResponses\n\nApart from TTL there are some cases which can represent an inconsistent set of `CheckpointResponse`.\n\n#### Receiving different certificate_id for the same Position\n\nThe `Position` of a `certificate` in the `SourceStream` of a subnet is guaranteed to be the same across all TCE nodes. It is enforced by the Topos protocol itself and more precisely by the `Broadcast` mechanisms.\nWhen receiving inconsistent `SourceStreamPosition` for a `subnet` across multiple `CheckpointResponse`, if it hits a threshold, the node needs to dump every response and fetch a new set of peers from the `Gatekeeper`.\n\n```\nResponses:\n  RECV from peer1: CheckpointResponse { heads: [SourceStreamPosition { subnet_id: \"0x0a\", certificate_id: \"0xba\", position: 10}, ..], .. }\n  RECV from peer2: CheckpointResponse { heads: [SourceStreamPosition { subnet_id: \"0x0a\", certificate_id: \"0xba\", position: 10}, ..], .. }\n  RECV from peer3: CheckpointResponse { heads: [SourceStreamPosition { subnet_id: \"0x0a\", certificate_id: \"0xef\", position: 10}, ..], .. 
}\n\nProblem:  Receiving different cert for the same position of the same subnet\nFallback: Requesting another batch of TCE nodes until receiving a consistent response\n```\n\n#### Receiving different Position for the same certificate_id\n\nThe `Position` of a `certificate` in the `SourceStream` of a subnet is guaranteed to be the same across all TCE nodes. It is enforced by the Topos protocol itself and more precisely by the `Broadcast` mechanisms.\nWhen receiving inconsistent `SourceStreamPosition` for a `subnet` across multiple `CheckpointResponse`, if it hits a threshold, the node needs to dump every response and fetch a new set of peers from the `Gatekeeper`.\n\n```\nResponses:\n  RECV from peer1: CheckpointResponse { heads: [SourceStreamPosition { subnet_id: \"0x0a\", certificate_id: \"0xba\", position: 10}, ..], .. }\n  RECV from peer2: CheckpointResponse { heads: [SourceStreamPosition { subnet_id: \"0x0a\", certificate_id: \"0xba\", position: 10}, ..], .. }\n  RECV from peer3: CheckpointResponse { heads: [SourceStreamPosition { subnet_id: \"0x0a\", certificate_id: \"0xba\", position: 11}, ..], .. }\n\nProblem:  Receiving different position for the same cert of the same subnet\nFallback: Requesting another batch of TCE nodes until receiving a consistent response\n```\n"
  },
  {
    "path": "docs/architecture/gatekeeper.md",
    "content": "# Gatekeeper\n\nThe `Gatekeeper` is a central piece of the node. It has the role of defining which peers can participate and which nodes need to be listened to.\n\n## General Design\n\nThe `Gatekeeper` will have a local in-memory state representing,\n\n- The list of TCE nodes allowed to participate\n- The list of all subnets allowed to submit certificates\n\nThe `Gatekeeper` will manage the two lists using multiple mechanisms that are not defined for now.\n\nThe `Gatekeeper` can receive and respond to commands in order to provide information to other components:\n\n- Update the configuration of the `Gatekeeper`\n- Request a full list of all peers\n- Request a random list of peers\n- Request a full list of all subnets\n\n## Internal design\n\nThe `Gatekeeper` isn't fully designed for now but a first iteration will be to expose a simple gRPC API to push update of the list of TCE validators.\n\nIn the future we'll need to find solution to fetch this information from the source of truth. The goal here is to expose methods that can be used by any components and in a near future, replace the internal implementation.\n\nThis component will be responsible for exposing the lists of peers/subnets, and it'll be also responsible for maintaining some kind of reputation for peers that we're connecting with.\n\n### Peer list\n\nThe `Peer` list maintained by the `Gatekeeper` will be a simple list of `PeerId`.\n\n### Subnet list\n\nThe `Subnet` list maintained by the `Gatekeeper` will be a simple list of `SubnetId`.\n"
  },
  {
    "path": "docs/architecture/synchronizer.md",
    "content": "# Synchronizer\n\nThe `Synchronizer` is responsible for organizing the components involved in the sync process.\n\n## General design\n\nThe main goal of the `Synchronizer` is to be the entry point of the `Runtime` to start and drive the `sync` process of the node. The `Synchronizer` is responsible for spawning and driving the components involved in the `sync` process by listening events coming from those components but also sending command to them if needed.\n\nThe `Synchronizer` will have some commands used by the `Runtime`, and expose events to notify the `Runtime` about important actions, events or issues during the `sync` process.\n\nThe `Synchronizer` manages two main subcomponents which are `CheckpointsCollector` and `CertificatesCollector`.\n\n<!-- ## Internal design -->\n"
  },
  {
    "path": "docs/book.toml",
    "content": "[book]\nlanguage = \"en\"\nmultilingual = false\nsrc = \"src\"\ntitle = \"The Topos specification and development internals documentation\"\n\n[preprocessor.graphviz]\ncommand = \"mdbook-graphviz\"\n[preprocessor.plantuml]\ncommand = \"mdbook-plantuml\"\n"
  },
  {
    "path": "docs/src/README.md",
    "content": "# Preamble\n\nThis documentation aims at including the first shoot for development specification.\n\nPlease refer also to <https://docs.toposware.com/> for documentation on the protocol level.\n"
  },
  {
    "path": "docs/src/SUMMARY.md",
    "content": "# Summary\n\n[Preamble](README.md)\n\n- [Topos Node Architecture](topos-node.md)\n- [Test](test.md)\n\n[Glossary](glossary.md)\n"
  },
  {
    "path": "docs/src/glossary.md",
    "content": "# Glossary\n\n- BABE: (Blind Assignment for Blockchain Extension). The consensus algorithm validators use as block production mechanism. See [the Polkadot wiki][0] for more information.\n- Extrinsic: An element of a relay-chain block which triggers a specific entry-point of a runtime module with given arguments.\n- GRANDPA: (Ghost-based Recursive ANcestor Deriving Prefix Agreement). The algorithm validators uses to guarantee finality of the subnet. See [the Polkadot wiki][0] for more information.\n- Pallet: A component of the Runtime logic, encapsulating storage, routines, and entry-points.\n- Runtime: The subnet state machine.\n- Runtime API: A means for the node-side behavior to access structured information based on the state of a fork of the blockchain.\n- Worker: A long-running task which is responsible for carrying out a particular category of subprotocols, e.g., DKG is a Topos subprotocol implemented as a Worker.\n- Validator: Specially-selected node in the network who is responsible for validating subnet block and issuing attestations about their validity.\n\nAlso of use is the [Substrate Glossary](https://substrate.dev/docs/en/knowledgebase/getting-started/glossary).\n\n[0]: https://wiki.polkadot.network/docs/learn-consensus\n"
  },
  {
    "path": "docs/src/test.md",
    "content": "# Test\n\nThe tests are described using [Gherkin](https://cucumber.io/docs/gherkin/), a language for [Behavior-driven development](https://en.wikipedia.org/wiki/Behavior-driven_development).\n\nThe tests are located in the root folder `tests`.\nEach file with the `.feature` extension implements the test scenario for each component.\n\nCurrently, the high level logical components are namely,\n- Subnet\n- TCE\n- API\n"
  },
  {
    "path": "docs/src/topos-node.md",
    "content": "# Topos Node\n\n## Overview of the components\n\n```plantuml\n@startuml\nnode \"Subnet Node (Process)\" as subnet_node {\n    [Runtime]-[Service]\n}\n\nnode \"TCE Node (Process)\" as tce_node {\n    [Web Api]\n}\n\npackage \"Oracle App (Process)\" as oracle_app {\n    [zk-VM]\n    [Certificate Generator]\n    [FROST Signature Generator]\n}\n\n[Runtime] <--> [Certificate Generator]: RPC/websocket\n[Certificate Generator] <--> [FROST Signature Generator]: Message passing\n[Certificate Generator] <--> [zk-VM]: Message passing\n[Certificate Generator] ---> [Web Api]: Message passing\n\n@enduml\n```\n"
  },
  {
    "path": "grafana/benchmarks-dashboard.json",
    "content": "{\n  \"annotations\": {\n    \"list\": [\n      {\n        \"builtIn\": 1,\n        \"datasource\": {\n          \"type\": \"grafana\",\n          \"uid\": \"-- Grafana --\"\n        },\n        \"enable\": true,\n        \"hide\": true,\n        \"iconColor\": \"rgba(0, 211, 255, 1)\",\n        \"name\": \"Annotations & Alerts\",\n        \"target\": {\n          \"limit\": 100,\n          \"matchAny\": false,\n          \"tags\": [],\n          \"type\": \"dashboard\"\n        },\n        \"type\": \"dashboard\"\n      }\n    ]\n  },\n  \"editable\": true,\n  \"fiscalYearStartMonth\": 0,\n  \"graphTooltip\": 0,\n  \"id\": 419,\n  \"links\": [],\n  \"liveNow\": false,\n  \"panels\": [\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${datasource}\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisCenteredZero\": false,\n            \"axisColorMode\": \"text\",\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            \"drawStyle\": \"line\",\n            \"fillOpacity\": 0,\n            \"gradientMode\": \"none\",\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"lineInterpolation\": \"linear\",\n            \"lineWidth\": 1,\n            \"pointSize\": 5,\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"showPoints\": \"auto\",\n            \"spanNulls\": false,\n            \"stacking\": {\n              \"group\": \"A\",\n              \"mode\": \"none\"\n            },\n            \"thresholdsStyle\": {\n              \"mode\": \"off\"\n            }\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": 
\"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": null\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          }\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 5,\n        \"w\": 10,\n        \"x\": 0,\n        \"y\": 0\n      },\n      \"id\": 51,\n      \"options\": {\n        \"legend\": {\n          \"calcs\": [],\n          \"displayMode\": \"list\",\n          \"placement\": \"bottom\",\n          \"showLegend\": true\n        },\n        \"tooltip\": {\n          \"mode\": \"single\",\n          \"sort\": \"none\"\n        }\n      },\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"builder\",\n          \"expr\": \"topos_certificate_delivered_total\",\n          \"legendFormat\": \"__auto\",\n          \"range\": true,\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"Panel Title\",\n      \"transformations\": [\n        {\n          \"id\": \"convertFieldType\",\n          \"options\": {\n            \"conversions\": [],\n            \"fields\": {}\n          }\n        }\n      ],\n      \"type\": \"timeseries\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${datasource}\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisCenteredZero\": false,\n            \"axisColorMode\": \"text\",\n            \"axisGridShow\": true,\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            \"drawStyle\": \"line\",\n            \"fillOpacity\": 0,\n            
\"gradientMode\": \"none\",\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"lineInterpolation\": \"linear\",\n            \"lineWidth\": 1,\n            \"pointSize\": 5,\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"showPoints\": \"auto\",\n            \"spanNulls\": false,\n            \"stacking\": {\n              \"group\": \"A\",\n              \"mode\": \"none\"\n            },\n            \"thresholdsStyle\": {\n              \"mode\": \"off\"\n            }\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": null\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          },\n          \"unit\": \"s\"\n        },\n        \"overrides\": [\n          {\n            \"matcher\": {\n              \"id\": \"byFrameRefID\",\n              \"options\": \"max\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"custom.fillBelowTo\",\n                \"value\": \"min\"\n              },\n              {\n                \"id\": \"custom.fillOpacity\",\n                \"value\": 17\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byFrameRefID\",\n              \"options\": \"avg\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"custom.lineWidth\",\n                \"value\": 3\n              }\n            ]\n          }\n        ]\n      },\n      \"gridPos\": {\n        \"h\": 5,\n        \"w\": 13,\n        \"x\": 10,\n        \"y\": 0\n      },\n      \"id\": 52,\n      \"interval\": \"4\",\n      
\"options\": {\n        \"legend\": {\n          \"calcs\": [\n            \"stdDev\"\n          ],\n          \"displayMode\": \"list\",\n          \"placement\": \"right\",\n          \"showLegend\": true\n        },\n        \"tooltip\": {\n          \"mode\": \"single\",\n          \"sort\": \"none\"\n        }\n      },\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"avg(rate(topos_double_echo_delivery_latency_sum[$__rate_interval]) / rate(topos_double_echo_delivery_latency_count[$__rate_interval]))\",\n          \"legendFormat\": \"avg\",\n          \"range\": true,\n          \"refId\": \"avg\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"min(rate(topos_double_echo_delivery_latency_sum[$__rate_interval]) / rate(topos_double_echo_delivery_latency_count[$__rate_interval]))\",\n          \"hide\": false,\n          \"legendFormat\": \"min\",\n          \"range\": true,\n          \"refId\": \"min\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"max(rate(topos_double_echo_delivery_latency_sum[$__rate_interval]) / rate(topos_double_echo_delivery_latency_count[$__rate_interval]))\",\n          \"hide\": false,\n          \"legendFormat\": \"max\",\n          \"range\": true,\n          \"refId\": \"max\"\n        },\n        {\n          \"datasource\": {\n            \"name\": \"Expression\",\n            \"type\": \"__expr__\",\n            \"uid\": \"__expr__\"\n          },\n          \"expression\": \"avg\",\n          \"hide\": false,\n          \"reducer\": \"mean\",\n          
\"refId\": \"reduced_point\",\n          \"settings\": {\n            \"mode\": \"dropNN\"\n          },\n          \"type\": \"reduce\"\n        }\n      ],\n      \"title\": \"Latency\",\n      \"transformations\": [],\n      \"type\": \"timeseries\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${datasource}\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"thresholds\"\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": null\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          },\n          \"unit\": \"none\"\n        },\n        \"overrides\": [\n          {\n            \"matcher\": {\n              \"id\": \"byFrameRefID\",\n              \"options\": \"Ratio\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"thresholds\",\n                \"value\": {\n                  \"mode\": \"percentage\",\n                  \"steps\": [\n                    {\n                      \"color\": \"green\",\n                      \"value\": null\n                    }\n                  ]\n                }\n              }\n            ]\n          }\n        ]\n      },\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 2,\n        \"x\": 0,\n        \"y\": 5\n      },\n      \"id\": 25,\n      \"options\": {\n        \"colorMode\": \"none\",\n        \"graphMode\": \"none\",\n        \"justifyMode\": \"auto\",\n        \"orientation\": \"auto\",\n        \"reduceOptions\": {\n          \"calcs\": [\n            \"lastNotNull\"\n          ],\n          \"fields\": \"\",\n          \"values\": false\n        },\n        \"textMode\": \"auto\"\n      },\n      
\"pluginVersion\": \"9.3.8\",\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"sum(topos_certificate_processing_total{job=~\\\"$peer\\\"})\",\n          \"legendFormat\": \"Received\",\n          \"range\": true,\n          \"refId\": \"A\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"sum(topos_certificate_delivered_total{job=~\\\"$peer\\\"})\",\n          \"hide\": false,\n          \"legendFormat\": \"Delivered\",\n          \"range\": true,\n          \"refId\": \"B\"\n        },\n        {\n          \"datasource\": {\n            \"name\": \"Expression\",\n            \"type\": \"__expr__\",\n            \"uid\": \"__expr__\"\n          },\n          \"expression\": \"100 * ($B ) / ($A) \",\n          \"hide\": false,\n          \"refId\": \"Ratio\",\n          \"type\": \"math\"\n        }\n      ],\n      \"title\": \"Health\",\n      \"type\": \"stat\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${datasource}\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"custom\": {\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            }\n          }\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 9,\n        \"x\": 2,\n        \"y\": 5\n      },\n      \"id\": 37,\n      \"interval\": \"5\",\n      \"options\": {\n        \"calculate\": false,\n        \"cellGap\": 1,\n        \"color\": {\n          \"exponent\": 0.5,\n          \"fill\": 
\"dark-orange\",\n          \"mode\": \"scheme\",\n          \"reverse\": true,\n          \"scale\": \"exponential\",\n          \"scheme\": \"Magma\",\n          \"steps\": 128\n        },\n        \"exemplars\": {\n          \"color\": \"rgba(255,0,255,0.7)\"\n        },\n        \"filterValues\": {\n          \"le\": 1e-10\n        },\n        \"legend\": {\n          \"show\": true\n        },\n        \"rowsFrame\": {\n          \"layout\": \"auto\"\n        },\n        \"tooltip\": {\n          \"show\": true,\n          \"yHistogram\": false\n        },\n        \"yAxis\": {\n          \"axisPlacement\": \"left\",\n          \"reverse\": false\n        }\n      },\n      \"pluginVersion\": \"9.3.8\",\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"builder\",\n          \"expr\": \"sum by(job) (increase(topos_certificate_delivered_total[$__rate_interval]))\",\n          \"legendFormat\": \"__auto\",\n          \"range\": true,\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"Delivery dynamic\",\n      \"type\": \"heatmap\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${datasource}\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisCenteredZero\": false,\n            \"axisColorMode\": \"text\",\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            \"drawStyle\": \"line\",\n            \"fillOpacity\": 0,\n            \"gradientMode\": \"none\",\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"lineInterpolation\": \"linear\",\n            
\"lineWidth\": 1,\n            \"pointSize\": 5,\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"showPoints\": \"auto\",\n            \"spanNulls\": false,\n            \"stacking\": {\n              \"group\": \"A\",\n              \"mode\": \"none\"\n            },\n            \"thresholdsStyle\": {\n              \"mode\": \"off\"\n            }\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": null\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          }\n        },\n        \"overrides\": [\n          {\n            \"matcher\": {\n              \"id\": \"byFrameRefID\",\n              \"options\": \"input\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"custom.lineWidth\",\n                \"value\": 2\n              },\n              {\n                \"id\": \"custom.lineStyle\",\n                \"value\": {\n                  \"dash\": [\n                    10,\n                    10\n                  ],\n                  \"fill\": \"dash\"\n                }\n              }\n            ]\n          }\n        ]\n      },\n      \"gridPos\": {\n        \"h\": 7,\n        \"w\": 13,\n        \"x\": 11,\n        \"y\": 5\n      },\n      \"id\": 39,\n      \"interval\": \"14\",\n      \"options\": {\n        \"legend\": {\n          \"calcs\": [],\n          \"displayMode\": \"list\",\n          \"placement\": \"right\",\n          \"showLegend\": true\n        },\n        \"tooltip\": {\n          \"mode\": \"single\",\n          \"sort\": \"none\"\n        }\n      },\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": 
\"${datasource}\"\n          },\n          \"editorMode\": \"builder\",\n          \"expr\": \"avg(sum by(job) (irate(topos_certificate_delivered_total[$__rate_interval])))\",\n          \"hide\": false,\n          \"legendFormat\": \"Actual\",\n          \"range\": true,\n          \"refId\": \"A\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"avg(sum by(job) (irate(topos_certificate_processing_total[$__interval])))\",\n          \"hide\": true,\n          \"legendFormat\": \"Input\",\n          \"range\": true,\n          \"refId\": \"input\"\n        }\n      ],\n      \"title\": \"Throughput\",\n      \"type\": \"timeseries\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${datasource}\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisCenteredZero\": false,\n            \"axisColorMode\": \"text\",\n            \"axisGridShow\": true,\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            \"drawStyle\": \"line\",\n            \"fillOpacity\": 0,\n            \"gradientMode\": \"none\",\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"lineInterpolation\": \"linear\",\n            \"lineWidth\": 1,\n            \"pointSize\": 5,\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"showPoints\": \"auto\",\n            \"spanNulls\": false,\n            \"stacking\": {\n              \"group\": \"A\",\n              \"mode\": \"none\"\n            },\n            \"thresholdsStyle\": {\n              
\"mode\": \"off\"\n            }\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": null\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          },\n          \"unit\": \"s\"\n        },\n        \"overrides\": [\n          {\n            \"matcher\": {\n              \"id\": \"byFrameRefID\",\n              \"options\": \"max\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"custom.fillBelowTo\",\n                \"value\": \"min\"\n              },\n              {\n                \"id\": \"custom.fillOpacity\",\n                \"value\": 17\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byFrameRefID\",\n              \"options\": \"avg\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"custom.lineWidth\",\n                \"value\": 3\n              }\n            ]\n          }\n        ]\n      },\n      \"gridPos\": {\n        \"h\": 5,\n        \"w\": 13,\n        \"x\": 11,\n        \"y\": 12\n      },\n      \"id\": 41,\n      \"interval\": \"4\",\n      \"options\": {\n        \"legend\": {\n          \"calcs\": [\n            \"stdDev\"\n          ],\n          \"displayMode\": \"list\",\n          \"placement\": \"right\",\n          \"showLegend\": true\n        },\n        \"tooltip\": {\n          \"mode\": \"single\",\n          \"sort\": \"none\"\n        }\n      },\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"avg(rate(topos_double_echo_delivery_latency_sum[$__rate_interval]) / 
rate(topos_double_echo_delivery_latency_count[$__rate_interval]))\",\n          \"legendFormat\": \"avg\",\n          \"range\": true,\n          \"refId\": \"avg\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"min(rate(topos_double_echo_delivery_latency_sum[$__rate_interval]) / rate(topos_double_echo_delivery_latency_count[$__rate_interval]))\",\n          \"hide\": false,\n          \"legendFormat\": \"min\",\n          \"range\": true,\n          \"refId\": \"min\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"max(rate(topos_double_echo_delivery_latency_sum[$__rate_interval]) / rate(topos_double_echo_delivery_latency_count[$__rate_interval]))\",\n          \"hide\": false,\n          \"legendFormat\": \"max\",\n          \"range\": true,\n          \"refId\": \"max\"\n        }\n      ],\n      \"title\": \"Latency\",\n      \"type\": \"timeseries\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${datasource}\"\n      },\n      \"description\": \"Received from gRPC\",\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisCenteredZero\": false,\n            \"axisColorMode\": \"text\",\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            \"drawStyle\": \"line\",\n            \"fillOpacity\": 0,\n            \"gradientMode\": \"none\",\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"lineInterpolation\": 
\"linear\",\n            \"lineWidth\": 1,\n            \"pointSize\": 5,\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"showPoints\": \"auto\",\n            \"spanNulls\": false,\n            \"stacking\": {\n              \"group\": \"A\",\n              \"mode\": \"none\"\n            },\n            \"thresholdsStyle\": {\n              \"mode\": \"off\"\n            }\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": null\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          }\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 11,\n        \"x\": 0,\n        \"y\": 13\n      },\n      \"id\": 43,\n      \"interval\": \"5\",\n      \"options\": {\n        \"legend\": {\n          \"calcs\": [],\n          \"displayMode\": \"list\",\n          \"placement\": \"bottom\",\n          \"showLegend\": true\n        },\n        \"tooltip\": {\n          \"mode\": \"single\",\n          \"sort\": \"none\"\n        }\n      },\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"builder\",\n          \"expr\": \"topos_api_grpc_certificate_received_total\",\n          \"legendFormat\": \"{{job}}\",\n          \"range\": true,\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"gRPC received\",\n      \"type\": \"timeseries\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${datasource}\"\n      },\n      \"description\": \"\",\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            
\"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisCenteredZero\": false,\n            \"axisColorMode\": \"text\",\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            \"drawStyle\": \"line\",\n            \"fillOpacity\": 0,\n            \"gradientMode\": \"none\",\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"lineInterpolation\": \"linear\",\n            \"lineWidth\": 1,\n            \"pointSize\": 5,\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"showPoints\": \"auto\",\n            \"spanNulls\": false,\n            \"stacking\": {\n              \"group\": \"A\",\n              \"mode\": \"none\"\n            },\n            \"thresholdsStyle\": {\n              \"mode\": \"off\"\n            }\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": null\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          }\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 13,\n        \"x\": 11,\n        \"y\": 17\n      },\n      \"id\": 9,\n      \"interval\": \"4\",\n      \"options\": {\n        \"legend\": {\n          \"calcs\": [],\n          \"displayMode\": \"list\",\n          \"placement\": \"bottom\",\n          \"showLegend\": true\n        },\n        \"tooltip\": {\n          \"mode\": \"single\",\n          \"sort\": \"none\"\n        }\n      },\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": 
\"${datasource}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"avg(topos_certificate_processing_total{job=~\\\"$peer\\\"})\",\n          \"hide\": false,\n          \"legendFormat\": \"Double Echo module\",\n          \"range\": true,\n          \"refId\": \"A\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"avg(topos_certificate_processing_from_api_total{job=~\\\"$peer\\\"})\",\n          \"hide\": false,\n          \"legendFormat\": \"Received by gRPC API\",\n          \"range\": true,\n          \"refId\": \"B\"\n        }\n      ],\n      \"title\": \"Certificate reception in the double echo\",\n      \"type\": \"timeseries\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${datasource}\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisCenteredZero\": false,\n            \"axisColorMode\": \"text\",\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            \"drawStyle\": \"line\",\n            \"fillOpacity\": 0,\n            \"gradientMode\": \"none\",\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"lineInterpolation\": \"linear\",\n            \"lineWidth\": 1,\n            \"pointSize\": 5,\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"showPoints\": \"auto\",\n            \"spanNulls\": false,\n            \"stacking\": {\n              \"group\": \"A\",\n              \"mode\": \"none\"\n            },\n            \"thresholdsStyle\": {\n              \"mode\": 
\"off\"\n            }\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": null\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          }\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 11,\n        \"x\": 0,\n        \"y\": 21\n      },\n      \"id\": 45,\n      \"interval\": \"5\",\n      \"options\": {\n        \"legend\": {\n          \"calcs\": [],\n          \"displayMode\": \"list\",\n          \"placement\": \"bottom\",\n          \"showLegend\": true\n        },\n        \"tooltip\": {\n          \"mode\": \"single\",\n          \"sort\": \"none\"\n        }\n      },\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"builder\",\n          \"expr\": \"topos_double_echo_active_tasks_count\",\n          \"legendFormat\": \"{{job}}\",\n          \"range\": true,\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"Active tokio tasks\",\n      \"type\": \"timeseries\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${datasource}\"\n      },\n      \"description\": \"Upon received from the gRPC API\",\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisCenteredZero\": false,\n            \"axisColorMode\": \"text\",\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            \"drawStyle\": \"line\",\n            \"fillOpacity\": 0,\n            \"gradientMode\": \"none\",\n    
        \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"lineInterpolation\": \"linear\",\n            \"lineWidth\": 1,\n            \"pointSize\": 5,\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"showPoints\": \"auto\",\n            \"spanNulls\": false,\n            \"stacking\": {\n              \"group\": \"A\",\n              \"mode\": \"none\"\n            },\n            \"thresholdsStyle\": {\n              \"mode\": \"off\"\n            }\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": null\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          }\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 13,\n        \"x\": 11,\n        \"y\": 25\n      },\n      \"id\": 48,\n      \"interval\": \"4\",\n      \"options\": {\n        \"legend\": {\n          \"calcs\": [],\n          \"displayMode\": \"list\",\n          \"placement\": \"bottom\",\n          \"showLegend\": true\n        },\n        \"tooltip\": {\n          \"mode\": \"single\",\n          \"sort\": \"none\"\n        }\n      },\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"topos_certificate_processing_from_api_total{job=~\\\"$peer\\\"}\",\n          \"hide\": false,\n          \"legendFormat\": \"API {{job}}\",\n          \"range\": true,\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"Certificate reception from API\",\n      \"type\": 
\"timeseries\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${datasource}\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"custom\": {\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            }\n          }\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 11,\n        \"x\": 0,\n        \"y\": 29\n      },\n      \"id\": 35,\n      \"interval\": \"5\",\n      \"options\": {\n        \"calculate\": false,\n        \"cellGap\": 1,\n        \"color\": {\n          \"exponent\": 0.5,\n          \"fill\": \"dark-orange\",\n          \"mode\": \"scheme\",\n          \"reverse\": true,\n          \"scale\": \"exponential\",\n          \"scheme\": \"Oranges\",\n          \"steps\": 113\n        },\n        \"exemplars\": {\n          \"color\": \"rgba(255,0,255,0.7)\"\n        },\n        \"filterValues\": {\n          \"le\": 1e-9\n        },\n        \"legend\": {\n          \"show\": true\n        },\n        \"rowsFrame\": {\n          \"layout\": \"auto\"\n        },\n        \"tooltip\": {\n          \"show\": true,\n          \"yHistogram\": false\n        },\n        \"yAxis\": {\n          \"axisPlacement\": \"left\",\n          \"reverse\": false\n        }\n      },\n      \"pluginVersion\": \"9.3.8\",\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"builder\",\n          \"expr\": \"sum by(le) (increase(topos_p2p_gossip_batch_size_bucket[$__rate_interval]))\",\n          \"format\": \"heatmap\",\n          \"legendFormat\": \"{{job}}\",\n          \"range\": true,\n          \"refId\": \"A\"\n        }\n      ],\n      
\"title\": \"Batch size\",\n      \"type\": \"heatmap\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${datasource}\"\n      },\n      \"description\": \"Upon received from GossipSub\",\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisCenteredZero\": false,\n            \"axisColorMode\": \"text\",\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            \"drawStyle\": \"line\",\n            \"fillOpacity\": 0,\n            \"gradientMode\": \"none\",\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"lineInterpolation\": \"linear\",\n            \"lineWidth\": 1,\n            \"pointSize\": 5,\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"showPoints\": \"auto\",\n            \"spanNulls\": false,\n            \"stacking\": {\n              \"group\": \"A\",\n              \"mode\": \"none\"\n            },\n            \"thresholdsStyle\": {\n              \"mode\": \"off\"\n            }\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": null\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          }\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 13,\n        \"x\": 11,\n        \"y\": 33\n      },\n      \"id\": 49,\n      \"interval\": \"4\",\n      \"options\": {\n        \"legend\": {\n          \"calcs\": [],\n          \"displayMode\": 
\"list\",\n          \"placement\": \"bottom\",\n          \"showLegend\": true\n        },\n        \"tooltip\": {\n          \"mode\": \"single\",\n          \"sort\": \"none\"\n        }\n      },\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"builder\",\n          \"expr\": \"topos_libp2p_gossipsub_topic_msg_recv_counts_total{job=~\\\"$peer\\\", hash=\\\"topos_gossip\\\"}\",\n          \"hide\": false,\n          \"legendFormat\": \"API {{job}}\",\n          \"range\": true,\n          \"refId\": \"A\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"hide\": false,\n          \"refId\": \"B\"\n        }\n      ],\n      \"title\": \"Certificate reception from Gossip\",\n      \"type\": \"timeseries\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${datasource}\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisCenteredZero\": false,\n            \"axisColorMode\": \"text\",\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            \"drawStyle\": \"line\",\n            \"fillOpacity\": 0,\n            \"gradientMode\": \"none\",\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"lineInterpolation\": \"linear\",\n            \"lineWidth\": 1,\n            \"pointSize\": 5,\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"showPoints\": \"auto\",\n            \"spanNulls\": false,\n            \"stacking\": {\n        
      \"group\": \"A\",\n              \"mode\": \"none\"\n            },\n            \"thresholdsStyle\": {\n              \"mode\": \"off\"\n            }\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": null\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          }\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 13,\n        \"x\": 11,\n        \"y\": 41\n      },\n      \"id\": 47,\n      \"interval\": \"4\",\n      \"options\": {\n        \"legend\": {\n          \"calcs\": [],\n          \"displayMode\": \"list\",\n          \"placement\": \"bottom\",\n          \"showLegend\": true\n        },\n        \"tooltip\": {\n          \"mode\": \"single\",\n          \"sort\": \"none\"\n        }\n      },\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"builder\",\n          \"expr\": \"topos_double_echo_broadcast_finished_total\",\n          \"legendFormat\": \"{{job}}\",\n          \"range\": true,\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"Broadcast finished\",\n      \"type\": \"timeseries\"\n    },\n    {\n      \"collapsed\": false,\n      \"gridPos\": {\n        \"h\": 1,\n        \"w\": 24,\n        \"x\": 0,\n        \"y\": 49\n      },\n      \"id\": 32,\n      \"panels\": [],\n      \"title\": \"Misc\",\n      \"type\": \"row\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${datasource}\"\n      },\n      \"description\": \"Latency between rounds\",\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n           
 \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisCenteredZero\": false,\n            \"axisColorMode\": \"text\",\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            \"drawStyle\": \"line\",\n            \"fillOpacity\": 0,\n            \"gradientMode\": \"none\",\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"lineInterpolation\": \"linear\",\n            \"lineWidth\": 1,\n            \"pointSize\": 5,\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"showPoints\": \"auto\",\n            \"spanNulls\": false,\n            \"stacking\": {\n              \"group\": \"A\",\n              \"mode\": \"none\"\n            },\n            \"thresholdsStyle\": {\n              \"mode\": \"off\"\n            }\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": null\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          }\n        },\n        \"overrides\": [\n          {\n            \"matcher\": {\n              \"id\": \"byFrameRefID\",\n              \"options\": \"A\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"custom.axisPlacement\",\n                \"value\": \"right\"\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byFrameRefID\",\n              \"options\": \"Echo published\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"custom.axisPlacement\",\n                \"value\": \"right\"\n  
            }\n            ]\n          }\n        ]\n      },\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 12,\n        \"x\": 0,\n        \"y\": 50\n      },\n      \"id\": 30,\n      \"interval\": \"4\",\n      \"options\": {\n        \"legend\": {\n          \"calcs\": [],\n          \"displayMode\": \"list\",\n          \"placement\": \"bottom\",\n          \"showLegend\": true\n        },\n        \"tooltip\": {\n          \"mode\": \"single\",\n          \"sort\": \"none\"\n        }\n      },\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"sum by(hash) (increase(topos_libp2p_gossipsub_topic_msg_recv_counts_total{hash=\\\"topos_gossip\\\", job=~\\\"$peer\\\"}[$__rate_interval]))\",\n          \"legendFormat\": \"Gossip received\",\n          \"range\": true,\n          \"refId\": \"Gossip received\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"sum by(hash) (increase(topos_libp2p_gossipsub_topic_msg_published_total{hash=\\\"topos_echo\\\", job=~\\\"$peer\\\"}[$__rate_interval]))\",\n          \"hide\": false,\n          \"legendFormat\": \"Echo published\",\n          \"range\": true,\n          \"refId\": \"Echo published\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"sum by(hash) (increase(topos_libp2p_gossipsub_topic_iwant_msgs_total{hash=\\\"topos_gossip\\\", job=~\\\"$peer\\\"}[$__rate_interval]))\",\n          \"hide\": false,\n          \"legendFormat\": \"IWANT Gossip\",\n          \"range\": true,\n          \"refId\": \"IWANT\"\n        }\n      ],\n      
\"title\": \"Gossip received vs. Echo published\",\n      \"type\": \"timeseries\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${datasource}\"\n      },\n      \"description\": \"Measure the redundancy\",\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisCenteredZero\": false,\n            \"axisColorMode\": \"text\",\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            \"drawStyle\": \"line\",\n            \"fillOpacity\": 0,\n            \"gradientMode\": \"none\",\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"lineInterpolation\": \"linear\",\n            \"lineWidth\": 1,\n            \"pointSize\": 5,\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"showPoints\": \"auto\",\n            \"spanNulls\": false,\n            \"stacking\": {\n              \"group\": \"A\",\n              \"mode\": \"none\"\n            },\n            \"thresholdsStyle\": {\n              \"mode\": \"off\"\n            }\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": null\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          }\n        },\n        \"overrides\": [\n          {\n            \"matcher\": {\n              \"id\": \"byFrameRefID\",\n              \"options\": \"Ratio\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"custom.axisPlacement\",\n                
\"value\": \"right\"\n              },\n              {\n                \"id\": \"custom.scaleDistribution\",\n                \"value\": {\n                  \"log\": 10,\n                  \"type\": \"log\"\n                }\n              },\n              {\n                \"id\": \"custom.lineStyle\",\n                \"value\": {\n                  \"dash\": [\n                    0,\n                    10\n                  ],\n                  \"fill\": \"dot\"\n                }\n              },\n              {\n                \"id\": \"custom.lineWidth\",\n                \"value\": 4\n              }\n            ]\n          }\n        ]\n      },\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 12,\n        \"x\": 12,\n        \"y\": 50\n      },\n      \"id\": 29,\n      \"interval\": \"4\",\n      \"options\": {\n        \"legend\": {\n          \"calcs\": [],\n          \"displayMode\": \"list\",\n          \"placement\": \"bottom\",\n          \"showLegend\": true\n        },\n        \"tooltip\": {\n          \"mode\": \"single\",\n          \"sort\": \"none\"\n        }\n      },\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"sum by(instance) (avg by(hash) (topos_libp2p_gossipsub_topic_msg_recv_counts_total{job=~\\\"$peer\\\"}))\",\n          \"hide\": false,\n          \"legendFormat\": \"Filtered\",\n          \"range\": true,\n          \"refId\": \"filtered\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"sum by(instance) (avg by(hash) (topos_libp2p_gossipsub_topic_msg_recv_counts_unfiltered_total{job=~\\\"$peer\\\"}))\",\n          \"hide\": false,\n          \"legendFormat\": \"Unfiltered\",\n          
\"range\": true,\n          \"refId\": \"unfiltered\"\n        },\n        {\n          \"datasource\": {\n            \"name\": \"Expression\",\n            \"type\": \"__expr__\",\n            \"uid\": \"__expr__\"\n          },\n          \"expression\": \"$unfiltered / $filtered\",\n          \"hide\": false,\n          \"refId\": \"Ratio\",\n          \"type\": \"math\"\n        }\n      ],\n      \"title\": \"Ratio payload recv unfiltered vs. filtered\",\n      \"type\": \"timeseries\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${datasource}\"\n      },\n      \"description\": \"Latency between rounds\",\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisCenteredZero\": false,\n            \"axisColorMode\": \"text\",\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            \"drawStyle\": \"line\",\n            \"fillOpacity\": 0,\n            \"gradientMode\": \"none\",\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"lineInterpolation\": \"linear\",\n            \"lineWidth\": 1,\n            \"pointSize\": 5,\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"showPoints\": \"auto\",\n            \"spanNulls\": false,\n            \"stacking\": {\n              \"group\": \"A\",\n              \"mode\": \"none\"\n            },\n            \"thresholdsStyle\": {\n              \"mode\": \"off\"\n            }\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": null\n              },\n          
    {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          }\n        },\n        \"overrides\": [\n          {\n            \"matcher\": {\n              \"id\": \"byFrameRefID\",\n              \"options\": \"A\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"custom.axisLabel\",\n                \"value\": \"IWANT\"\n              }\n            ]\n          }\n        ]\n      },\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 12,\n        \"x\": 0,\n        \"y\": 58\n      },\n      \"id\": 31,\n      \"interval\": \"4\",\n      \"options\": {\n        \"legend\": {\n          \"calcs\": [],\n          \"displayMode\": \"list\",\n          \"placement\": \"bottom\",\n          \"showLegend\": true\n        },\n        \"tooltip\": {\n          \"mode\": \"single\",\n          \"sort\": \"none\"\n        }\n      },\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"sum by(hash) (increase(topos_libp2p_gossipsub_topic_msg_recv_counts_total{hash=\\\"topos_echo\\\", job=~\\\"$peer\\\"}[$__rate_interval]))\",\n          \"hide\": false,\n          \"legendFormat\": \"Echo received\",\n          \"range\": true,\n          \"refId\": \"Gossip received\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"sum by(hash) (increase(topos_libp2p_gossipsub_topic_msg_published_total{hash=\\\"topos_ready\\\", job=~\\\"$peer\\\"}[$__rate_interval]))\",\n          \"hide\": false,\n          \"legendFormat\": \"Ready published\",\n          \"range\": true,\n          \"refId\": \"Echo published\"\n        },\n        {\n          \"datasource\": {\n        
    \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"sum by(hash) (increase(topos_libp2p_gossipsub_topic_iwant_msgs_total{hash=\\\"topos_echo\\\", job=~\\\"$peer\\\"}[$__rate_interval]))\",\n          \"hide\": false,\n          \"legendFormat\": \"IWANT Echo\",\n          \"range\": true,\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"Echo received vs. Ready published\",\n      \"type\": \"timeseries\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${datasource}\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"continuous-GrYlRd\"\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": null\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          }\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 16,\n        \"w\": 12,\n        \"x\": 12,\n        \"y\": 58\n      },\n      \"id\": 27,\n      \"options\": {\n        \"displayMode\": \"basic\",\n        \"minVizHeight\": 10,\n        \"minVizWidth\": 0,\n        \"orientation\": \"horizontal\",\n        \"reduceOptions\": {\n          \"calcs\": [\n            \"lastNotNull\"\n          ],\n          \"fields\": \"\",\n          \"values\": false\n        },\n        \"showUnfilled\": true,\n        \"valueMode\": \"color\"\n      },\n      \"pluginVersion\": \"9.3.8\",\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": 
\"topos_certificate_delivered_total{job=~\\\"$peer\\\"}\",\n          \"legendFormat\": \"{{job}}\",\n          \"range\": true,\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"Certificate delivered per peer\",\n      \"type\": \"bargauge\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${datasource}\"\n      },\n      \"description\": \"Latency between rounds\",\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisCenteredZero\": false,\n            \"axisColorMode\": \"text\",\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            \"drawStyle\": \"line\",\n            \"fillOpacity\": 0,\n            \"gradientMode\": \"none\",\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"lineInterpolation\": \"linear\",\n            \"lineWidth\": 1,\n            \"pointSize\": 5,\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"showPoints\": \"auto\",\n            \"spanNulls\": false,\n            \"stacking\": {\n              \"group\": \"A\",\n              \"mode\": \"none\"\n            },\n            \"thresholdsStyle\": {\n              \"mode\": \"off\"\n            }\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": null\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          }\n        },\n        \"overrides\": [\n          {\n            \"matcher\": {\n              \"id\": 
\"byFrameRefID\",\n              \"options\": \"Missing delivery\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"custom.axisPlacement\",\n                \"value\": \"right\"\n              },\n              {\n                \"id\": \"custom.drawStyle\",\n                \"value\": \"line\"\n              },\n              {\n                \"id\": \"custom.lineStyle\",\n                \"value\": {\n                  \"dash\": [\n                    0,\n                    10\n                  ],\n                  \"fill\": \"dot\"\n                }\n              },\n              {\n                \"id\": \"custom.lineWidth\",\n                \"value\": 3\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byFrameRefID\",\n              \"options\": \"A\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"custom.axisLabel\",\n                \"value\": \"IWANT\"\n              }\n            ]\n          }\n        ]\n      },\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 12,\n        \"x\": 0,\n        \"y\": 66\n      },\n      \"id\": 33,\n      \"interval\": \"4\",\n      \"options\": {\n        \"legend\": {\n          \"calcs\": [],\n          \"displayMode\": \"list\",\n          \"placement\": \"bottom\",\n          \"showLegend\": true\n        },\n        \"tooltip\": {\n          \"mode\": \"single\",\n          \"sort\": \"none\"\n        }\n      },\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"topos_libp2p_gossipsub_topic_msg_recv_counts_total{hash=\\\"topos_ready\\\"}\",\n          \"hide\": true,\n          \"legendFormat\": \"Ready received\",\n          \"range\": true,\n          \"refId\": \"Ready received\"\n       
 },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"sum(topos_libp2p_gossipsub_topic_msg_published_total{hash=\\\"topos_ready\\\"})\",\n          \"hide\": false,\n          \"legendFormat\": \"Ready published\",\n          \"range\": true,\n          \"refId\": \"Ready published\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"sum by(hash) (increase(topos_libp2p_gossipsub_topic_iwant_msgs_total{hash=\\\"topos_ready\\\", job=~\\\"$peer\\\"}[$__rate_interval]))\",\n          \"hide\": true,\n          \"legendFormat\": \"IWANT Ready\",\n          \"range\": true,\n          \"refId\": \"A\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"avg(topos_certificate_processing_total{job=~\\\"$peer\\\"})\",\n          \"hide\": true,\n          \"legendFormat\": \"__auto\",\n          \"range\": true,\n          \"refId\": \"total_cert\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"builder\",\n          \"expr\": \"avg(topos_certificate_delivered_total)\",\n          \"hide\": true,\n          \"legendFormat\": \"__auto\",\n          \"range\": true,\n          \"refId\": \"cert_delivered\"\n        },\n        {\n          \"datasource\": {\n            \"name\": \"Expression\",\n            \"type\": \"__expr__\",\n            \"uid\": \"__expr__\"\n          },\n          \"expression\": \"$total_cert - $cert_delivered\",\n          \"hide\": true,\n          \"refId\": \"Missing delivery\",\n 
         \"type\": \"math\"\n        }\n      ],\n      \"title\": \"Ready received vs. Ready published vs. Delivered certificate\",\n      \"type\": \"timeseries\"\n    },\n    {\n      \"collapsed\": false,\n      \"gridPos\": {\n        \"h\": 1,\n        \"w\": 24,\n        \"x\": 0,\n        \"y\": 74\n      },\n      \"id\": 19,\n      \"panels\": [],\n      \"title\": \"P2P - Gossip protocol\",\n      \"type\": \"row\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${datasource}\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisCenteredZero\": false,\n            \"axisColorMode\": \"text\",\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            \"drawStyle\": \"line\",\n            \"fillOpacity\": 0,\n            \"gradientMode\": \"none\",\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"lineInterpolation\": \"linear\",\n            \"lineWidth\": 1,\n            \"pointSize\": 5,\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"showPoints\": \"auto\",\n            \"spanNulls\": false,\n            \"stacking\": {\n              \"group\": \"A\",\n              \"mode\": \"none\"\n            },\n            \"thresholdsStyle\": {\n              \"mode\": \"off\"\n            }\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": null\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          
}\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 12,\n        \"x\": 0,\n        \"y\": 75\n      },\n      \"id\": 20,\n      \"options\": {\n        \"legend\": {\n          \"calcs\": [],\n          \"displayMode\": \"list\",\n          \"placement\": \"bottom\",\n          \"showLegend\": true\n        },\n        \"tooltip\": {\n          \"mode\": \"single\",\n          \"sort\": \"none\"\n        }\n      },\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"irate(topos_libp2p_gossipsub_topic_iwant_msgs_total{ job=~\\\"$peer\\\"}[$__interval])\",\n          \"legendFormat\": \"IWANT {{hash}} - {{job}}\",\n          \"range\": true,\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"IWANT msg\",\n      \"type\": \"timeseries\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${datasource}\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisCenteredZero\": false,\n            \"axisColorMode\": \"text\",\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            \"drawStyle\": \"line\",\n            \"fillOpacity\": 0,\n            \"gradientMode\": \"none\",\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"lineInterpolation\": \"linear\",\n            \"lineWidth\": 1,\n            \"pointSize\": 5,\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"showPoints\": \"auto\",\n            \"spanNulls\": false,\n            
\"stacking\": {\n              \"group\": \"A\",\n              \"mode\": \"none\"\n            },\n            \"thresholdsStyle\": {\n              \"mode\": \"off\"\n            }\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": null\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          }\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 12,\n        \"x\": 12,\n        \"y\": 75\n      },\n      \"id\": 21,\n      \"options\": {\n        \"legend\": {\n          \"calcs\": [],\n          \"displayMode\": \"list\",\n          \"placement\": \"bottom\",\n          \"showLegend\": true\n        },\n        \"tooltip\": {\n          \"mode\": \"single\",\n          \"sort\": \"none\"\n        }\n      },\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"irate(topos_libp2p_gossipsub_topic_msg_recv_counts_total{ job=~\\\"$peer\\\"}[$__interval])\",\n          \"legendFormat\": \"{{hash}} {{job}}\",\n          \"range\": true,\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"MSG recv filtered\",\n      \"type\": \"timeseries\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${datasource}\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"thresholds\"\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": 
null\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          }\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 12,\n        \"x\": 0,\n        \"y\": 83\n      },\n      \"id\": 22,\n      \"options\": {\n        \"orientation\": \"auto\",\n        \"reduceOptions\": {\n          \"calcs\": [\n            \"lastNotNull\"\n          ],\n          \"fields\": \"\",\n          \"values\": false\n        },\n        \"showThresholdLabels\": false,\n        \"showThresholdMarkers\": true\n      },\n      \"pluginVersion\": \"9.3.8\",\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"sum by(hash) (topos_libp2p_gossipsub_topic_msg_recv_counts_total{ job=~\\\"$peer\\\"})\",\n          \"legendFormat\": \"{{hash}} \",\n          \"range\": true,\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"MSG recv total per topic\",\n      \"type\": \"gauge\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${datasource}\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisCenteredZero\": false,\n            \"axisColorMode\": \"text\",\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"fillOpacity\": 80,\n            \"gradientMode\": \"none\",\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"lineWidth\": 1,\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"thresholdsStyle\": {\n 
             \"mode\": \"off\"\n            }\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": null\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          }\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 12,\n        \"x\": 12,\n        \"y\": 83\n      },\n      \"id\": 23,\n      \"options\": {\n        \"barRadius\": 0,\n        \"barWidth\": 0.97,\n        \"fullHighlight\": false,\n        \"groupWidth\": 0.7,\n        \"legend\": {\n          \"calcs\": [],\n          \"displayMode\": \"list\",\n          \"placement\": \"bottom\",\n          \"showLegend\": true\n        },\n        \"orientation\": \"auto\",\n        \"showValue\": \"auto\",\n        \"stacking\": \"none\",\n        \"tooltip\": {\n          \"mode\": \"single\",\n          \"sort\": \"none\"\n        },\n        \"xTickLabelRotation\": 0,\n        \"xTickLabelSpacing\": 0\n      },\n      \"pluginVersion\": \"9.5.3\",\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"code\",\n          \"exemplar\": false,\n          \"expr\": \"max by(hash, job) (topos_libp2p_gossipsub_topic_msg_sent_counts_total{ job=~\\\"$peer\\\"})\",\n          \"format\": \"heatmap\",\n          \"instant\": true,\n          \"legendFormat\": \"{{job}} {{hash}}\",\n          \"range\": false,\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"MSG sent total per topic\",\n      \"type\": \"barchart\"\n    },\n    {\n      \"collapsed\": false,\n      \"gridPos\": {\n        \"h\": 1,\n        \"w\": 24,\n        \"x\": 0,\n        \"y\": 91\n      },\n    
  \"id\": 15,\n      \"panels\": [],\n      \"title\": \"Storage layer\",\n      \"type\": \"row\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${datasource}\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisCenteredZero\": false,\n            \"axisColorMode\": \"text\",\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            \"drawStyle\": \"line\",\n            \"fillOpacity\": 0,\n            \"gradientMode\": \"none\",\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"lineInterpolation\": \"linear\",\n            \"lineWidth\": 1,\n            \"pointSize\": 5,\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"showPoints\": \"auto\",\n            \"spanNulls\": false,\n            \"stacking\": {\n              \"group\": \"A\",\n              \"mode\": \"none\"\n            },\n            \"thresholdsStyle\": {\n              \"mode\": \"off\"\n            }\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": null\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          }\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 12,\n        \"x\": 0,\n        \"y\": 92\n      },\n      \"id\": 16,\n      \"options\": {\n        \"legend\": {\n          \"calcs\": [],\n          \"displayMode\": \"list\",\n          \"placement\": \"bottom\",\n      
    \"showLegend\": true\n        },\n        \"tooltip\": {\n          \"mode\": \"single\",\n          \"sort\": \"none\"\n        }\n      },\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"irate(topos_storage_command_channel_capacity_total{ job=~\\\"$peer\\\"}[$__interval])\",\n          \"legendFormat\": \"{{job}}\",\n          \"range\": true,\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"Command channel at capacity\",\n      \"type\": \"timeseries\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${datasource}\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"thresholds\"\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": null\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          }\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 12,\n        \"x\": 12,\n        \"y\": 92\n      },\n      \"id\": 17,\n      \"options\": {\n        \"displayMode\": \"gradient\",\n        \"minVizHeight\": 10,\n        \"minVizWidth\": 0,\n        \"orientation\": \"auto\",\n        \"reduceOptions\": {\n          \"calcs\": [\n            \"lastNotNull\"\n          ],\n          \"fields\": \"\",\n          \"values\": false\n        },\n        \"showUnfilled\": true,\n        \"valueMode\": \"color\"\n      },\n      \"pluginVersion\": \"9.3.8\",\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": 
\"${datasource}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"sum(increase(topos_storage_pending_certificate_existance_latency_bucket{ job=~\\\"$peer\\\"}[$__range])) by (le)\",\n          \"format\": \"heatmap\",\n          \"legendFormat\": \"{{le}}\",\n          \"range\": true,\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"Pending existance latency\",\n      \"type\": \"bargauge\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${datasource}\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"thresholds\"\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\"\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          }\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 12,\n        \"x\": 12,\n        \"y\": 100\n      },\n      \"id\": 18,\n      \"options\": {\n        \"displayMode\": \"gradient\",\n        \"minVizHeight\": 10,\n        \"minVizWidth\": 0,\n        \"orientation\": \"auto\",\n        \"reduceOptions\": {\n          \"calcs\": [\n            \"lastNotNull\"\n          ],\n          \"fields\": \"\",\n          \"values\": false\n        },\n        \"showUnfilled\": true,\n        \"valueMode\": \"color\"\n      },\n      \"pluginVersion\": \"9.3.8\",\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"sum(increase(topos_storage_adding_pending_certificate_latency_bucket{ job=~\\\"$peer\\\"}[$__range])) by (le)\",\n          \"format\": \"heatmap\",\n    
      \"legendFormat\": \"__auto\",\n          \"range\": true,\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"Adding to pending latency\",\n      \"type\": \"bargauge\"\n    },\n    {\n      \"collapsed\": false,\n      \"gridPos\": {\n        \"h\": 1,\n        \"w\": 24,\n        \"x\": 0,\n        \"y\": 108\n      },\n      \"id\": 11,\n      \"panels\": [],\n      \"title\": \"P2P layer\",\n      \"type\": \"row\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${datasource}\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisCenteredZero\": false,\n            \"axisColorMode\": \"text\",\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            \"drawStyle\": \"line\",\n            \"fillOpacity\": 0,\n            \"gradientMode\": \"none\",\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"lineInterpolation\": \"linear\",\n            \"lineWidth\": 1,\n            \"pointSize\": 5,\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"showPoints\": \"auto\",\n            \"spanNulls\": false,\n            \"stacking\": {\n              \"group\": \"A\",\n              \"mode\": \"none\"\n            },\n            \"thresholdsStyle\": {\n              \"mode\": \"off\"\n            }\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\"\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          }\n        },\n     
   \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 12,\n        \"w\": 12,\n        \"x\": 0,\n        \"y\": 109\n      },\n      \"id\": 3,\n      \"options\": {\n        \"legend\": {\n          \"calcs\": [],\n          \"displayMode\": \"list\",\n          \"placement\": \"bottom\",\n          \"showLegend\": true\n        },\n        \"tooltip\": {\n          \"mode\": \"single\",\n          \"sort\": \"none\"\n        }\n      },\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"irate(topos_p2p_event_stream_capacity_total{ job=~\\\"$peer\\\"}[$__interval])\",\n          \"legendFormat\": \"{{job}}\",\n          \"range\": true,\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"P2P Channel at capacity\",\n      \"type\": \"timeseries\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${datasource}\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisCenteredZero\": false,\n            \"axisColorMode\": \"text\",\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            \"drawStyle\": \"line\",\n            \"fillOpacity\": 0,\n            \"gradientMode\": \"none\",\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"lineInterpolation\": \"linear\",\n            \"lineWidth\": 1,\n            \"pointSize\": 5,\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"showPoints\": \"auto\",\n            \"spanNulls\": false,\n            \"stacking\": {\n              
\"group\": \"A\",\n              \"mode\": \"none\"\n            },\n            \"thresholdsStyle\": {\n              \"mode\": \"off\"\n            }\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\"\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          }\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 12,\n        \"w\": 12,\n        \"x\": 12,\n        \"y\": 109\n      },\n      \"id\": 2,\n      \"options\": {\n        \"legend\": {\n          \"calcs\": [],\n          \"displayMode\": \"list\",\n          \"placement\": \"bottom\",\n          \"showLegend\": true\n        },\n        \"tooltip\": {\n          \"mode\": \"single\",\n          \"sort\": \"none\"\n        }\n      },\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"irate(topos_p2p_echo_message_total{ job=~\\\"$peer\\\"}[$__interval])\",\n          \"legendFormat\": \"Echo {{job}}\",\n          \"range\": true,\n          \"refId\": \"A\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"irate(topos_p2p_ready_message_total{ job=~\\\"$peer\\\"}[$__interval])\",\n          \"hide\": false,\n          \"legendFormat\": \"Ready {{job}}\",\n          \"range\": true,\n          \"refId\": \"B\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": 
\"irate(topos_p2p_gossip_message_total{ job=~\\\"$peer\\\"}[$__interval])\",\n          \"hide\": false,\n          \"legendFormat\": \"Gossip {{job}}\",\n          \"range\": true,\n          \"refId\": \"C\"\n        }\n      ],\n      \"title\": \"Message received\",\n      \"type\": \"timeseries\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${datasource}\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisCenteredZero\": false,\n            \"axisColorMode\": \"text\",\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            \"drawStyle\": \"line\",\n            \"fillOpacity\": 0,\n            \"gradientMode\": \"none\",\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"lineInterpolation\": \"linear\",\n            \"lineWidth\": 1,\n            \"pointSize\": 5,\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"showPoints\": \"auto\",\n            \"spanNulls\": false,\n            \"stacking\": {\n              \"group\": \"A\",\n              \"mode\": \"none\"\n            },\n            \"thresholdsStyle\": {\n              \"mode\": \"off\"\n            }\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\"\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          }\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 12,\n        \"w\": 12,\n        \"x\": 0,\n        \"y\": 121\n      
},\n      \"id\": 1,\n      \"options\": {\n        \"legend\": {\n          \"calcs\": [],\n          \"displayMode\": \"list\",\n          \"placement\": \"bottom\",\n          \"showLegend\": true\n        },\n        \"tooltip\": {\n          \"mode\": \"single\",\n          \"sort\": \"none\"\n        }\n      },\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"sum(topos_p2p_gossipsub_message_sent_total{ job=~\\\"$peer\\\"})\",\n          \"legendFormat\": \"{{job}}\",\n          \"range\": true,\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"Gossip message sent\",\n      \"type\": \"timeseries\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${datasource}\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"thresholds\"\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\"\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          }\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 12,\n        \"w\": 12,\n        \"x\": 12,\n        \"y\": 121\n      },\n      \"id\": 7,\n      \"options\": {\n        \"displayMode\": \"gradient\",\n        \"minVizHeight\": 10,\n        \"minVizWidth\": 0,\n        \"orientation\": \"auto\",\n        \"reduceOptions\": {\n          \"calcs\": [\n            \"lastNotNull\"\n          ],\n          \"fields\": \"\",\n          \"values\": false\n        },\n        \"showUnfilled\": true,\n        \"valueMode\": \"color\"\n      },\n      \"pluginVersion\": \"9.3.8\",\n      
\"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"code\",\n          \"exemplar\": false,\n          \"expr\": \"sum(increase(topos_p2p_gossip_batch_size_bucket{ job=~\\\"$peer\\\"}[$__range])) by (le)\",\n          \"format\": \"heatmap\",\n          \"instant\": false,\n          \"legendFormat\": \"{{le}}\",\n          \"range\": true,\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"Gossip message sent\",\n      \"type\": \"bargauge\"\n    },\n    {\n      \"collapsed\": false,\n      \"gridPos\": {\n        \"h\": 1,\n        \"w\": 24,\n        \"x\": 0,\n        \"y\": 133\n      },\n      \"id\": 12,\n      \"panels\": [],\n      \"title\": \"Double Echo - external\",\n      \"type\": \"row\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${datasource}\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisCenteredZero\": false,\n            \"axisColorMode\": \"text\",\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            \"drawStyle\": \"line\",\n            \"fillOpacity\": 0,\n            \"gradientMode\": \"none\",\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"lineInterpolation\": \"linear\",\n            \"lineWidth\": 1,\n            \"pointSize\": 5,\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"showPoints\": \"auto\",\n            \"spanNulls\": false,\n            \"stacking\": {\n              \"group\": \"A\",\n              \"mode\": \"none\"\n            },\n            
\"thresholdsStyle\": {\n              \"mode\": \"off\"\n            }\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\"\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          }\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 12,\n        \"x\": 0,\n        \"y\": 134\n      },\n      \"id\": 13,\n      \"options\": {\n        \"legend\": {\n          \"calcs\": [],\n          \"displayMode\": \"list\",\n          \"placement\": \"bottom\",\n          \"showLegend\": true\n        },\n        \"tooltip\": {\n          \"mode\": \"single\",\n          \"sort\": \"none\"\n        }\n      },\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"irate(topos_double_echo_command_channel_capacity_total{ job=~\\\"$peer\\\"}[$__interval])\",\n          \"legendFormat\": \"{{job}}\",\n          \"range\": true,\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"Command channel capacity\",\n      \"type\": \"timeseries\"\n    },\n    {\n      \"collapsed\": false,\n      \"gridPos\": {\n        \"h\": 1,\n        \"w\": 24,\n        \"x\": 0,\n        \"y\": 142\n      },\n      \"id\": 10,\n      \"panels\": [],\n      \"title\": \"Double Echo - internal\",\n      \"type\": \"row\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${datasource}\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisCenteredZero\": false,\n            
\"axisColorMode\": \"text\",\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            \"drawStyle\": \"line\",\n            \"fillOpacity\": 0,\n            \"gradientMode\": \"none\",\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"lineInterpolation\": \"linear\",\n            \"lineWidth\": 1,\n            \"pointSize\": 5,\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"showPoints\": \"auto\",\n            \"spanNulls\": false,\n            \"stacking\": {\n              \"group\": \"A\",\n              \"mode\": \"none\"\n            },\n            \"thresholdsStyle\": {\n              \"mode\": \"off\"\n            }\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\"\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          }\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 12,\n        \"x\": 0,\n        \"y\": 143\n      },\n      \"id\": 4,\n      \"options\": {\n        \"legend\": {\n          \"calcs\": [],\n          \"displayMode\": \"list\",\n          \"placement\": \"bottom\",\n          \"showLegend\": true\n        },\n        \"tooltip\": {\n          \"mode\": \"single\",\n          \"sort\": \"none\"\n        }\n      },\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"topos_double_echo_current_buffer_size{ job=~\\\"$peer\\\"}\",\n          \"hide\": false,\n          
\"legendFormat\": \"{{job}}\",\n          \"range\": true,\n          \"refId\": \"B\"\n        }\n      ],\n      \"title\": \"Double echo - Buffer size\",\n      \"type\": \"timeseries\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${datasource}\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisCenteredZero\": false,\n            \"axisColorMode\": \"text\",\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            \"drawStyle\": \"line\",\n            \"fillOpacity\": 0,\n            \"gradientMode\": \"none\",\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"lineInterpolation\": \"linear\",\n            \"lineWidth\": 1,\n            \"pointSize\": 5,\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"showPoints\": \"auto\",\n            \"spanNulls\": false,\n            \"stacking\": {\n              \"group\": \"A\",\n              \"mode\": \"none\"\n            },\n            \"thresholdsStyle\": {\n              \"mode\": \"off\"\n            }\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\"\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          }\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 12,\n        \"x\": 12,\n        \"y\": 143\n      },\n      \"id\": 5,\n      \"options\": {\n        \"legend\": {\n          \"calcs\": [],\n          
\"displayMode\": \"list\",\n          \"placement\": \"bottom\",\n          \"showLegend\": true\n        },\n        \"tooltip\": {\n          \"mode\": \"single\",\n          \"sort\": \"none\"\n        }\n      },\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"irate(topos_double_echo_buffer_capacity_total{ job=~\\\"$peer\\\"}[$__interval])\",\n          \"legendFormat\": \"{{job}}\",\n          \"range\": true,\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"Double Echo Buffer at capacity\",\n      \"type\": \"timeseries\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${datasource}\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisCenteredZero\": false,\n            \"axisColorMode\": \"text\",\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            \"drawStyle\": \"line\",\n            \"fillOpacity\": 0,\n            \"gradientMode\": \"none\",\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"lineInterpolation\": \"linear\",\n            \"lineWidth\": 1,\n            \"pointSize\": 5,\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"showPoints\": \"auto\",\n            \"spanNulls\": false,\n            \"stacking\": {\n              \"group\": \"A\",\n              \"mode\": \"none\"\n            },\n            \"thresholdsStyle\": {\n              \"mode\": \"off\"\n            }\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n        
    \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\"\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          }\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 12,\n        \"x\": 0,\n        \"y\": 151\n      },\n      \"id\": 6,\n      \"options\": {\n        \"legend\": {\n          \"calcs\": [],\n          \"displayMode\": \"list\",\n          \"placement\": \"bottom\",\n          \"showLegend\": true\n        },\n        \"tooltip\": {\n          \"mode\": \"single\",\n          \"sort\": \"none\"\n        }\n      },\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"topos_double_echo_buffered_message_count{ job=~\\\"$peer\\\"}\",\n          \"hide\": false,\n          \"legendFormat\": \"{{job}}\",\n          \"range\": true,\n          \"refId\": \"B\"\n        }\n      ],\n      \"title\": \"Double echo - Buffered messages\",\n      \"type\": \"timeseries\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${datasource}\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisCenteredZero\": false,\n            \"axisColorMode\": \"text\",\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            \"drawStyle\": \"line\",\n            \"fillOpacity\": 0,\n            \"gradientMode\": \"none\",\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            
\"lineInterpolation\": \"linear\",\n            \"lineWidth\": 1,\n            \"pointSize\": 5,\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"showPoints\": \"auto\",\n            \"spanNulls\": false,\n            \"stacking\": {\n              \"group\": \"A\",\n              \"mode\": \"none\"\n            },\n            \"thresholdsStyle\": {\n              \"mode\": \"off\"\n            }\n          },\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\"\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          }\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 12,\n        \"x\": 12,\n        \"y\": 151\n      },\n      \"id\": 8,\n      \"options\": {\n        \"legend\": {\n          \"calcs\": [],\n          \"displayMode\": \"list\",\n          \"placement\": \"bottom\",\n          \"showLegend\": true\n        },\n        \"tooltip\": {\n          \"mode\": \"single\",\n          \"sort\": \"none\"\n        }\n      },\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${datasource}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"topos_double_echo_current_buffer_size{ job=~\\\"$peer\\\"}\",\n          \"hide\": false,\n          \"legendFormat\": \"{{job}}\",\n          \"range\": true,\n          \"refId\": \"B\"\n        }\n      ],\n      \"title\": \"Double echo - Buffer size\",\n      \"type\": \"timeseries\"\n    }\n  ],\n  \"refresh\": \"5s\",\n  \"schemaVersion\": 37,\n  \"style\": \"dark\",\n  \"tags\": [],\n  \"templating\": {\n    \"list\": [\n      {\n        \"current\": {\n          \"selected\": false,\n          
\"text\": \"All\",\n          \"value\": \"$__all\"\n        },\n        \"datasource\": {\n          \"type\": \"prometheus\",\n          \"uid\": \"${datasource}\"\n        },\n        \"definition\": \"label_values(topos_certificate_processing_total,job)\",\n        \"hide\": 0,\n        \"includeAll\": true,\n        \"multi\": true,\n        \"name\": \"peer\",\n        \"options\": [],\n        \"query\": {\n          \"query\": \"label_values(topos_certificate_processing_total,job)\",\n          \"refId\": \"PrometheusVariableQueryEditor-VariableQuery\"\n        },\n        \"refresh\": 1,\n        \"regex\": \"\",\n        \"skipUrlSync\": false,\n        \"sort\": 0,\n        \"type\": \"query\"\n      },\n      {\n        \"current\": {\n          \"selected\": true,\n          \"text\": \"Prometheus\",\n          \"value\": \"Prometheus\"\n        },\n        \"hide\": 0,\n        \"includeAll\": false,\n        \"label\": \"Datasource\",\n        \"multi\": false,\n        \"name\": \"datasource\",\n        \"options\": [],\n        \"query\": \"prometheus\",\n        \"queryValue\": \"\",\n        \"refresh\": 1,\n        \"regex\": \"\",\n        \"skipUrlSync\": false,\n        \"type\": \"datasource\"\n      }\n    ]\n  },\n  \"time\": {\n    \"from\": \"now-1h\",\n    \"to\": \"now\"\n  },\n  \"timepicker\": {},\n  \"timezone\": \"\",\n  \"title\": \"Benchmarks - Gossiped Certificate\",\n  \"uid\": \"f4d3b025-4b36-454a-a724-818f85806b6e\",\n  \"version\": 9,\n  \"weekStart\": \"\"\n}\n"
  },
  {
    "path": "rust-toolchain",
    "content": "[toolchain]\nchannel = \"1.74.0\"\nprofile = \"minimal\"\n"
  },
  {
    "path": "rustfmt.toml",
    "content": "edition = \"2021\"\nuse_field_init_shorthand = true\nreorder_imports = true\nformat_strings = true\n\n"
  },
  {
    "path": "scripts/check_readme.sh",
    "content": "#!/bin/bash\n\nset -e\n\nparam=$1\n\nfunction check {\n  if [ \"$param\" == \"generate\" ]; then\n    cargo readme -r $1 > $1/README.md\n  else\n    diff <(cargo readme  -r $1) $1/README.md || (echo 1>&2 \"Please update the $1/README with \"'`'\"cargo readme -r $1 > $1/README.md\"'`' && exit 1 )\n  fi\n}\n\ncheck crates/topos-tce-broadcast\ncheck crates/topos-tce-storage\n"
  }
]