Repository: topos-protocol/topos Branch: main Commit: 3b5815932db0 Files: 355 Total size: 1.3 MB Directory structure: gitextract_hgeg_lhv/ ├── .cargo/ │ ├── audit.toml │ └── config.toml ├── .config/ │ └── nextest.toml ├── .dockerignore ├── .github/ │ ├── CODEOWNERS │ ├── ISSUE_TEMPLATE/ │ │ └── bug_report.md │ ├── actions/ │ │ └── install-rust/ │ │ └── action.yml │ └── workflows/ │ ├── coverage.yml │ ├── doc.yml │ ├── docker_build_push.yml │ ├── docker_utils.yml │ ├── pr-checking.yml │ ├── quality.yml │ ├── release.yml │ ├── sequencer_topos_core_contract_test.yml │ └── test.yml ├── .gitignore ├── CHANGELOG.md ├── Cargo.toml ├── Cross.toml ├── Dockerfile ├── LICENSE ├── README.md ├── cliff.toml ├── crates/ │ ├── topos/ │ │ ├── Cargo.toml │ │ ├── build.rs │ │ ├── src/ │ │ │ ├── components/ │ │ │ │ ├── mod.rs │ │ │ │ ├── node/ │ │ │ │ │ ├── commands/ │ │ │ │ │ │ ├── init.rs │ │ │ │ │ │ ├── status.rs │ │ │ │ │ │ └── up.rs │ │ │ │ │ ├── commands.rs │ │ │ │ │ ├── mod.rs │ │ │ │ │ ├── services/ │ │ │ │ │ │ └── status.rs │ │ │ │ │ └── services.rs │ │ │ │ ├── regtest/ │ │ │ │ │ ├── commands/ │ │ │ │ │ │ └── spam.rs │ │ │ │ │ ├── commands.rs │ │ │ │ │ └── mod.rs │ │ │ │ └── setup/ │ │ │ │ ├── commands/ │ │ │ │ │ └── subnet.rs │ │ │ │ ├── commands.rs │ │ │ │ └── mod.rs │ │ │ ├── lib.rs │ │ │ ├── main.rs │ │ │ ├── options/ │ │ │ │ └── input_format.rs │ │ │ └── options.rs │ │ └── tests/ │ │ ├── cert_delivery.rs │ │ ├── config.rs │ │ ├── node.rs │ │ ├── regtest.rs │ │ ├── setup.rs │ │ ├── snapshots/ │ │ │ ├── node__help_display.snap │ │ │ ├── push_certificate__help_display.snap │ │ │ └── regtest__regtest_spam_help_display.snap │ │ └── utils.rs │ ├── topos-certificate-spammer/ │ │ ├── Cargo.toml │ │ ├── README.md │ │ ├── config/ │ │ │ └── target_nodes_example.json │ │ └── src/ │ │ ├── config.rs │ │ ├── error.rs │ │ ├── lib.rs │ │ └── utils.rs │ ├── topos-clock/ │ │ ├── Cargo.toml │ │ └── src/ │ │ ├── lib.rs │ │ └── time.rs │ ├── topos-config/ │ │ ├── Cargo.toml │ │ ├── assets/ │ 
│ │ └── genesis-example.json │ │ └── src/ │ │ ├── base.rs │ │ ├── edge/ │ │ │ └── command.rs │ │ ├── edge.rs │ │ ├── genesis/ │ │ │ ├── mod.rs │ │ │ └── tests.rs │ │ ├── lib.rs │ │ ├── node.rs │ │ ├── sequencer.rs │ │ ├── tce/ │ │ │ ├── broadcast.rs │ │ │ ├── p2p.rs │ │ │ └── synchronization.rs │ │ └── tce.rs │ ├── topos-core/ │ │ ├── .rustfmt.toml │ │ ├── Cargo.toml │ │ ├── build.rs │ │ ├── proto/ │ │ │ ├── buf.yaml │ │ │ └── topos/ │ │ │ ├── p2p/ │ │ │ │ └── info.proto │ │ │ ├── shared/ │ │ │ │ └── v1/ │ │ │ │ ├── certificate.proto │ │ │ │ ├── checkpoints.proto │ │ │ │ ├── frost.proto │ │ │ │ ├── signature.proto │ │ │ │ ├── stark_proof.proto │ │ │ │ ├── subnet.proto │ │ │ │ ├── uuid.proto │ │ │ │ └── validator_id.proto │ │ │ ├── tce/ │ │ │ │ └── v1/ │ │ │ │ ├── api.proto │ │ │ │ ├── console.proto │ │ │ │ ├── double_echo.proto │ │ │ │ ├── gossipsub.proto │ │ │ │ └── synchronization.proto │ │ │ └── uci/ │ │ │ └── v1/ │ │ │ └── certification.proto │ │ ├── src/ │ │ │ ├── api/ │ │ │ │ ├── graphql/ │ │ │ │ │ ├── certificate.rs │ │ │ │ │ ├── checkpoint.rs │ │ │ │ │ ├── errors.rs │ │ │ │ │ ├── filter.rs │ │ │ │ │ ├── mod.rs │ │ │ │ │ ├── query.rs │ │ │ │ │ └── subnet.rs │ │ │ │ ├── grpc/ │ │ │ │ │ ├── checkpoints/ │ │ │ │ │ │ ├── errors.rs │ │ │ │ │ │ ├── mod.rs │ │ │ │ │ │ └── positions.rs │ │ │ │ │ ├── conversions/ │ │ │ │ │ │ ├── shared/ │ │ │ │ │ │ │ └── v1/ │ │ │ │ │ │ │ ├── certificate.rs │ │ │ │ │ │ │ ├── signature.rs │ │ │ │ │ │ │ ├── subnet.rs │ │ │ │ │ │ │ ├── uuid.rs │ │ │ │ │ │ │ └── validator_id.rs │ │ │ │ │ │ ├── tce/ │ │ │ │ │ │ │ └── v1/ │ │ │ │ │ │ │ ├── api.rs │ │ │ │ │ │ │ ├── mod.rs │ │ │ │ │ │ │ └── synchronization.rs │ │ │ │ │ │ └── uci/ │ │ │ │ │ │ └── v1/ │ │ │ │ │ │ └── uci.rs │ │ │ │ │ ├── generated/ │ │ │ │ │ │ ├── topos.p2p.rs │ │ │ │ │ │ ├── topos.shared.v1.rs │ │ │ │ │ │ ├── topos.tce.v1.rs │ │ │ │ │ │ └── topos.uci.v1.rs │ │ │ │ │ └── mod.rs │ │ │ │ └── mod.rs │ │ │ ├── errors.rs │ │ │ ├── lib.rs │ │ │ ├── test.rs │ │ │ ├── types/ │ │ │ │ 
└── stream.rs │ │ │ ├── types.rs │ │ │ └── uci/ │ │ │ ├── certificate.rs │ │ │ ├── certificate_id.rs │ │ │ ├── mod.rs │ │ │ └── subnet_id.rs │ │ └── tests/ │ │ └── tce_layer.rs │ ├── topos-crypto/ │ │ ├── Cargo.toml │ │ ├── src/ │ │ │ ├── hash.rs │ │ │ ├── keys.rs │ │ │ ├── keystore.rs │ │ │ ├── lib.rs │ │ │ ├── messages.rs │ │ │ ├── signatures.rs │ │ │ └── validator_id.rs │ │ └── tests/ │ │ └── messages.rs │ ├── topos-metrics/ │ │ ├── Cargo.toml │ │ └── src/ │ │ ├── api.rs │ │ ├── double_echo.rs │ │ ├── lib.rs │ │ ├── p2p.rs │ │ ├── storage.rs │ │ └── tests.rs │ ├── topos-node/ │ │ ├── Cargo.toml │ │ ├── build.rs │ │ └── src/ │ │ ├── lib.rs │ │ ├── main.rs │ │ └── process.rs │ ├── topos-p2p/ │ │ ├── Cargo.toml │ │ ├── src/ │ │ │ ├── behaviour/ │ │ │ │ ├── discovery.rs │ │ │ │ ├── gossip.rs │ │ │ │ ├── grpc/ │ │ │ │ │ ├── connection.rs │ │ │ │ │ ├── error.rs │ │ │ │ │ ├── event.rs │ │ │ │ │ ├── handler/ │ │ │ │ │ │ ├── event.rs │ │ │ │ │ │ └── protocol.rs │ │ │ │ │ ├── handler.rs │ │ │ │ │ ├── proxy.rs │ │ │ │ │ └── stream.rs │ │ │ │ ├── grpc.rs │ │ │ │ ├── peer_info.rs │ │ │ │ └── topos.rs │ │ │ ├── behaviour.rs │ │ │ ├── client.rs │ │ │ ├── command.rs │ │ │ ├── config.rs │ │ │ ├── constants.rs │ │ │ ├── error.rs │ │ │ ├── event.rs │ │ │ ├── lib.rs │ │ │ ├── network.rs │ │ │ ├── runtime/ │ │ │ │ ├── handle_command.rs │ │ │ │ ├── handle_event/ │ │ │ │ │ ├── discovery.rs │ │ │ │ │ ├── gossipsub.rs │ │ │ │ │ ├── grpc.rs │ │ │ │ │ └── peer_info.rs │ │ │ │ ├── handle_event.rs │ │ │ │ └── mod.rs │ │ │ └── tests/ │ │ │ ├── behaviour/ │ │ │ │ ├── grpc.rs │ │ │ │ └── mod.rs │ │ │ ├── bootstrap.rs │ │ │ ├── command/ │ │ │ │ ├── mod.rs │ │ │ │ └── random_peer.rs │ │ │ ├── mod.rs │ │ │ └── support/ │ │ │ ├── macros.rs │ │ │ └── mod.rs │ │ └── tests/ │ │ └── support/ │ │ └── network.rs │ ├── topos-sequencer/ │ │ ├── Cargo.toml │ │ └── src/ │ │ ├── app_context.rs │ │ └── lib.rs │ ├── topos-sequencer-subnet-client/ │ │ ├── Cargo.toml │ │ └── src/ │ │ ├── lib.rs │ │ └── 
subnet_contract.rs │ ├── topos-sequencer-subnet-runtime/ │ │ ├── Cargo.toml │ │ ├── src/ │ │ │ ├── certification.rs │ │ │ ├── lib.rs │ │ │ └── proxy.rs │ │ └── tests/ │ │ ├── common/ │ │ │ ├── abi.rs │ │ │ ├── mod.rs │ │ │ └── subnet_test_data.rs │ │ └── subnet_contract.rs │ ├── topos-tce/ │ │ ├── Cargo.toml │ │ └── src/ │ │ ├── app_context/ │ │ │ ├── api.rs │ │ │ ├── network.rs │ │ │ └── protocol.rs │ │ ├── app_context.rs │ │ ├── events.rs │ │ ├── lib.rs │ │ └── tests/ │ │ ├── api.rs │ │ ├── mod.rs │ │ └── network.rs │ ├── topos-tce-api/ │ │ ├── Cargo.toml │ │ ├── src/ │ │ │ ├── graphql/ │ │ │ │ ├── builder.rs │ │ │ │ ├── filter.rs │ │ │ │ ├── mod.rs │ │ │ │ ├── query.rs │ │ │ │ ├── routes.rs │ │ │ │ └── tests.rs │ │ │ ├── grpc/ │ │ │ │ ├── builder.rs │ │ │ │ ├── console.rs │ │ │ │ ├── messaging.rs │ │ │ │ ├── mod.rs │ │ │ │ └── tests.rs │ │ │ ├── lib.rs │ │ │ ├── metrics/ │ │ │ │ ├── builder.rs │ │ │ │ └── mod.rs │ │ │ ├── runtime/ │ │ │ │ ├── builder.rs │ │ │ │ ├── client.rs │ │ │ │ ├── commands.rs │ │ │ │ ├── error.rs │ │ │ │ ├── events.rs │ │ │ │ ├── mod.rs │ │ │ │ ├── sync_task.rs │ │ │ │ └── tests.rs │ │ │ ├── stream/ │ │ │ │ ├── commands.rs │ │ │ │ ├── errors.rs │ │ │ │ ├── mod.rs │ │ │ │ ├── tests/ │ │ │ │ │ └── utils.rs │ │ │ │ └── tests.rs │ │ │ └── tests.rs │ │ └── tests/ │ │ ├── grpc/ │ │ │ ├── certificate_precedence.rs │ │ │ └── mod.rs │ │ └── runtime.rs │ ├── topos-tce-broadcast/ │ │ ├── Cargo.toml │ │ ├── README.md │ │ ├── benches/ │ │ │ ├── double_echo.rs │ │ │ └── task_manager.rs │ │ └── src/ │ │ ├── constant.rs │ │ ├── double_echo/ │ │ │ ├── broadcast_state/ │ │ │ │ └── status.rs │ │ │ ├── broadcast_state.rs │ │ │ └── mod.rs │ │ ├── event.rs │ │ ├── lib.rs │ │ ├── sampler/ │ │ │ └── mod.rs │ │ ├── task_manager/ │ │ │ ├── mod.rs │ │ │ └── task.rs │ │ └── tests/ │ │ ├── mod.rs │ │ ├── task.rs │ │ └── task_manager.rs │ ├── topos-tce-gatekeeper/ │ │ ├── Cargo.toml │ │ └── src/ │ │ ├── builder.rs │ │ ├── client.rs │ │ ├── lib.rs │ │ └── tests.rs │ ├── 
topos-tce-proxy/ │ │ ├── Cargo.toml │ │ ├── src/ │ │ │ ├── client.rs │ │ │ ├── lib.rs │ │ │ └── worker.rs │ │ └── tests/ │ │ └── tce_tests.rs │ ├── topos-tce-storage/ │ │ ├── Cargo.toml │ │ ├── README.md │ │ └── src/ │ │ ├── client.rs │ │ ├── constant.rs │ │ ├── epoch/ │ │ │ ├── mod.rs │ │ │ └── tables.rs │ │ ├── errors.rs │ │ ├── fullnode/ │ │ │ ├── locking.rs │ │ │ └── mod.rs │ │ ├── index/ │ │ │ └── mod.rs │ │ ├── lib.rs │ │ ├── rocks/ │ │ │ ├── constants.rs │ │ │ ├── db.rs │ │ │ ├── db_column.rs │ │ │ ├── iterator.rs │ │ │ ├── map.rs │ │ │ └── types.rs │ │ ├── rocks.rs │ │ ├── store.rs │ │ ├── tests/ │ │ │ ├── checkpoints.rs │ │ │ ├── db_columns.rs │ │ │ ├── mod.rs │ │ │ ├── pending_certificates.rs │ │ │ ├── position.rs │ │ │ ├── rocks.rs │ │ │ └── support/ │ │ │ ├── columns.rs │ │ │ ├── folder.rs │ │ │ └── mod.rs │ │ ├── types.rs │ │ └── validator/ │ │ ├── mod.rs │ │ └── tables.rs │ ├── topos-tce-synchronizer/ │ │ ├── Cargo.toml │ │ └── src/ │ │ ├── builder.rs │ │ ├── checkpoints_collector/ │ │ │ ├── error.rs │ │ │ ├── mod.rs │ │ │ ├── tests/ │ │ │ │ └── integration.rs │ │ │ └── tests.rs │ │ └── lib.rs │ ├── topos-telemetry/ │ │ ├── Cargo.toml │ │ └── src/ │ │ ├── lib.rs │ │ └── tracing.rs │ ├── topos-test-sdk/ │ │ ├── Cargo.toml │ │ ├── build.rs │ │ ├── proc_macro_sdk/ │ │ │ ├── Cargo.toml │ │ │ └── src/ │ │ │ └── lib.rs │ │ ├── proto/ │ │ │ └── behaviour/ │ │ │ ├── helloworld.proto │ │ │ └── noop.proto │ │ └── src/ │ │ ├── certificates/ │ │ │ └── mod.rs │ │ ├── crypto.rs │ │ ├── grpc/ │ │ │ ├── behaviour/ │ │ │ │ ├── helloworld.rs │ │ │ │ └── noop.rs │ │ │ └── mod.rs │ │ ├── lib.rs │ │ ├── networking/ │ │ │ └── mod.rs │ │ ├── p2p/ │ │ │ └── mod.rs │ │ ├── sequencer/ │ │ │ └── mod.rs │ │ ├── storage/ │ │ │ └── mod.rs │ │ └── tce/ │ │ ├── gatekeeper.rs │ │ ├── mod.rs │ │ ├── p2p.rs │ │ ├── protocol.rs │ │ ├── public_api.rs │ │ └── synchronizer.rs │ └── topos-wallet/ │ ├── Cargo.toml │ └── src/ │ ├── error.rs │ └── lib.rs ├── docs/ │ ├── .gitignore │ ├── 
README.md │ ├── architecture/ │ │ ├── certificates_collector.md │ │ ├── checkpoints_collector.md │ │ ├── gatekeeper.md │ │ └── synchronizer.md │ ├── book.toml │ └── src/ │ ├── README.md │ ├── SUMMARY.md │ ├── glossary.md │ ├── test.md │ └── topos-node.md ├── grafana/ │ └── benchmarks-dashboard.json ├── rust-toolchain ├── rustfmt.toml └── scripts/ └── check_readme.sh ================================================ FILE CONTENTS ================================================ ================================================ FILE: .cargo/audit.toml ================================================ [advisories] ignore = [] ================================================ FILE: .cargo/config.toml ================================================ [alias] xclippy = [ "clippy", "--workspace", "--tests", "--all-targets", "--all-features", "--", "-Wclippy::all", "-Wclippy::disallowed-methods", ] ================================================ FILE: .config/nextest.toml ================================================ [profile.default] slow-timeout = { period = "60s", terminate-after = 1 } leak-timeout = "10s" [test-groups] serial-integration = { max-threads = 1 } [[profile.default.overrides]] filter = 'test(serial_integration::)' test-group = 'serial-integration' ================================================ FILE: .dockerignore ================================================ # Ignore everything ** # Allow Rust source code !src !crates !tests !Cargo.* !tools/init.sh !tools/node_config !tools/node_config/**/* !tools/liveness.sh !tools/config/nextest.toml !.git !LICENSE ================================================ FILE: .github/CODEOWNERS ================================================ * @topos-protocol/protocol # Crypto Internals /crates/topos-crypto/ @topos-protocol/protocol @topos-protocol/crypto @Nashtare ================================================ FILE: .github/ISSUE_TEMPLATE/bug_report.md ================================================ --- name: Bug Report 
about: Create a report to help us solve bugs! labels: bug --- ## Summary ## Steps to Reproduce ## Expected behavior ## Screenshots ## Version - OS Name: [e.g. Ubuntu] - OS Version [e.g. 20.04] - CLI Version (output of `topos --version`) ## Additional context ================================================ FILE: .github/actions/install-rust/action.yml ================================================ name: 'Install Rust toolchain' description: 'Install a rust toolchain and cache the crates index' inputs: toolchain: description: 'Default toolchain to install' required: false default: 'stable' target: description: 'Default target to add' required: false default: 'x86_64-unknown-linux-gnu' msrv: description: 'Enable rust-toolchain version for msrv' required: false type: boolean default: false lockfiles: description: 'Path glob for Cargo.lock files to use as cache keys' required: false default: '**/Cargo.lock' components: description: 'Components to install' required: false tools: description: 'Tools to install' required: false default: nextest,protoc AWS_ACCESS_KEY_ID: required: true AWS_SECRET_ACCESS_KEY: required: true with_cache: required: false type: boolean default: true runs: using: composite steps: - name: Environment shell: bash run: | rustup target add ${{ inputs.target }} if ${{ inputs.msrv }}; then rustup override unset rustup show else rustup set profile minimal rustup update "${{ inputs.toolchain }}" --no-self-update rustup override set "${{ inputs.toolchain }}" fi if [ ! -z "${{ inputs.components }}" ]; then rustup component add $(echo ${{ inputs.components }}|sed 's/,/ /') fi echo CARGO_TERM_COLOR="always" >> "$GITHUB_ENV" # Disable incremental compilation. # # Incremental compilation is useful as part of an edit-build-test-edit cycle, # as it lets the compiler avoid recompiling code that hasn't changed. However, # on CI, we're not making small edits; we're almost always building the entire # project from scratch. 
Thus, incremental compilation on CI actually # introduces *additional* overhead to support making future builds # faster...but no future builds will ever occur in any given CI environment. # # See https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow # for details. echo CARGO_INCREMENTAL=0 >> "$GITHUB_ENV" # Allow more retries for network requests in cargo (downloading crates) and # rustup (installing toolchains). This should help to reduce flaky CI failures # from transient network timeouts or other issues. cat >> "$GITHUB_ENV" <> "$GITHUB_ENV" echo RUSTFLAGS="-D warnings" >> "$GITHUB_ENV" echo RUSTDOCFLAGS="-D warnings" >> "$GITHUB_ENV" if ${{ inputs.with_cache }}; then cat >> "$GITHUB_ENV" <> "$GITHUB_ENV" <> "$GITHUB_ENV" <" > target/doc/index.html cp -r target/doc/* ./host-docs - name: Upload documentation uses: actions/upload-pages-artifact@v2.0.0 with: path: "host-docs/" deploy: name: Deploy documentation needs: build permissions: pages: write id-token: write environment: name: github-pages url: ${{ steps.deployment.outputs.page_url }} runs-on: ubuntu-latest steps: - name: Deploy to GitHub Pages id: deployment uses: actions/deploy-pages@v2 ================================================ FILE: .github/workflows/docker_build_push.yml ================================================ name: Docker build and push on: push: branches: [main, debug/**] pull_request: types: [opened, synchronize, reopened, ready_for_review] release: types: [created] concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true jobs: docker: uses: ./.github/workflows/docker_utils.yml secrets: inherit integration-erc20-e2e: runs-on: ubuntu-latest needs: docker if: ${{ github.event_name == 'pull_request' }} steps: - name: Set environment run: | # It's fine to assume a single tag. 
Our tagging strategy follows a 1:1 mapping of image:tag tags=${{ needs.docker.outputs.tags }} echo "docker_tag=${tags#*:}" >> $GITHUB_ENV shell: bash - uses: convictional/trigger-workflow-and-wait@v1.6.1 with: owner: topos-protocol repo: e2e-tests github_token: ${{ secrets.ROBOT_PAT_TRIGGER_E2E_WORKFLOWS }} workflow_file_name: topos:integration-tests.yml ref: main wait_interval: 60 client_payload: '{ "topos-docker-tag": "${{ env.docker_tag }}" }' frontend-erc20-e2e: runs-on: ubuntu-latest needs: docker if: ${{ github.event_name == 'pull_request' }} steps: - name: Set environment run: | # It's fine to assume a single tag. Our tagging strategy follows a 1:1 mapping of image:tag tags=${{ needs.docker.outputs.tags }} echo "docker_tag=${tags#*:}" >> $GITHUB_ENV shell: bash - uses: convictional/trigger-workflow-and-wait@v1.6.1 with: owner: topos-protocol repo: e2e-tests github_token: ${{ secrets.ROBOT_PAT_TRIGGER_E2E_WORKFLOWS }} workflow_file_name: frontend:erc20-messaging.yml ref: main wait_interval: 60 client_payload: '{ "topos-docker-tag": "${{ env.docker_tag }}" }' ================================================ FILE: .github/workflows/docker_utils.yml ================================================ name: template - docker env: REGISTRY: ghcr.io IMAGE_NAME: ${{ github.repository }} AWS_SHARED_CREDENTIALS_FILE: "${{ github.workspace }}/.aws/credentials" on: workflow_call: inputs: # Docker target (test | fmt | lint | topos | etc) target: required: false type: string default: topos # Rust toolchain version (stable | nightly) toolchain_version: required: false type: string default: stable outputs: tags: description: "Docker tags" value: ${{ jobs.docker.outputs.tags }} jobs: docker: name: Build and push docker image to GitHub Container Registry runs-on: ubuntu-latest-16-core outputs: tags: ${{ steps.meta.outputs.tags }} steps: - name: Checkout uses: actions/checkout@v4 - name: Set up QEMU uses: docker/setup-qemu-action@v2 - name: Set up Docker Buildx uses: 
docker/setup-buildx-action@v2 - name: Inject slug/short variables uses: rlespinasse/github-slug-action@v4 with: short-length: 7 - name: Login to GitHub Container Registry uses: docker/login-action@v2 with: registry: ${{ env.REGISTRY }} username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - name: Configure AWS credentials for cicd-devnet-1 account uses: aws-actions/configure-aws-credentials@v2 with: aws-access-key-id: ${{ secrets.ROBOT_AWS_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.ROBOT_AWS_SECRET_ACCESS_KEY }} role-to-assume: arn:aws:iam::367397670706:role/CacheBucketAccessRole role-session-name: RobotToposware-session aws-region: us-east-1 role-skip-session-tagging: true role-duration-seconds: 3600 - name: Add profile credentials to .aws/credentials run: | aws configure set aws_access_key_id ${{ env.AWS_ACCESS_KEY_ID }} --profile default aws configure set aws_secret_access_key ${{ env.AWS_SECRET_ACCESS_KEY }} --profile default aws configure set aws_session_token ${{ env.AWS_SESSION_TOKEN }} --profile default - name: Extract metadata (tags, labels) for Docker id: meta uses: docker/metadata-action@v4 with: images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} tags: | type=ref,event=branch type=ref,event=pr type=semver,pattern={{version}} type=semver,pattern={{major}}.{{minor}} - name: Push to GitHub Container Registry uses: docker/build-push-action@v3 with: context: . platforms: linux/amd64,linux/arm64 # push only images targeting topos (e.g.: exclude test, lint, etc.) 
push: ${{ inputs.target == 'topos' }} target: ${{ inputs.target }} tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} secret-files: | "aws=${{ github.workspace }}/.aws/credentials" build-args: | RUSTUP_TOOLCHAIN=${{ inputs.toolchain_version }} SCCACHE_S3_KEY_PREFIX=${{ inputs.target }} SCCACHE_BUCKET=cicd-devnet-1-sccache SCCACHE_REGION=us-east-1 RUSTC_WRAPPER=/usr/local/cargo/bin/sccache ================================================ FILE: .github/workflows/pr-checking.yml ================================================ name: Checking PR semantic on: pull_request_target: types: - opened - edited - synchronize jobs: title: name: Validate PR title runs-on: ubuntu-latest steps: - uses: amannn/action-semantic-pull-request@v5 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: ignoreLabels: | release commits: name: Validate PR commits runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 with: fetch-depth: 0 - uses: wagoid/commitlint-github-action@v5 ================================================ FILE: .github/workflows/quality.yml ================================================ name: Quality on: push: branches: - main pull_request: types: [opened, synchronize, reopened, ready_for_review] workflow_dispatch: jobs: readme: name: Readme - checking readme compatibility runs-on: ubuntu-latest steps: - name: Checkout uses: actions/checkout@v4 - uses: ./.github/actions/install-rust with: with_cache: false tools: cargo-readme AWS_ACCESS_KEY_ID: ${{ secrets.ROBOT_AWS_ACCESS_KEY_ID}} AWS_SECRET_ACCESS_KEY: ${{ secrets.ROBOT_AWS_SECRET_ACCESS_KEY}} - name: checking readme run: ./scripts/check_readme.sh audit: name: Audit - crate security vulnerabilities runs-on: ubuntu-latest steps: - name: Checkout uses: actions/checkout@v4 - uses: ./.github/actions/install-rust with: with_cache: false tools: cargo-audit AWS_ACCESS_KEY_ID: ${{ secrets.ROBOT_AWS_ACCESS_KEY_ID}} AWS_SECRET_ACCESS_KEY: ${{ secrets.ROBOT_AWS_SECRET_ACCESS_KEY}} - name: Cargo 
audit run: cargo audit lint: name: Lint - Clippy runs-on: ubuntu-latest-16-core env: CARGO_TERM_COLOR: always RUSTFLAGS: -Dwarnings RUST_BACKTRACE: 1 steps: - name: Checkout topos repo uses: actions/checkout@v4 - name: Install Rust uses: ./.github/actions/install-rust with: components: clippy AWS_ACCESS_KEY_ID: ${{ secrets.ROBOT_AWS_ACCESS_KEY_ID}} AWS_SECRET_ACCESS_KEY: ${{ secrets.ROBOT_AWS_SECRET_ACCESS_KEY}} - name: Install Protoc uses: arduino/setup-protoc@v1 - name: Checkout topos-smart-contracts repo uses: actions/checkout@v4 with: repository: topos-protocol/topos-smart-contracts ref: ${{ env.CONTRACTS_REF }} path: contracts - name: Set up NodeJS uses: actions/setup-node@v3 with: node-version: 16 cache: "npm" cache-dependency-path: contracts/package-lock.json - name: Install dependencies working-directory: contracts run: npm ci - name: Build contracts working-directory: contracts run: npm run build - name: Move contract artifacts run: mv contracts/artifacts ./ - name: Cargo xclippy run: cargo xclippy fmt: name: Check - Format runs-on: ubuntu-latest steps: - name: Checkout uses: actions/checkout@v4 - uses: ./.github/actions/install-rust with: with_cache: false toolchain: nightly components: rustfmt AWS_ACCESS_KEY_ID: ${{ secrets.ROBOT_AWS_ACCESS_KEY_ID}} AWS_SECRET_ACCESS_KEY: ${{ secrets.ROBOT_AWS_SECRET_ACCESS_KEY}} - name: Cargo fmt run: cargo +nightly fmt --all -- --check msrv: name: Check - MSRV runs-on: ubuntu-latest-16-core steps: - name: Checkout uses: actions/checkout@v4 - uses: ./.github/actions/install-rust with: msrv: true AWS_ACCESS_KEY_ID: ${{ secrets.ROBOT_AWS_ACCESS_KEY_ID}} AWS_SECRET_ACCESS_KEY: ${{ secrets.ROBOT_AWS_SECRET_ACCESS_KEY}} - name: Cargo check run: cargo check --workspace --all-features --locked ================================================ FILE: .github/workflows/release.yml ================================================ name: Release on: release: types: [published] env: CARGO_TERM_COLOR: always CARGO_INCREMENTAL: 0 
CARGO_NET_RETRY: 10 RUSTUP_MAX_RETRIES: 10 RUST_BACKTRACE: short CARGO: cargo CROSS_VERSION: v0.2.5 jobs: release-build: timeout-minutes: 30 runs-on: ${{ matrix.os }} strategy: matrix: include: - build: stable-x86 rust: stable os: ubuntu-latest-16-core target: x86_64-unknown-linux-gnu - build: linux rust: stable os: ubuntu-latest-16-core target: x86_64-unknown-linux-musl - build: stable-aarch64 rust: stable os: ubuntu-latest-16-core target: aarch64-unknown-linux-gnu steps: - uses: actions/checkout@v4 - uses: ./.github/actions/install-rust with: toolchain: ${{ matrix.rust }} target: ${{ matrix.target }} with_cache: false - name: Use Cross if: matrix.os == 'ubuntu-latest-16-core' && matrix.target != '' shell: bash run: | dir="$RUNNER_TEMP/cross-download" mkdir "$dir" echo "$dir" >> $GITHUB_PATH cd "$dir" curl -LO "https://github.com/cross-rs/cross/releases/download/$CROSS_VERSION/cross-x86_64-unknown-linux-musl.tar.gz" tar xf cross-x86_64-unknown-linux-musl.tar.gz echo "CARGO=cross" >> $GITHUB_ENV - name: Set target variables shell: bash run: | echo "TARGET_FLAGS=--target ${{ matrix.target }}" >> $GITHUB_ENV echo "TARGET_DIR=./target/${{ matrix.target }}" >> $GITHUB_ENV - name: Show command used for Cargo shell: bash run: | echo "cargo command is: ${{ env.CARGO }}" echo "target flag is: ${{ env.TARGET_FLAGS }}" echo "target dir is: ${{ env.TARGET_DIR }}" - name: Build release binary shell: bash run: | ${{ env.CARGO }} build --release ${{ env.TARGET_FLAGS }} bin="target/${{ matrix.target }}/release/topos" echo "BIN=$bin" >> $GITHUB_ENV - name: Rename binary shell: bash run: | export arch=$(echo ${{ matrix.target }} | cut -d- -f1) export version=${GITHUB_REF#refs/*/} echo "arch=${arch}" >> $GITHUB_ENV echo "version=${version}" >> $GITHUB_ENV mv $BIN topos-${version}-${arch} tar -czvf topos-${version}-${arch}.tgz topos-${version}-${arch} - name: Upload release artifacts for ${{ matrix.target }} architecture uses: actions/upload-artifact@v3 with: name: topos-${{ 
matrix.target }} if-no-files-found: error path: | topos-${{ env.version }}-${{ env.arch }}.tgz - name: Publish binaries for ${{ matrix.target }} release uses: softprops/action-gh-release@v1 with: files: | topos-${{ env.version }}-${{ env.arch }}.tgz notify-release: needs: release-build runs-on: ubuntu-latest if: ${{ github.event_name == 'release' && !github.event.release.prerelease }} steps: - name: Send Slack notification uses: slackapi/slack-github-action@v1.24.0 with: payload: | { "repository": "${{ github.repository }}", "version": "${{ github.ref }}" } env: SLACK_WEBHOOK_URL: ${{ vars.RELEASE_PIPELINE_SLACK_WEBHOOK_URL }} ================================================ FILE: .github/workflows/sequencer_topos_core_contract_test.yml ================================================ name: Sequencer Topos Core Contract interaction test on: push: branches: [main] pull_request: types: [opened, synchronize, reopened, ready_for_review] workflow_dispatch: jobs: sequencer-contracts-e2e: runs-on: ubuntu-latest-16-core steps: - uses: convictional/trigger-workflow-and-wait@v1.6.1 with: owner: topos-protocol repo: e2e-tests github_token: ${{ secrets.ROBOT_PAT_TRIGGER_E2E_WORKFLOWS }} workflow_file_name: topos:sequencer-contracts.yml ref: main wait_interval: 60 client_payload: '{ "topos-ref": "${{ github.head_ref }}" }' ================================================ FILE: .github/workflows/test.yml ================================================ name: Test on: push: branches: - main pull_request: types: [opened, synchronize, reopened, ready_for_review] workflow_dispatch: concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} jobs: test-doc: name: Test documentation runs-on: ubuntu-latest steps: - name: Checkout uses: actions/checkout@v4 - uses: ./.github/actions/install-rust with: toolchain: nightly AWS_ACCESS_KEY_ID: ${{ secrets.ROBOT_AWS_ACCESS_KEY_ID}}
AWS_SECRET_ACCESS_KEY: ${{ secrets.ROBOT_AWS_SECRET_ACCESS_KEY}} - name: Build Documentation run: cargo +nightly doc --no-deps --all --all-features test_stable: runs-on: ubuntu-latest-16-core strategy: fail-fast: false name: stable - Test steps: - uses: actions/checkout@v4 - uses: ./.github/actions/install-rust with: AWS_ACCESS_KEY_ID: ${{ secrets.ROBOT_AWS_ACCESS_KEY_ID}} AWS_SECRET_ACCESS_KEY: ${{ secrets.ROBOT_AWS_SECRET_ACCESS_KEY}} - run: cargo nextest run --workspace --exclude topos-sequencer-subnet-runtime && cargo test --doc --workspace env: RUST_LOG: warn,topos=info test_nightly: runs-on: ubuntu-latest-16-core strategy: fail-fast: false name: nightly - Test steps: - uses: actions/checkout@v4 - uses: ./.github/actions/install-rust with: toolchain: nightly AWS_ACCESS_KEY_ID: ${{ secrets.ROBOT_AWS_ACCESS_KEY_ID}} AWS_SECRET_ACCESS_KEY: ${{ secrets.ROBOT_AWS_SECRET_ACCESS_KEY}} - run: cargo nextest run --workspace --exclude topos-sequencer-subnet-runtime && cargo test --doc --workspace env: RUST_LOG: topos=warn cert_delivery: runs-on: ubuntu-latest-16-core needs: [test_stable] strategy: fail-fast: true matrix: value: ["first", "second", "third"] steps: - uses: actions/checkout@v4 - uses: ./.github/actions/install-rust with: AWS_ACCESS_KEY_ID: ${{ secrets.ROBOT_AWS_ACCESS_KEY_ID}} AWS_SECRET_ACCESS_KEY: ${{ secrets.ROBOT_AWS_SECRET_ACCESS_KEY}} - run: cargo nextest run cert_delivery --locked --no-default-features env: RUST_LOG: topos=warn ================================================ FILE: .gitignore ================================================ # Generated by Cargo # will have compiled files and executables /target/ *~ # testing database /default_db/ /db*/ # These are backup files generated by rustfmt **/*.rs.bk # IntelliJ files .idea **/*.iml # VSCode .vscode **/tests/databases # ~~~ START TERRAFORM # Local .terraform directories **/.terraform/* # .tfstate files *.tfstate *.tfstate.* # Crash log files crash.log crash.*.log # Exclude all .tfvars files, 
which are likely to contain sensitive data, such as # password, private keys, and other secrets. These should not be part of version # control as they are data points which are potentially sensitive and subject # to change depending on the environment. *.tfvars *.tfvars.json # Ignore override files as they are usually used to override resources locally and so # are not checked in override.tf override.tf.json *_override.tf *_override.tf.json # Include override files you do wish to add to version control using negated pattern # !example_override.tf # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan # example: *tfplan* # Ignore CLI configuration files .terraformrc terraform.rc #k8s **/kubeconfig # ~~~ END TERRAFORM # Subnet integration tests crates/topos-sequencer-subnet-runtime/tests/temp # Node modules **/node_modules artifacts/ polygon-edge # macOS directory attributes **/.DS_Store tools/node_config/node/test/libp2p/ tools/node_config/node/test/consensus/ ================================================ FILE: CHANGELOG.md ================================================ ![topos](./.github/assets/topos_logo_dark.png) ## [0.1.0](https://github.com/topos-protocol/topos/compare/v0.0.11..0.1.0) - 2024-03-25 ### ⛰️ Features - Update smart-contracts to 3.4.0 stable - ([a714493](https://github.com/topos-protocol/topos/commit/a714493dd5aaf235d99f4b903f49a355e0c38d14)) - Add p2p layer health check ([#464](https://github.com/topos-protocol/topos/issues/464)) - ([d2ec941](https://github.com/topos-protocol/topos/commit/d2ec941ec24d3cbc27c11d4fcf6b0495911f85e2)) - Terminate stream if client is dropping the connection ([#463](https://github.com/topos-protocol/topos/issues/463)) - ([2c73f0b](https://github.com/topos-protocol/topos/commit/2c73f0bae1dc25aad504d45dcc789360cce7dbaa)) - Introduce topos-node crate ([#459](https://github.com/topos-protocol/topos/issues/459)) - 
([d8db631](https://github.com/topos-protocol/topos/commit/d8db631d970d6b855e5a47f0c561abe8bab9832d)) - Add proper error handling to setup command ([#452](https://github.com/topos-protocol/topos/issues/452)) - ([3335846](https://github.com/topos-protocol/topos/commit/3335846327c9c0f32e85694861f30520a9f2a6c5)) - Remove dht publication ([#449](https://github.com/topos-protocol/topos/issues/449)) - ([7030341](https://github.com/topos-protocol/topos/commit/70303412f139c7fe5ca0d4775f367d27543ac791)) - Add benchmark dns option to spam subcommand ([#448](https://github.com/topos-protocol/topos/issues/448)) - ([90405f3](https://github.com/topos-protocol/topos/commit/90405f3f4bd468c33685158ddddb793b109e3f22)) - Move telemetry-otlp setup into telemetry crate ([#446](https://github.com/topos-protocol/topos/issues/446)) - ([8a15fc4](https://github.com/topos-protocol/topos/commit/8a15fc4c0aa07ba71ca8376f21056949d71e92c5)) ### 🐛 Bug Fixes - *(config)* Fix the parse of edge_path ENV var ([#482](https://github.com/topos-protocol/topos/issues/482)) - ([b2a1af0](https://github.com/topos-protocol/topos/commit/b2a1af06dfa6987261a08ccbc8f05ed1bdc0d0b8)) - *(p2p)* Accept listener connection during bootstrap ([#484](https://github.com/topos-protocol/topos/issues/484)) - ([b8cd730](https://github.com/topos-protocol/topos/commit/b8cd730c2e2a6d2799a5c741b026cf03d9eadd33)) - *(p2p)* Rework ticks of bootstrap query interval ([#483](https://github.com/topos-protocol/topos/issues/483)) - ([5b6ddb8](https://github.com/topos-protocol/topos/commit/5b6ddb80ded50525a27617ca5f7c911525752619)) - Bump smart contract version ([#478](https://github.com/topos-protocol/topos/issues/478)) - ([642203c](https://github.com/topos-protocol/topos/commit/642203c962ede91d821af2b54e5f7bc0d845d407)) - Concurrency insert between pending and delivered ([#467](https://github.com/topos-protocol/topos/issues/467)) - ([bd5e3f5](https://github.com/topos-protocol/topos/commit/bd5e3f52ba00bafa25e0f3ce8b42b326e4fb5ef0)) - Block 
handling during certificate generation ([#471](https://github.com/topos-protocol/topos/issues/471)) - ([a5299c8](https://github.com/topos-protocol/topos/commit/a5299c80068d6612d1aba162556f9ccd3dd3d0a8)) - Update mio ([#473](https://github.com/topos-protocol/topos/issues/473)) - ([8291740](https://github.com/topos-protocol/topos/commit/82917405e7bf102c06194caadc973f57ac735649)) - Revert update smart contract event ([#470](https://github.com/topos-protocol/topos/issues/470)) - ([c41a51a](https://github.com/topos-protocol/topos/commit/c41a51a2ff86198f44dd6b24ff534507a17cf519)) - Update smart contract event ([#462](https://github.com/topos-protocol/topos/issues/462)) - ([f995859](https://github.com/topos-protocol/topos/commit/f9958599a1da31d7c92a8e8a0e925e79ce6140cb)) - Remove duplicated certificate push on gossipsub ([#458](https://github.com/topos-protocol/topos/issues/458)) - ([b0e88dc](https://github.com/topos-protocol/topos/commit/b0e88dce2b7ea060ce34377b40a10e54edd16e02)) - Add next_pending_certificate on end task ([#455](https://github.com/topos-protocol/topos/issues/455)) - ([2aaa500](https://github.com/topos-protocol/topos/commit/2aaa50071ca14415bb0284930c404659b6c463d8)) ### 🚜 Refactor - Improve delivery timing ([#466](https://github.com/topos-protocol/topos/issues/466)) - ([96e862f](https://github.com/topos-protocol/topos/commit/96e862f5b886a38a5c67590d1e152ab9894d6f15)) - Store instantiation ([#461](https://github.com/topos-protocol/topos/issues/461)) - ([213b8d4](https://github.com/topos-protocol/topos/commit/213b8d482cf6e08ec0f1cae0e9dfd981b156a98d)) - Update error management and config/process ([#460](https://github.com/topos-protocol/topos/issues/460)) - ([cc0c7b5](https://github.com/topos-protocol/topos/commit/cc0c7b538d9f6b91c184db10eedd9d94c4f368fb)) - Move edge config to config crate ([#445](https://github.com/topos-protocol/topos/issues/445)) - ([23cc558](https://github.com/topos-protocol/topos/commit/23cc55887703bac01b7ec26486f47b03316046c1)) - 
Tce-broadcast config ([#444](https://github.com/topos-protocol/topos/issues/444)) - ([10c3879](https://github.com/topos-protocol/topos/commit/10c3879cd30bf0172996cfbf48ab5c991e767eaf)) ### ⚙️ Miscellaneous Tasks - Update changelog for 0.1.0 - ([65fc8cd](https://github.com/topos-protocol/topos/commit/65fc8cd05d1fdaecd809e92a0643dc02557ad460)) - Update changelog for 0.1.0 - ([a82617a](https://github.com/topos-protocol/topos/commit/a82617a6c653f02a00fc9565f2c5abb42c9b6c26)) - Disable coverage report on release branch (push) - ([09f3663](https://github.com/topos-protocol/topos/commit/09f36639ef62a02a2a84bde8f36a98ce6274ea6f)) - Disable coverage report on release branch (push) - ([e909e22](https://github.com/topos-protocol/topos/commit/e909e22d6dac251e4026816cd8dd5c84851e9db5)) - Disable coverage report on release branch ([#481](https://github.com/topos-protocol/topos/issues/481)) - ([8f10090](https://github.com/topos-protocol/topos/commit/8f10090094bf110670137f73a115bda54f64aba5)) - Update changelog for 0.1.0 - ([c68798e](https://github.com/topos-protocol/topos/commit/c68798eeed366a421a076cc1908aaca8013d80cf)) - Creating CHANGELOG.md for 0.0.11 - ([463f52f](https://github.com/topos-protocol/topos/commit/463f52feb73f10d2a194cf44863842a9f0cf13a0)) - Bumping version 0.1.0 - ([16de6a6](https://github.com/topos-protocol/topos/commit/16de6a675b0fe44afd20526202a2e5178b40994d)) - Update deps ([#474](https://github.com/topos-protocol/topos/issues/474)) - ([264c569](https://github.com/topos-protocol/topos/commit/264c5694980fded79ea0749d03f54a345d90c741)) - Refactor logs and fix typo ([#465](https://github.com/topos-protocol/topos/issues/465)) - ([8044310](https://github.com/topos-protocol/topos/commit/8044310b8ee330d5a14d509137dc4243cb2c2372)) - Removing cache_size ([#472](https://github.com/topos-protocol/topos/issues/472)) - ([b2e4cf8](https://github.com/topos-protocol/topos/commit/b2e4cf88ac0c0b2ee92b7ef120a4c4e97493150c)) - Backport fix of 0.0.11 
([#453](https://github.com/topos-protocol/topos/issues/453)) - ([53328ac](https://github.com/topos-protocol/topos/commit/53328acc813816757c57f3279cbd5f2aa738d2f0)) ### Build - Ignore pr checking name for release ([#480](https://github.com/topos-protocol/topos/issues/480)) - ([cfd8890](https://github.com/topos-protocol/topos/commit/cfd8890a0cb03f25fdaae8b181ab9c33f785e34e)) ## [0.0.11](https://github.com/topos-protocol/topos/compare/v0.0.10..v0.0.11) - 2024-02-08 ### ⛰️ Features - Introduce topos-config crate ([#443](https://github.com/topos-protocol/topos/issues/443)) - ([4ff2a23](https://github.com/topos-protocol/topos/commit/4ff2a23e3a05ea3e950763bd4bde3d3ef6ef891b)) - Adding positions to certificate ([#440](https://github.com/topos-protocol/topos/issues/440)) - ([5315710](https://github.com/topos-protocol/topos/commit/531571025a4d81f9d9aa713ca12594756ca56a7e)) - Improve sequencer error handling and shutdown/restart sequence ([#428](https://github.com/topos-protocol/topos/issues/428)) - ([ab8bb9e](https://github.com/topos-protocol/topos/commit/ab8bb9e83afee545c3730f974ae8591c7fc70f3d)) - Update double echo to use pending CF ([#418](https://github.com/topos-protocol/topos/issues/418)) - ([8fb4003](https://github.com/topos-protocol/topos/commit/8fb4003d5579a8fee6d81c463707131959f076c3)) - Use anvil for sequencer tests ([#427](https://github.com/topos-protocol/topos/issues/427)) - ([5b0257b](https://github.com/topos-protocol/topos/commit/5b0257bed685c064c3eafbea2b5c77125e6c9041)) - Update tce config addresses ([#415](https://github.com/topos-protocol/topos/issues/415)) - ([476948f](https://github.com/topos-protocol/topos/commit/476948fa671b431bfa797aabf6b96949ac734db6)) - Remove the register commands macro ([#426](https://github.com/topos-protocol/topos/issues/426)) - ([985d0be](https://github.com/topos-protocol/topos/commit/985d0be0c75ddd1d41e94a172824b706ca0f9c5f)) - Run e2e topos integration workflow ([#408](https://github.com/topos-protocol/topos/issues/408)) - 
([f0b7637](https://github.com/topos-protocol/topos/commit/f0b763786aa869454c9e30076ed08d0a456ed319)) - Adding filter on message when non validator ([#405](https://github.com/topos-protocol/topos/issues/405)) - ([b096482](https://github.com/topos-protocol/topos/commit/b0964825a5f386d75507482ee9068e26c9d74fe0)) - Add no-edge-process flag to node init ([#401](https://github.com/topos-protocol/topos/issues/401)) - ([28a553b](https://github.com/topos-protocol/topos/commit/28a553b6d17933bfbcca835640cc0c165bcd0124)) - Refactor peer selection for synchronization ([#382](https://github.com/topos-protocol/topos/issues/382)) - ([6982d33](https://github.com/topos-protocol/topos/commit/6982d336296a9b9ec5eacb025d938b6cb47b6e0a)) - Remove task manager channels ([#391](https://github.com/topos-protocol/topos/issues/391)) - ([f5fa427](https://github.com/topos-protocol/topos/commit/f5fa4276d8a524fd04bbf2a0d1d036d7f5af34bb)) - Add batch message and update double echo ([#383](https://github.com/topos-protocol/topos/issues/383)) - ([f0bc90c](https://github.com/topos-protocol/topos/commit/f0bc90c7480a84c0c12016e748f2a002477f4417)) ### 🐛 Bug Fixes - Fixing wrong use of IntCounterVec ([#442](https://github.com/topos-protocol/topos/issues/442)) - ([fe062a5](https://github.com/topos-protocol/topos/commit/fe062a5bdfa9ade2b94de88cbd6b3946b72a94c3)) - Clippy unused ([#434](https://github.com/topos-protocol/topos/issues/434)) - ([4aa6a9e](https://github.com/topos-protocol/topos/commit/4aa6a9e4723b8aaa2c2da0fdf32d8c8c926f7764)) - Remove an unused channel that was locking the broadcast ([#433](https://github.com/topos-protocol/topos/issues/433)) - ([43c6fe5](https://github.com/topos-protocol/topos/commit/43c6fe5caffd35103b20ec11d7ae2f35b1af98b9)) - RUSTSEC-2024-0003 ([#431](https://github.com/topos-protocol/topos/issues/431)) - ([cad4d76](https://github.com/topos-protocol/topos/commit/cad4d76ccf88be3f1399d371cf9dbdd7211343bc)) - RUSTSEC-2023-0078 
([#429](https://github.com/topos-protocol/topos/issues/429)) - ([35f8930](https://github.com/topos-protocol/topos/commit/35f8930056f641b33c56b4799b8c0cbe6f0a5eda)) - Fixing release notification ([#423](https://github.com/topos-protocol/topos/issues/423)) - ([7503fa7](https://github.com/topos-protocol/topos/commit/7503fa7f2385294f2a12a86eaa521af61fb9bc95)) - Move test abi generation to separate module ([#424](https://github.com/topos-protocol/topos/issues/424)) - ([d4ff358](https://github.com/topos-protocol/topos/commit/d4ff3581d43eabb14c3d977f34b69aee396e98d4)) - Return error from subprocesses ([#422](https://github.com/topos-protocol/topos/issues/422)) - ([53b3229](https://github.com/topos-protocol/topos/commit/53b3229b7e26b7bb9b3a8d97725e7cc86174df9b)) ### 🚜 Refactor - Graphql types to differentiate inputs ([#435](https://github.com/topos-protocol/topos/issues/435)) - ([4b0ec9b](https://github.com/topos-protocol/topos/commit/4b0ec9b2b3b6ab075d1b9bfde54dc8bb179bbde8)) ### ⚙️ Miscellaneous Tasks - Debug 0.0.11 synchronization ([#447](https://github.com/topos-protocol/topos/issues/447)) - ([edf86ee](https://github.com/topos-protocol/topos/commit/edf86ee32f8b34c5b11eecd5b0ed6fe5bdd0191a)) - Update dependencies ([#450](https://github.com/topos-protocol/topos/issues/450)) - ([62126e0](https://github.com/topos-protocol/topos/commit/62126e0417d8d4225eb8bb6eebb7fc0a0f526cc6)) - Remove mention of topos-api from coverage YAML ([#438](https://github.com/topos-protocol/topos/issues/438)) - ([6c7e342](https://github.com/topos-protocol/topos/commit/6c7e342715d86bcbd75e1bcd4054eb4cbeddf5cf)) - Bump version for topos to 0.0.11 ([#439](https://github.com/topos-protocol/topos/issues/439)) - ([917eaf9](https://github.com/topos-protocol/topos/commit/917eaf993336780a373dbcdfea4dd0d8b3e81f50)) - Refactor struct in topos-core ([#437](https://github.com/topos-protocol/topos/issues/437)) - ([acedac7](https://github.com/topos-protocol/topos/commit/acedac7e09094364b5406ee72f9a65241784c478)) 
- Update crates structure for api/uci and core ([#436](https://github.com/topos-protocol/topos/issues/436)) - ([355b08a](https://github.com/topos-protocol/topos/commit/355b08acf91052564dae65872904950d20b72ebd)) - Fix logs for pending ([#432](https://github.com/topos-protocol/topos/issues/432)) - ([342b2c7](https://github.com/topos-protocol/topos/commit/342b2c71ef0621a93b5f4460abd313f1c8b4c62b)) - Updating double echo for devnet ([#416](https://github.com/topos-protocol/topos/issues/416)) - ([1e91086](https://github.com/topos-protocol/topos/commit/1e91086a68ec01d304c5d8867e8fbcd671798599)) - Fixing clippy warning for 1.75.0 ([#425](https://github.com/topos-protocol/topos/issues/425)) - ([22a3745](https://github.com/topos-protocol/topos/commit/22a374506e087ab9cba68f9e0ed00682df6be6df)) - Refactor push certificate tests and cleanup regtest ([#399](https://github.com/topos-protocol/topos/issues/399)) - ([c60170d](https://github.com/topos-protocol/topos/commit/c60170dc19a9add84648afaf7962252ec77c160f)) - Update signal handle by tce ([#417](https://github.com/topos-protocol/topos/issues/417)) - ([beca28b](https://github.com/topos-protocol/topos/commit/beca28ba224b4a217a147f88142692acd1613a32)) - Cleanup topos tools ([#397](https://github.com/topos-protocol/topos/issues/397)) - ([0820306](https://github.com/topos-protocol/topos/commit/08203062a1cada7470d8b8207539d7a660ece466)) - Adding context on connection to self ([#413](https://github.com/topos-protocol/topos/issues/413)) - ([6e72999](https://github.com/topos-protocol/topos/commit/6e729992202ed4c56a1564cc39755ff1e0766ff8)) - Adding context on connection to self ([#411](https://github.com/topos-protocol/topos/issues/411)) - ([3a799ac](https://github.com/topos-protocol/topos/commit/3a799ac41542bd216a0512d9cf3a634a6ed2af07)) - Adding context to p2p msg received ([#410](https://github.com/topos-protocol/topos/issues/410)) - ([e1b2ccf](https://github.com/topos-protocol/topos/commit/e1b2ccf99a114a60e04c8ed187a8c9bd2cf066e4)) 
- Adding context to certificate broadcast ([#409](https://github.com/topos-protocol/topos/issues/409)) - ([d170b6b](https://github.com/topos-protocol/topos/commit/d170b6b386005d44457979bcd0a0f2435153f3f2)) - Adding storage context on startup ([#404](https://github.com/topos-protocol/topos/issues/404)) - ([ffae4c6](https://github.com/topos-protocol/topos/commit/ffae4c63d00bb099a49216c2af560f6b59601133)) - Remove audit ignore report ([#407](https://github.com/topos-protocol/topos/issues/407)) - ([70bce47](https://github.com/topos-protocol/topos/commit/70bce479d16c750ff1a322db1e88bfae08303855)) - Update SynchronizerService to remove unwrap ([#403](https://github.com/topos-protocol/topos/issues/403)) - ([b424aa9](https://github.com/topos-protocol/topos/commit/b424aa91f68197134faec742a23eec513cde837f)) - Adding no-color option to CLI ([#402](https://github.com/topos-protocol/topos/issues/402)) - ([4989936](https://github.com/topos-protocol/topos/commit/4989936ae000a85897d805453c91a53f4edd6580)) - Adding cross compilation ([#400](https://github.com/topos-protocol/topos/issues/400)) - ([887762f](https://github.com/topos-protocol/topos/commit/887762f740241d49d2886cd55d6a9da1d3d83e7e)) - Adding openssl as dependency ([#396](https://github.com/topos-protocol/topos/issues/396)) - ([60f873c](https://github.com/topos-protocol/topos/commit/60f873c619b8132a2276634205e3c39561eb3faf)) - Update release action target ([#395](https://github.com/topos-protocol/topos/issues/395)) - ([54db400](https://github.com/topos-protocol/topos/commit/54db40020e8e22d7b100b0b6944a45485e2939a4)) - Update release action target ([#394](https://github.com/topos-protocol/topos/issues/394)) - ([f0f28c3](https://github.com/topos-protocol/topos/commit/f0f28c33a5332232b2921b13051ae5de714cf58b)) - Adding aarch64 image ([#393](https://github.com/topos-protocol/topos/issues/393)) - ([9f48dc8](https://github.com/topos-protocol/topos/commit/9f48dc88582bfbc71cabb2c6bbc27297cc9b87ee)) - Cleanup topos test network 
setup ([#390](https://github.com/topos-protocol/topos/issues/390)) - ([2820664](https://github.com/topos-protocol/topos/commit/2820664a66bdfe039f1aaa80d79f4ff339c8a65c)) ================================================ FILE: Cargo.toml ================================================ [workspace] resolver = "2" default-members = ["crates/topos", "crates/topos-node"] members = [ "crates/*" ] [workspace.package] version = "0.1.0" [workspace.lints.rust] # Deny missing_docs = "allow" # Warn deprecated-in-future = "warn" [profile.release] strip = true [workspace.dependencies] topos-core = { path = "./crates/topos-core", default-features = false } topos-crypto = { path = "./crates/topos-crypto", default-features = false } topos-metrics = { path = "./crates/topos-metrics/", default-features = false } # Various utility crates clap = { version = "4.0", features = ["derive", "env", "string"] } lazy_static = "1" rand = { version = "0.8", default-features = false } rand_core = { version = "0.6", default-features = false } rand_distr = { version = "0.4", default-features = false } # Async & Tokio related async-stream = { version = "0.3", default-features = false } async-trait = { version = "0.1", default-features = false } futures = { version = "0.3" } tokio = { version = "1.24", default-features = false } tokio-util = { version = "0.7.8" } tokio-stream = { version = "0.1", default-features = false } tower = "0.4" # Blockchain ethereum-types = { version = "0.13.1"} secp256k1 = {version = "0.27", features = ["recovery"]} tiny-keccak = {version = "1.5"} ethers = {version = "2.0.9", features = ["legacy", "abigen-online"]} # Log, Tracing & telemetry opentelemetry = { version = "0.22", features = ["metrics"] } opentelemetry-otlp = { version = "0.15", features = ["grpc-tonic", "metrics", "tls-roots"] } opentelemetry_sdk = { version = "0.22" } prometheus = "0.13.3" prometheus-client = "0.22" tracing = { version = "0.1", default-features = false } tracing-attributes = "0.1" 
tracing-opentelemetry = "0.23" tracing-subscriber = { version = "0.3", default-features = false } # gRPC prost = {version = "0.12"} tonic = { version = "0.11", default-features = false } tonic-build = { version = "0.11", default-features = false, features = [ "prost", "transport" ] } # Axum server (GraphQL + Metrics) axum = "0.6" async-graphql = "6" async-graphql-axum = "6" http = "0.2.9" tower-http = { version = "0.4", features = ["cors"] } # P2P related libp2p = { version = "0.53", default-features = false, features = ["noise"]} # Serialization & Deserialization bincode = { version = "1.3", default-features = false } byteorder = { version = "1.4", default-features = false } bytes = { version = "1.3", default-features = false } hex = { version = "0.4", default-features = false } serde = { version = "1.0", default-features = false } serde_json = { version = "1.0", default-features = false } thiserror = { version = "1.0", default-features = false } uuid = { version = "1.1.2", default-features = false, features = ["v4"] } base64ct = { version = "1", features = ["alloc"] } # Network related backoff = { version = "0.4", features = ["tokio", "futures"] } hyper = { version = "0.14.26", features = ["full"] } reqwest = { version = "0.11", features = ["json"] } # Tests rstest = { version = "0.17.0", default-features = false } test-log = { version = "0.2", features = ["trace"] } env_logger = { version = "0.10.0"} # Needed by test-log to print traces in tests serial_test = {version = "0.9.0"} ================================================ FILE: Cross.toml ================================================ [build] pre-build = [ "apt update && apt install -y unzip", "curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v25.1/protoc-25.1-linux-x86_64.zip && unzip protoc-25.1-linux-x86_64.zip -d /usr/ && chmod 755 /usr/bin/protoc" ] [target.x86_64-unknown-linux-gnu] image = "ghcr.io/cross-rs/x86_64-unknown-linux-gnu:main" [target.aarch64-unknown-linux-gnu] image 
= "ghcr.io/cross-rs/aarch64-unknown-linux-gnu:main" ================================================ FILE: Dockerfile ================================================ ARG RUSTUP_TOOLCHAIN=stable FROM --platform=${BUILDPLATFORM:-linux/amd64} ghcr.io/topos-protocol/rust_builder:bullseye-${RUSTUP_TOOLCHAIN} AS base ARG FEATURES # Rust cache ARG SCCACHE_S3_KEY_PREFIX ARG SCCACHE_BUCKET ARG SCCACHE_REGION ARG RUSTC_WRAPPER ARG PROTOC_VERSION=22.2 WORKDIR /usr/src/app FROM --platform=${BUILDPLATFORM:-linux/amd64} base AS build COPY . . RUN --mount=type=secret,id=aws,target=/root/.aws/credentials \ --mount=type=cache,id=sccache,target=/root/.cache/sccache \ cargo build --release --no-default-features --features=${FEATURES} \ && sccache --show-stats FROM --platform=${BUILDPLATFORM:-linux/amd64} debian:bullseye-slim AS topos ENV TCE_PORT=9090 ENV USER=topos ENV UID=10001 ENV PATH="${PATH}:/usr/src/app" WORKDIR /usr/src/app COPY --from=build /usr/src/app/target/release/topos . RUN apt-get update && apt-get install -y \ ca-certificates \ jq \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* RUN mkdir /tmp/node_config RUN mkdir /tmp/shared ENTRYPOINT ["./topos"] ================================================ FILE: LICENSE ================================================ The Transmission Control Engine (TCE) and Sequencer are licensed under BSL-1.1. ----------------------------------------------------------------------------- Business Source License 1.1 License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. "Business Source License" is a trademark of MariaDB Corporation Ab. 
Parameters Licensor: zk Foundation Licensed Work: Code contained within the "topos" repository Transmission Control Engine (TCE) and Sequencer The Licensed Work is (c) zk Foundation Additional Use Grant: You may use the Licensed Work in a production environment in the following circumstances: (1) solely for non-malicious use of the Topos Protocol via zk Foundation promulgated networks for the operation of a node on a network or for the operation of software in support of connecting a Subnet to a network; and (2) for non-commercial academic research purposes only. “Topos Protocol” as used herein means the generalized implementation of “Topos: A Secure, Trustless, and Decentralized Interoperability Protocol” (https://arxiv.org/pdf/2206.03481.pdf) distributed by zk Foundation. A “Subnet” as used herein means a process, set of processes, computer system, database, or network of systems and/or processes as defined by the Topos Protocol. For any additional licensing arrangements, please contact zk Foundation at: info@zkfoundation.io. Change Date: July 1, 2025 Change License: GPL v3.0 ----------------------------------------------------------------------------- Terms The Licensor hereby grants you the right to copy, modify, create derivative works, redistribute, and make non-production use of the Licensed Work. The Licensor may make an Additional Use Grant, above, permitting limited production use. Effective on the Change Date, or the fourth anniversary of the first publicly available distribution of a specific version of the Licensed Work under this License, whichever comes first, the Licensor hereby grants you rights under the terms of the Change License, and the rights granted in the paragraph above terminate. 
If your use of the Licensed Work does not comply with the requirements currently in effect as described in this License, you must purchase a commercial license from the Licensor, its affiliated entities, or authorized resellers, or you must refrain from using the Licensed Work. All copies of the original and modified Licensed Work, and derivative works of the Licensed Work, are subject to this License. This License applies separately for each version of the Licensed Work and the Change Date may vary for each version of the Licensed Work released by Licensor. You must conspicuously display this License on each original or modified copy of the Licensed Work. If you receive the Licensed Work in original or modified form from a third party, the terms and conditions set forth in this License apply to your use of that work. Any use of the Licensed Work in violation of this License will automatically terminate your rights under this License for the current and all other versions of the Licensed Work. This License does not grant you any right in any trademark or logo of Licensor or its affiliates (provided that you may use a trademark or logo of Licensor as expressly required by this License). TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND TITLE. MariaDB hereby grants you permission to use this License’s text to license your works, and to refer to it using the trademark “Business Source License”, as long as you comply with the Covenants of Licensor below. Covenants of Licensor In consideration of the right to use this License’s text and the “Business Source License” name and trademark, Licensor covenants to MariaDB, and to all other recipients of the licensed work to be provided by Licensor: 1. 
To specify as the Change License the GPL Version 2.0 or any later version, or a license that is compatible with GPL Version 2.0 or a later version, where “compatible” means that software provided under the Change License can be included in a program with software provided under GPL Version 2.0 or a later version. Licensor may specify additional Change Licenses without limitation. 2. To either: (a) specify an additional grant of rights to use that does not impose any additional restriction on the right granted in this License, as the Additional Use Grant; or (b) insert the text “None”. 3. To specify a Change Date. 4. Not to modify this License in any other way. ----------------------------------------------------------------------------- Notice The Business Source License (this document, or the "License") is not an Open Source license. However, the Licensed Work will eventually be made available under an Open Source License, as stated in this License. ================================================ FILE: README.md ================================================

Logo Logo

topos is the unified command line interface to the Topos protocol.


![Test workflow](https://github.com/topos-protocol/topos/actions/workflows/test.yml/badge.svg) ![Quality workflow](https://github.com/topos-protocol/topos/actions/workflows/quality.yml/badge.svg) [![codecov](https://codecov.io/gh/topos-protocol/topos/branch/main/graph/badge.svg?token=FOH2B2GRL9)](https://codecov.io/gh/topos-protocol/topos) ![MSRV](https://img.shields.io/badge/MSRV-1.71.1-blue?labelColor=1C2C2E&logo=Rust) [![](https://dcbadge.vercel.app/api/server/7HZ8F8ykBT?style=flat)](https://discord.gg/7HZ8F8ykBT)
## Getting Started **Install Rust** The first step is to install Rust along with `cargo` by following the instructions [here](https://doc.rust-lang.org/book/ch01-01-installation.html#installing-rustup-on-linux-or-macos). **Install `topos`** ``` cargo install topos --git https://github.com/topos-protocol/topos ``` **Try out `topos`!** ``` topos --help ``` Find more about how topos works in the [documentation](https://docs.topos.technology/). ### Topos Docker image The Docker images use the `stable` Rust toolchain by default. You can use a different one by defining the `RUSTUP_TOOLCHAIN` argument; the list of available toolchains is [here](https://github.com/topos-protocol/topos-ci-docker/pkgs/container/rust_builder). Build the Topos Docker image: ``` DOCKER_BUILDKIT=1 docker build . --build-arg RUSTUP_TOOLCHAIN=[...] -t topos:latest ``` Run the Topos Docker image: ``` docker run -it --rm topos:latest --help ``` ## Development Contributions are very welcome; the guidelines are outlined in [`CONTRIBUTING.md`](https://github.com/topos-protocol/.github/blob/main/CONTRIBUTING.md).
## Support Feel free to [open an issue](https://github.com/topos-protocol/topos/issues/new) if you have any feature requests or bug reports.
If you have any questions, do not hesitate to reach us on [Discord](https://discord.gg/7HZ8F8ykBT)! ## Resources - Website: - Technical Documentation: - Medium: - Whitepaper: [Topos: A Secure, Trustless, and Decentralized Interoperability Protocol](https://arxiv.org/pdf/2206.03481.pdf) ## License This project is released under the terms specified in the [LICENSE](LICENSE) file. ================================================ FILE: cliff.toml ================================================ # git-cliff ~ configuration file # https://git-cliff.org/docs/configuration # # Lines starting with "#" are comments. # Configuration options are organized into tables and keys. # See documentation for more information on available options. [changelog] # changelog header header = """ ![topos](./.github/assets/topos_logo_dark.png) """ # template for the changelog body # https://keats.github.io/tera/docs/#introduction body = """ {%- macro remote_url() -%} https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }} {%- endmacro -%} {% macro print_commit(commit) -%} - {% if commit.scope %}*({{ commit.scope }})* {% endif %}\ {% if commit.breaking %}[**breaking**] {% endif %}\ {{ commit.message | upper_first }} - \ ([{{ commit.id | truncate(length=7, end="") }}]({{ self::remote_url() }}/commit/{{ commit.id }}))\ {% endmacro -%} {% if version %}\ {% if previous.version %}\ ## [{{ version | trim_start_matches(pat="v") }}]\ ({{ self::remote_url() }}/compare/{{ previous.version }}..{{ version }}) - {{ timestamp | date(format="%Y-%m-%d") }} {% else %}\ ## [{{ version | trim_start_matches(pat="v") }}] - {{ timestamp | date(format="%Y-%m-%d") }} {% endif %}\ {% else %}\ ## [unreleased] {% endif %}\ {% for group, commits in commits | group_by(attribute="group") %} ### {{ group | striptags | trim | upper_first }} {% for commit in commits | filter(attribute="scope") | sort(attribute="scope") %} {{ self::print_commit(commit=commit) }} {%- endfor -%} {% raw %}\n{% endraw %}\ {%- for 
commit in commits %} {%- if not commit.scope -%} {{ self::print_commit(commit=commit) }} {% endif -%} {% endfor -%} {% endfor %}\n """ # template for the changelog footer footer = """ """ # remove the leading and trailing whitespace from the templates trim = true # postprocessors postprocessors = [ { pattern = '', replace = "https://github.com/topos-protocol/topos" }, # replace repository URL ] [git] # parse the commits based on https://www.conventionalcommits.org conventional_commits = true # filter out the commits that are not conventional filter_unconventional = true # process each line of a commit as an individual commit split_commits = false # regex for preprocessing the commit messages commit_preprocessors = [ { pattern = '\((\w+\s)?#([0-9]+)\)', replace = "([#${2}](/issues/${2}))" }, # Check spelling of the commit with https://github.com/crate-ci/typos # If the spelling is incorrect, it will be automatically fixed. { pattern = '.*', replace_command = 'typos --write-changes -' }, ] # regex for parsing and grouping commits commit_parsers = [ { message = "^feat", group = "⛰️ Features" }, { message = "^fix", group = "🐛 Bug Fixes" }, { message = "^doc", group = "📚 Documentation" }, { message = "^perf", group = "⚡ Performance" }, { message = "^refactor\\(clippy\\)", skip = true }, { message = "^refactor", group = "🚜 Refactor" }, { message = "^style", group = "🎨 Styling" }, { message = "^test", group = "🧪 Testing" }, { message = "^chore\\(release\\): prepare for", skip = true }, { message = "^chore\\(deps.*\\)", skip = true }, { message = "^chore\\(pr\\)", skip = true }, { message = "^chore\\(pull\\)", skip = true }, { message = "^chore|^ci", group = "⚙️ Miscellaneous Tasks" }, { body = ".*security", group = "🛡️ Security" }, { message = "^revert", group = "◀️ Revert" }, ] # protect breaking changes from being skipped due to matching a skipping commit_parser protect_breaking_commits = false # filter out the commits that are not matched by commit parsers 
filter_commits = false # regex for matching git tags tag_pattern = "v[0-9].*" # regex for skipping tags skip_tags = "beta|alpha" # regex for ignoring tags ignore_tags = "rc|v2.1.0|v2.1.1" # sort the tags topologically topo_order = false # sort the commits inside sections by oldest/newest order sort_commits = "newest" ================================================ FILE: crates/topos/Cargo.toml ================================================ [package] name = "topos" version = "0.1.0" edition = "2021" [lints] workspace = true [dependencies] topos-node = { path = "../topos-node/" } topos-config = { path = "../topos-config/" } topos-tce = { path = "../topos-tce/" } topos-p2p = { path = "../topos-p2p" } topos-sequencer = { path = "../topos-sequencer" } topos-core = { workspace = true, features = ["api"] } topos-certificate-spammer = { path = "../topos-certificate-spammer" } topos-tce-broadcast = { path = "../topos-tce-broadcast", optional = true } topos-wallet = { path = "../topos-wallet" } topos-telemetry = { path = "../topos-telemetry/", features = ["tracing"] } async-stream.workspace = true async-trait.workspace = true clap.workspace = true hex.workspace = true futures.workspace = true opentelemetry.workspace = true serde.workspace = true serde_json.workspace = true tokio = { workspace = true, features = ["full"] } tokio-util.workspace = true tonic.workspace = true tower.workspace = true tracing = { workspace = true, features = ["log"] } tracing-opentelemetry.workspace = true tracing-subscriber = { workspace = true, features = ["env-filter", "json", "ansi", "fmt"] } uuid.workspace = true rand.workspace = true reqwest.workspace = true thiserror.workspace = true opentelemetry-otlp = { workspace = true, features = ["grpc-tonic", "metrics", "tls-roots"] } dirs = "5.0" tracing-log = { version = "0.1.3", features = ["env_logger"] } tar = "0.4.38" flate2 ="1.0.26" url = "2.3.1" once_cell = "1.17.1" regex = "1" rlp = "0.5.1" openssl = { version = "0.10.61", features = 
["vendored"] } [dev-dependencies] toml = "0.7.4" topos-tce-broadcast = { path = "../topos-tce-broadcast" } topos-tce-synchronizer = { path = "../topos-tce-synchronizer" } topos-tce-gatekeeper = { path = "../topos-tce-gatekeeper" } topos-tce-api = { path = "../topos-tce-api" } topos-tce-storage = { path = "../topos-tce-storage" } topos-test-sdk = { path = "../topos-test-sdk" } serde.workspace = true serde_json.workspace = true test-log.workspace = true env_logger.workspace = true rand.workspace = true futures.workspace = true libp2p = { workspace = true, features = ["identify"] } assert_cmd = "2.0.6" insta = { version = "1.21", features = ["json", "redactions"] } rstest = { workspace = true, features = ["async-timeout"] } tempfile = "3.8.0" predicates = "3.0.3" sysinfo = "0.29.11" serial_test = {version = "0.9.0"} [features] default = [] ================================================ FILE: crates/topos/build.rs ================================================ use std::process::Command; const DEFAULT_VERSION: &str = "detached"; fn main() { // Set TOPOS_VERSION to HEAD short commit hash if None if std::option_env!("TOPOS_VERSION").is_none() { let output = Command::new("git") .args(["rev-parse", "--short", "HEAD"]) .output() .expect("failed to access the HEAD commit hash"); let git_hash = String::from_utf8(output.stdout).unwrap(); let topos_version = if git_hash.is_empty() { DEFAULT_VERSION } else { git_hash.as_str() }; println!("cargo:rustc-env=TOPOS_VERSION={topos_version}"); } } ================================================ FILE: crates/topos/src/components/mod.rs ================================================ pub(crate) mod node; pub(crate) mod regtest; pub(crate) mod setup; ================================================ FILE: crates/topos/src/components/node/commands/init.rs ================================================ use std::path::PathBuf; use clap::Args; use serde::Serialize; use topos_config::node::NodeRole; #[derive(Args, Debug, Serialize)] 
#[command(about = "Setup your node", trailing_var_arg = true)]
#[serde(rename_all = "kebab-case")]
pub struct Init {
    // NOTE(review): generic type parameters throughout this struct (and the
    // structs below) were stripped by the extraction; the `Option<String>`
    // types are restored from the surrounding string default values and env
    // usage — confirm against the repository.

    /// Name to identify your node
    #[arg(long, env = "TOPOS_NODE_NAME", default_value = "default")]
    pub name: Option<String>,

    /// Role of your node
    #[arg(long, value_enum, env = "TOPOS_NODE_ROLE", default_value_t = NodeRole::Validator)]
    pub role: NodeRole,

    /// Subnet of your node
    #[arg(long, env = "TOPOS_NODE_SUBNET", default_value = "topos")]
    pub subnet: Option<String>,

    /// The path to the SecretsManager config file. Used for Hashicorp Vault.
    /// If omitted, the local FS secrets manager is used
    #[arg(long, env = "TOPOS_SECRETS_MANAGER")]
    pub secrets_config: Option<String>,

    /// For certain use cases, we manually provide private keys to a running node, and don't want to
    /// rely on polygon-edge during runtime. Example: A sequencer which runs for an external EVM chain
    #[arg(long, env = "TOPOS_NO_EDGE_PROCESS", action)]
    pub no_edge_process: bool,

    /// Installation directory path for Polygon Edge binary
    #[clap(from_global)]
    pub(crate) edge_path: PathBuf,
}

================================================
FILE: crates/topos/src/components/node/commands/status.rs
================================================
use super::NodeArgument;
use clap::Args;
use serde::Serialize;

#[derive(Args, Debug, Serialize)]
#[command(about = "Get node status")]
pub(crate) struct Status {
    #[command(flatten)]
    pub(crate) node_args: NodeArgument,

    #[arg(long)]
    pub(crate) sample: bool,
}

================================================
FILE: crates/topos/src/components/node/commands/up.rs
================================================
use std::path::PathBuf;

use clap::Args;
use serde::Serialize;

#[derive(Args, Clone, Debug, Serialize)]
#[command(about = "Spawn your node")]
#[serde(rename_all = "kebab-case")]
pub struct Up {
    /// Name to identify your node
    #[arg(long, env = "TOPOS_NODE_NAME", default_value = "default")]
    pub name: Option<String>,

    /// The path to the SecretsManager config file. Used for Hashicorp Vault.
    /// If omitted, the local FS secrets manager is used
    #[arg(long, env = "TOPOS_SECRETS_MANAGER")]
    pub secrets_config: Option<String>,

    /// Defines that an external edge node will be used, replacing the one normally run by the node.
    /// Usable for cases where edge endpoint is available as infura (or similar cloud provider) endpoint
    #[arg(long, env = "TOPOS_NO_EDGE_PROCESS", action)]
    pub no_edge_process: bool,

    /// Socket of the opentelemetry agent endpoint.
    /// If not provided open telemetry will not be used
    #[arg(long, env = "TOPOS_OTLP_AGENT")]
    pub otlp_agent: Option<String>,

    /// Otlp service name.
    /// If not provided open telemetry will not be used
    #[arg(long, env = "TOPOS_OTLP_SERVICE_NAME")]
    pub otlp_service_name: Option<String>,

    /// Installation directory path for Polygon Edge binary
    #[clap(from_global)]
    pub(crate) edge_path: PathBuf,
}

================================================
FILE: crates/topos/src/components/node/commands.rs
================================================
use std::path::PathBuf;

use clap::{Args, Subcommand};
use serde::Serialize;

mod init;
mod status;
mod up;

pub(crate) use init::Init;
pub(crate) use status::Status;
pub(crate) use up::Up;

#[derive(Args, Debug, Serialize)]
pub(crate) struct NodeArgument {
    #[clap(short, long, default_value = "http://[::1]:1340")]
    pub(crate) node: String,
}

/// Utility to manage your nodes in the Topos network
#[derive(Args, Debug)]
pub(crate) struct NodeCommand {
    #[clap(from_global)]
    pub(crate) verbose: u8,

    #[clap(from_global)]
    pub(crate) no_color: bool,

    #[clap(from_global)]
    pub(crate) home: PathBuf,

    /// Installation directory path for Polygon Edge binary
    #[arg(
        global = true,
        long,
        env = "TOPOS_POLYGON_EDGE_BIN_PATH",
        default_value = "."
)] pub(crate) edge_path: PathBuf, #[clap(subcommand)] pub(crate) subcommands: Option, } #[derive(Subcommand, Debug, Serialize)] pub(crate) enum NodeCommands { Up(Box), Init(Box), Status(Status), } #[cfg(test)] mod tests { use super::*; #[test] fn test_run() { assert!(NodeCommands::has_subcommand("up")); assert!(NodeCommands::has_subcommand("init")); } } ================================================ FILE: crates/topos/src/components/node/mod.rs ================================================ use std::{ fs::{create_dir_all, remove_dir_all, OpenOptions}, io::Write, }; use std::{path::Path, sync::Arc}; use tokio::sync::Mutex; use tonic::transport::{Channel, Endpoint}; use topos_telemetry::tracing::setup_tracing; use tower::Service; use tracing::error; use self::commands::{NodeCommand, NodeCommands}; use topos_config::node::NodeConfig; use topos_config::{edge::command::BINARY_NAME, Config}; use topos_core::api::grpc::tce::v1::console_service_client::ConsoleServiceClient; pub(crate) mod commands; pub(crate) mod services; pub(crate) struct NodeService { pub(crate) console_client: Arc>>, } impl NodeService { pub(crate) fn with_grpc_endpoint(endpoint: &str) -> Self { Self { console_client: setup_console_tce_grpc(endpoint), } } } pub(crate) async fn handle_command( NodeCommand { subcommands, verbose, no_color, home, edge_path, }: NodeCommand, ) -> Result<(), Box> { match subcommands { Some(NodeCommands::Init(cmd)) => { let cmd = *cmd; let name = cmd.name.clone().expect("No name or default was given"); _ = setup_tracing(verbose, no_color, None, None, env!("TOPOS_VERSION")); // Construct path to node config // will be $TOPOS_HOME/node/default/ with no given name // and $TOPOS_HOME/node// with a given name let node_path = home.join("node").join(&name); // If the folders don't exist yet, create it create_dir_all(&node_path).expect("failed to create node folder"); // Check if the config file exists let config_path = node_path.join("config.toml"); if 
Path::new(&config_path).exists() { println!("Config file: {} already exists", config_path.display()); std::process::exit(1); } if cmd.no_edge_process { println!("Init the node without polygon-edge process..."); } else { // Generate the Edge configuration match topos_config::edge::generate_edge_config( edge_path.join(BINARY_NAME), node_path.clone(), ) .await { Ok(Ok(status)) => { if let Some(0) = status.code() { println!("Edge configuration successfully generated"); } else { println!( "Edge configuration generation terminated with error status: {:?}", status ); remove_dir_all(node_path).expect("failed to remove config folder"); std::process::exit(1); } } Ok(Err(e)) => { println!("Failed to generate edge config with error {e}"); remove_dir_all(node_path).expect("failed to remove config folder"); std::process::exit(1); } Err(_) => { println!("Failed to generate edge config"); remove_dir_all(node_path).expect("failed to remove config folder"); std::process::exit(1); } } } let node_config = NodeConfig::create(&home, &name, Some(&cmd)); // Creating the TOML output let config_toml = match node_config.to_toml() { Ok(config) => config, Err(error) => { println!("Failed to generate TOML config: {error}"); remove_dir_all(node_path).expect("failed to remove config folder"); std::process::exit(1); } }; let config_path = node_path.join("config.toml"); let mut node_config_file = OpenOptions::new() .write(true) .create(true) .truncate(true) .open(config_path) .expect("failed to create default node file"); node_config_file .write_all(config_toml.to_string().as_bytes()) .expect("failed to write to default node file"); println!( "Created node config file at {}/config.toml", node_path.display() ); Ok(()) } Some(NodeCommands::Up(cmd)) => { let cmd_cloned = cmd.clone(); let command = *cmd; let name = cmd_cloned .name .as_ref() .expect("No name or default was given for node"); let config = NodeConfig::try_from(&home, name, Some(&command))?; topos_node::start( verbose, no_color, 
cmd_cloned.otlp_agent, cmd_cloned.otlp_service_name, cmd_cloned.no_edge_process, config, ) .await?; Ok(()) } Some(NodeCommands::Status(status)) => { let mut node_service = NodeService::with_grpc_endpoint(&status.node_args.node); let exit_code = i32::from(!(node_service.call(status).await?)); std::process::exit(exit_code); } None => Ok(()), } } fn setup_console_tce_grpc(endpoint: &str) -> Arc>> { match Endpoint::from_shared(endpoint.to_string()) { Ok(endpoint) => Arc::new(Mutex::new(ConsoleServiceClient::new( endpoint.connect_lazy(), ))), Err(e) => { error!("Failure to setup the gRPC API endpoint on {endpoint}: {e}"); std::process::exit(1); } } } ================================================ FILE: crates/topos/src/components/node/services/status.rs ================================================ use std::{ future::Future, io::Error, pin::Pin, task::{Context, Poll}, }; use futures::FutureExt; use topos_core::api::grpc::tce::v1::StatusRequest; use tower::Service; use tracing::{debug, error}; use crate::components::node::{commands::Status, NodeService}; impl Service for NodeService { type Response = bool; type Error = std::io::Error; type Future = Pin> + Send + 'static>>; fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { Poll::Ready(Ok(())) } fn call(&mut self, _: Status) -> Self::Future { let client = self.console_client.clone(); async move { debug!("Sending the request to the TCE server..."); match client.lock().await.status(StatusRequest {}).await { Ok(status_response) => { let status = status_response.into_inner(); debug!("Successfully fetched the status {:?} from the TCE", status); Ok(status.has_active_sample) } Err(err) => { error!("TCE server returned an error: {:?}", err); Err(Error::new(std::io::ErrorKind::Other, err)) } } } .boxed() } } ================================================ FILE: crates/topos/src/components/node/services.rs ================================================ pub(crate) mod status; 
================================================ FILE: crates/topos/src/components/regtest/commands/spam.rs ================================================ use clap::Args; #[derive(Args, Debug)] #[command( about = "Run a test topos certificate spammer to send test certificates to the network, \ generating randomly among the `nb_subnets` subnets the batch of `cert_per_batch` \ certificates at every `batch-interval`" )] pub struct Spam { /// The target node api endpoint. /// Multiple nodes could be specified as comma separated list /// e.g. `--target-nodes=http://[::1]:1340,http://[::1]:1341` #[clap( long, env = "TOPOS_NETWORK_SPAMMER_TARGET_NODES", value_delimiter = ',' )] pub target_nodes: Option>, /// Path to json file with list of target nodes as alternative to `--target-nodes` #[clap(long, env = "TOPOS_NETWORK_SPAMMER_TARGET_NODES_PATH")] pub target_nodes_path: Option, /// Seed for generation of local private signing keys and corresponding subnet ids. #[arg( long, env = "TOPOS_NETWORK_SPAMMER_LOCAL_KEY_SEED", default_value = "1" )] pub local_key_seed: u64, /// Certificates generated in one batch. Batch is generated every `batch-interval` milliseconds. #[arg( long, env = "TOPOS_NETWORK_SPAMMER_CERT_PER_BATCH", default_value = "1" )] pub cert_per_batch: u64, /// Number of subnets to use for certificate generation. For every certificate subnet id will be picked randomly. #[arg( long, env = "TOPOS_NETWORK_SPAMMER_NUMBER_OF_SUBNETS", default_value = "1" )] pub nb_subnets: u8, /// Number of batches to generate before finishing execution. /// If not specified, batches will be generated indefinitely. #[arg(long, env = "TOPOS_NETWORK_SPAMMER_NUMBER_OF_BATCHES")] pub nb_batches: Option, /// Time interval in milliseconds between generated batches of certificates #[arg( long, env = "TOPOS_NETWORK_SPAMMER_BATCH_INTERVAL", default_value = "2000" )] pub batch_interval: u64, /// List of generated certificate target subnets. No target subnets by default. 
/// For example `--target-subnets=0x3bc19e36ff1673910575b6727a974a9abd80c9a875d41ab3e2648dbfb9e4b518,0xa00d60b2b408c2a14c5d70cdd2c205db8985ef737a7e55ad20ea32cc9e7c417c` #[arg( long, env = "TOPOS_NETWORK_SPAMMER_TARGET_SUBNETS", value_delimiter = ',' )] pub target_subnets: Option>, /// Socket of the opentelemetry agent endpoint. /// If not provided open telemetry will not be used #[arg(long, env = "TOPOS_OTLP_AGENT")] pub otlp_agent: Option, /// Otlp service name. /// If not provided open telemetry will not be used #[arg(long, env = "TOPOS_OTLP_SERVICE_NAME")] pub otlp_service_name: Option, /// Flag to indicate usage of Kubernetes. #[arg( long, env = "TOPOS_NETWORK_SPAMMER_BENCHMARK", requires = "target_hosts", requires = "number" )] pub benchmark: bool, /// Template for generating target node entrypoints. /// e.g. `--hosts="http://validator-{N}:1340"` #[arg( long, env = "TOPOS_NETWORK_SPAMMER_TARGET_HOSTS", requires = "benchmark" )] pub target_hosts: Option, /// Number of nodes to generate based on the DNS template. 
#[arg(long, env = "TOPOS_NETWORK_SPAMMER_NUMBER", requires = "benchmark")] pub number: Option, } impl Spam {} ================================================ FILE: crates/topos/src/components/regtest/commands.rs ================================================ use clap::{Args, Subcommand}; mod spam; pub(crate) use spam::Spam; /// Run test commands (e.g., pushing a certificate to a TCE process) #[derive(Args, Debug)] pub(crate) struct RegtestCommand { #[clap(from_global)] pub(crate) verbose: u8, #[clap(subcommand)] pub(crate) subcommands: Option, } #[derive(Subcommand, Debug)] pub(crate) enum RegtestCommands { Spam(Box), } #[cfg(test)] mod tests { use super::*; #[test] fn test_run() { assert!(RegtestCommands::has_subcommand("spam")); } } ================================================ FILE: crates/topos/src/components/regtest/mod.rs ================================================ use self::commands::{RegtestCommand, RegtestCommands}; use opentelemetry::global; use tokio::{ spawn, sync::{mpsc, oneshot}, }; use topos_certificate_spammer::{error::Error, CertificateSpammerConfig}; use topos_telemetry::tracing::setup_tracing; use tracing::{error, info}; pub(crate) mod commands; pub(crate) async fn handle_command( RegtestCommand { verbose, subcommands, }: RegtestCommand, ) -> Result<(), Box> { match subcommands { Some(RegtestCommands::Spam(cmd)) => { let config = CertificateSpammerConfig { target_nodes: cmd.target_nodes, target_nodes_path: cmd.target_nodes_path, local_key_seed: cmd.local_key_seed, cert_per_batch: cmd.cert_per_batch, nb_subnets: cmd.nb_subnets, nb_batches: cmd.nb_batches, batch_interval: cmd.batch_interval, target_subnets: cmd.target_subnets, benchmark: cmd.benchmark, target_hosts: cmd.target_hosts, number: cmd.number, }; // Setup instrumentation if both otlp agent and otlp service name // are provided as arguments setup_tracing( verbose, false, cmd.otlp_agent, cmd.otlp_service_name, env!("TOPOS_VERSION"), )?; let (shutdown_sender, shutdown_receiver) = 
mpsc::channel::>(1); let mut runtime = spawn(topos_certificate_spammer::run(config, shutdown_receiver)); loop { tokio::select! { _ = tokio::signal::ctrl_c() => { info!("Received ctrl_c, shutting down application..."); let (shutdown_finished_sender, shutdown_finished_receiver) = oneshot::channel::<()>(); if let Err(e) = shutdown_sender.send(shutdown_finished_sender).await { error!("Error sending shutdown signal to Spammer application: {e}"); } if let Err(e) = shutdown_finished_receiver.await { error!("Error with shutdown receiver: {e}"); } info!("Shutdown procedure finished, exiting..."); } result = &mut runtime =>{ global::shutdown_tracer_provider(); if let Ok(Err(Error::BenchmarkConfig(ref msg))) = result { error!("Benchmark configuration error:\n{}", msg); std::process::exit(1); } if let Err(ref error) = result { error!("Unable to execute network spam command due to: {error}"); std::process::exit(1); } break; } } } Ok(()) } None => Ok(()), } } ================================================ FILE: crates/topos/src/components/setup/commands/subnet.rs ================================================ use clap::Args; use std::path::PathBuf; #[derive(Args, Debug)] #[command(about = "Install Polygon Edge node binary")] pub struct Subnet { /// Installation directory path for Polygon Edge binary. /// If not provided, Polygon Edge binary will be installed to the current directory #[clap(long, env = "TOPOS_SETUP_POLYGON_EDGE_DIR", default_value = ".")] pub path: PathBuf, /// Polygon Edge release version. 
If not provided, latest release version will be installed #[arg(long, env = "TOPOS_SETUP_SUBNET_RELEASE")] pub release: Option, /// Polygon Edge Github repository #[arg( long, env = "TOPOS_SETUP_SUBNET_REPOSITORY", default_value = "topos-protocol/polygon-edge" )] pub repository: String, /// List all available Polygon Edge release versions without installation #[arg(long, action)] pub list_releases: bool, } impl Subnet {} ================================================ FILE: crates/topos/src/components/setup/commands.rs ================================================ use clap::{Args, Subcommand}; mod subnet; pub(crate) use subnet::Subnet; /// Topos CLI subcommand for the setup of various Topos related components (e.g., installation of Polygon Edge binary) #[derive(Args, Debug)] pub(crate) struct SetupCommand { #[clap(subcommand)] pub(crate) subcommands: Option, } #[derive(Subcommand, Debug)] pub(crate) enum SetupCommands { Subnet(Box), } #[cfg(test)] mod tests { use super::*; #[test] fn test_run() { assert!(SetupCommands::has_subcommand("subnet")); } } ================================================ FILE: crates/topos/src/components/setup/mod.rs ================================================ use self::commands::{SetupCommand, SetupCommands}; use tokio::{signal, spawn}; use tracing::{error, info}; use topos::{install_polygon_edge, list_polygon_edge_releases}; pub(crate) mod commands; pub(crate) async fn handle_command( SetupCommand { subcommands }: SetupCommand, ) -> Result<(), Box> { match subcommands { Some(SetupCommands::Subnet(cmd)) => { spawn(async move { if cmd.list_releases { info!( "Retrieving release version list from repository: {}", &cmd.repository ); if let Err(e) = list_polygon_edge_releases(cmd.repository).await { error!("Error listing Polygon Edge release versions: {e}"); std::process::exit(1); } else { std::process::exit(0); } } else { info!( "Starting installation of Polygon Edge binary to target path: {}", &cmd.path.display() ); println!( 
"Starting installation of Polygon Edge binary to target path: {}", &cmd.path.display() ); if let Err(e) = install_polygon_edge(cmd.repository, cmd.release, cmd.path.as_path()).await { error!("Error installing Polygon Edge: {e}"); eprintln!("Error installing Polygon Edge: {e}"); std::process::exit(1); } else { info!("Polygon Edge installation successful"); println!("Polygon Edge installation successful"); std::process::exit(0); } } }); signal::ctrl_c() .await .expect("failed to listen for signals"); Ok(()) } None => { error!("No subcommand provided. You can use `--help` to see available subcommands."); eprintln!("No subcommand provided. You can use `--help` to see available subcommands."); std::process::exit(1); } } } ================================================ FILE: crates/topos/src/lib.rs ================================================ use flate2::read::GzDecoder; use serde_json::Value; use std::collections::HashSet; use std::fs::File; use std::io::Write; use std::path::Path; use tar::Archive; use tracing::{error, info}; const GITHUB_REPO_API: &str = "https://api.github.com/repos/"; #[derive(Debug, thiserror::Error)] pub enum Error { #[error("Http client error: {0}")] Http(reqwest::Error), #[error("Json parsing error: {0}")] InvalidJson(serde_json::Error), #[error("There is no valid Polygon Edge release available")] NoValidRelease, #[error("Invalid release metadata")] InvalidReleaseMetadata, #[error("File io error: {0}")] File(std::io::Error), #[error("Invalid private key")] InvalidPrivateKey, #[error("Invalid Validator address")] InvalidValidatorAddress, } fn map_arch(arch: &str) -> &str { match arch { "x86" => "x86", "x86_64" => "amd64", "aarch64" => "arm64", _ => "unknown", } } fn map_os(arch: &str) -> &str { match arch { "linux" => "linux", "macos" => "darwin", "windows" => "windows", _ => "unknown", } } /// Calculate expected polygon edge binary name for this platform /// By convention it is in the format `polygon-edge--` fn 
determine_binary_release_name(release: &PolygonEdgeRelease) -> String { "polygon-edge".to_string() + "_" + &release.version[1..] + "_" + map_os(std::env::consts::OS) + "_" + map_arch(std::env::consts::ARCH) + ".tar.gz" } /// Download Polygon Edge binary from repository to requested target directory async fn download_binary(file_name: &str, uri: &str, target_directory: &Path) -> Result<(), Error> { info!( "Downloading binary `{}` to target directory: {}", file_name, target_directory.display() ); let response = reqwest::get(uri).await.map_err(Error::Http)?; let download_file_path = target_directory.join(Path::new(file_name)); { //Download file let mut target_archive_file = File::create(&download_file_path).map_err(|error| { error!("Unable to create file: {error}"); Error::File(error) })?; target_archive_file .write_all(response.bytes().await.map_err(Error::Http)?.as_ref()) .map_err(Error::File)?; } { // Decompress archive let archive_file = File::open(&download_file_path).map_err(Error::File)?; let mut archive = Archive::new(GzDecoder::new(archive_file)); archive.unpack(target_directory).map_err(Error::File)?; } // Remove downloaded archive std::fs::remove_file(&download_file_path).map_err(Error::File)?; Ok(()) } #[derive(Debug)] struct PolygonEdgeRelease { version: String, binary: String, download_url: String, } async fn get_available_releases(repository: &str) -> Result, Error> { // Retrieve list of releases let uri = GITHUB_REPO_API.to_string() + repository + "/releases"; info!("Retrieving Polygon Edge release list {uri}"); let client = reqwest::Client::new(); let body = client .get(&uri) .header(reqwest::header::USER_AGENT, "Topos CLI") .send() .await .map_err(Error::Http)? 
.text() .await .map_err(Error::Http)?; let body: Vec = match serde_json::from_str(&body) { Ok(v) => v, Err(e) => { error!("Error parsing release list response: {e}"); return Err(Error::InvalidJson(e)); } }; if body.is_empty() { error!("There is no valid Polygon Edge release available"); return Err(Error::NoValidRelease); } let mut releases: Vec = Vec::new(); // Parse all releases // List of retrieved releases is already sorted, latest release being // the first one in the list for release in &body { let tag_name = release .get("name") .ok_or(Error::InvalidReleaseMetadata)? .to_string() .replace('\"', ""); let assets = release .get("assets") .ok_or(Error::InvalidReleaseMetadata)? .as_array() .ok_or(Error::InvalidReleaseMetadata)?; for asset in assets { if let Some(name) = asset.get("name").map(|v| v.to_string().replace('\"', "")) { if let Some(url) = asset .get("browser_download_url") .map(|v| v.to_string().replace('\"', "")) { releases.push(PolygonEdgeRelease { binary: name, download_url: url, version: tag_name.clone(), }) } } } } Ok(releases) } /// Get list of releases from github repository /// Download required release by version, or latest one if desired release was not provided async fn get_release( repository: &str, version: &Option, ) -> Result { let releases = get_available_releases(repository).await?; for release in releases { let expected_binary = determine_binary_release_name(&release); if let Some(version) = version { if &release.version == version && release.binary == expected_binary { return Ok(release); } } else if release.binary == expected_binary { return Ok(release); } } Err(Error::NoValidRelease) } pub async fn install_polygon_edge( repository: String, release: Option, path: &Path, ) -> Result<(), Error> { // Select release for installation let release = get_release(repository.as_str(), &release).await?; info!( "Selected release: {} from {}", release.version, release.download_url ); // Download and install Polygon Edge binary if let Err(e) = 
download_binary(&release.binary, &release.download_url, path).await { error!("Unable to install Polygon Edge binary {e}"); return Err(e); } Ok(()) } pub async fn list_polygon_edge_releases(repository: String) -> Result<(), Error> { // Retrieve list of available releases from the Github repository let releases = get_available_releases(&repository).await?; println!("Available Polygon Edge releases:"); releases .into_iter() .map(|r| r.version) .collect::>() .iter() .for_each(|r| { println!(" {}", r); }); Ok(()) } ================================================ FILE: crates/topos/src/main.rs ================================================ use clap::Parser; pub(crate) mod components; pub(crate) mod options; use crate::options::ToposCommand; use tracing_log::LogTracer; #[tokio::main] async fn main() -> Result<(), Box> { LogTracer::init()?; let args = options::Opt::parse(); match args.commands { ToposCommand::Setup(cmd) => components::setup::handle_command(cmd).await, ToposCommand::Node(cmd) => components::node::handle_command(cmd).await, ToposCommand::Regtest(cmd) => components::regtest::handle_command(cmd).await, } } ================================================ FILE: crates/topos/src/options/input_format.rs ================================================ use clap::ValueEnum; use serde::Serialize; #[derive(ValueEnum, Copy, Clone, Debug, Serialize)] pub(crate) enum InputFormat { Json, Plain, } ================================================ FILE: crates/topos/src/options.rs ================================================ use clap::{Parser, Subcommand}; use std::{ffi::OsString, path::PathBuf}; use crate::components::node::commands::NodeCommand; use crate::components::regtest::commands::RegtestCommand; use crate::components::setup::commands::SetupCommand; pub(crate) mod input_format; #[derive(Parser, Debug)] #[clap(name = "topos", about = "Topos CLI")] pub(crate) struct Opt { /// Defines the verbosity level #[arg( long, short = 'v', action = clap::ArgAction::Count, 
global = true )] pub(crate) verbose: u8, /// Disable color in logs #[arg(long, global = true, env = "TOPOS_LOG_NOCOLOR")] no_color: bool, /// Home directory for the configuration #[arg( long, env = "TOPOS_HOME", default_value = get_default_home(), global = true )] pub(crate) home: PathBuf, #[command(subcommand)] pub(crate) commands: ToposCommand, } /// If no path is given for the --home argument, we use the default one /// ~/.config/topos for a UNIX subsystem fn get_default_home() -> OsString { let mut home = dirs::home_dir().unwrap(); home.push(".config"); home.push("topos"); home.into_os_string() } #[derive(Subcommand, Debug)] pub(crate) enum ToposCommand { Setup(SetupCommand), Node(NodeCommand), Regtest(RegtestCommand), } ================================================ FILE: crates/topos/tests/cert_delivery.rs ================================================ use futures::{future::join_all, StreamExt}; use libp2p::PeerId; use rand::seq::{IteratorRandom, SliceRandom}; use rstest::*; use serial_test::serial; use std::collections::{HashMap, HashSet}; use std::time::Duration; use test_log::test; use tokio::spawn; use tokio::sync::mpsc; use tonic::transport::Uri; use topos_core::{ api::grpc::{ shared::v1::checkpoints::TargetCheckpoint, tce::v1::{ api_service_client::ApiServiceClient, console_service_client::ConsoleServiceClient, watch_certificates_request::OpenStream, watch_certificates_response::{CertificatePushed, Event}, StatusRequest, SubmitCertificateRequest, }, }, uci::{Certificate, SubnetId, CERTIFICATE_ID_LENGTH, SUBNET_ID_LENGTH}, }; use topos_test_sdk::{certificates::create_certificate_chains, tce::create_network}; use tracing::{debug, info, warn}; const NUMBER_OF_SUBNETS_PER_CLIENT: usize = 1; fn get_subset_of_subnets(subnets: &[SubnetId], subset_size: usize) -> Vec { let mut rng = rand::thread_rng(); Vec::from_iter( subnets .iter() .cloned() .choose_multiple(&mut rng, subset_size), ) } #[rstest] #[test(tokio::test)] #[timeout(Duration::from_secs(10))] 
#[serial] async fn start_a_cluster() { let mut peers_context = create_network(5, &[]).await; let mut status: Vec = Vec::new(); for (_peer_id, client) in peers_context.iter_mut() { let response = client .console_grpc_client .status(StatusRequest {}) .await .expect("Can't get status"); status.push(response.into_inner().has_active_sample); } assert!(status.iter().all(|s| *s)); } #[rstest] #[tokio::test] #[timeout(Duration::from_secs(30))] #[serial] // FIXME: This test is flaky, it fails sometimes because of gRPC connection error (StreamClosed) async fn cert_delivery() { let subscriber = tracing_subscriber::FmtSubscriber::builder() .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) .with_test_writer() .finish(); let _ = tracing::subscriber::set_global_default(subscriber); let peer_number = 5; let number_of_certificates_per_subnet = 2; let number_of_subnets = 3; let all_subnets: Vec = (1..=number_of_subnets) .map(|v| [v as u8; SUBNET_ID_LENGTH].into()) .collect(); // Generate certificates with respect to parameters let mut subnet_certificates = create_certificate_chains(all_subnets.as_ref(), number_of_certificates_per_subnet) .into_iter() .map(|(s, v)| (s, v.into_iter().map(|v| v.certificate).collect::>())) .collect::>(); debug!( "Generated certificates for distribution per subnet: {:#?}", &subnet_certificates ); // Calculate expected final set of delivered certificates (every subnet should receive certificates that has cross // chain transaction targeting it) let mut expected_certificates: HashMap> = HashMap::new(); for certificates in subnet_certificates.values() { for cert in certificates { for target_subnet in &cert.target_subnets { expected_certificates .entry(*target_subnet) .or_default() .insert(cert.clone()); } } } warn!("Starting the cluster..."); // List of peers (tce nodes) with their context let mut peers_context = create_network(peer_number, &[]).await; warn!("Cluster started, starting clients..."); // Connected tce clients are passing 
received certificates to this mpsc::Receiver, collect all of them let mut clients_delivered_certificates: Vec> = Vec::new(); // (Peer Id, Subnet Id, Certificate) let mut client_tasks: Vec> = Vec::new(); // Clients connected to TCE API Service run in async tasks let mut assign_at_least_one_client_to_every_subnet = all_subnets.clone(); for (peer_id, ctx) in peers_context.iter_mut() { let peer_id = *peer_id; // Make sure that every subnet is represented (connected through client) to at least 1 peer // After that assign subnets randomly to clients, 1 subnet per connection to TCE // as it is assumed that NUMBER_OF_SUBNETS_PER_CLIENT is 1 - that is also realistic case, topos node representing one subnet let client_subnet_id: SubnetId = if assign_at_least_one_client_to_every_subnet.is_empty() { get_subset_of_subnets(&all_subnets, NUMBER_OF_SUBNETS_PER_CLIENT).remove(0) } else { assign_at_least_one_client_to_every_subnet.pop().unwrap() }; // Number of subnets one client is representing, normally 1 ctx.connected_subnets = Some(vec![client_subnet_id]); debug!( "Opening client for peer id: {} with subnet_ids: {:?}", &peer_id, &client_subnet_id, ); // (Peer id, Subnet Id, Certificate) let (tx, rx) = mpsc::channel::<(PeerId, SubnetId, Certificate)>( number_of_certificates_per_subnet * number_of_subnets, ); clients_delivered_certificates.push(rx); let in_stream_subnet_id = client_subnet_id; let in_stream = async_stream::stream! 
{ yield OpenStream { target_checkpoint: Some(TargetCheckpoint{ target_subnet_ids: vec![in_stream_subnet_id.into()], positions: Vec::new() }), source_checkpoint: None }.into(); }; // Number of certificates expected to receive for every subnet, // to know when to close the TCE clients (and finish test) let mut incoming_certificates_number = expected_certificates.get(&client_subnet_id).unwrap().len(); // Open client connection to TCE service in separate async tasks let mut client = ctx.api_grpc_client.clone(); let expected_certificate_debug: Vec<_> = expected_certificates .get(&client_subnet_id) .unwrap() .iter() .map(|c| c.id) .collect(); let response = client.watch_certificates(in_stream).await.unwrap(); let client_task = spawn(async move { debug!( "Spawning client task for peer: {} waiting for {} certificates: {:?}", peer_id, incoming_certificates_number, expected_certificate_debug ); let mut resp_stream = response.into_inner(); while let Some(received) = resp_stream.next().await { let received = received.unwrap(); if let Some(Event::CertificatePushed(CertificatePushed { certificate: Some(certificate), .. 
})) = received.event { debug!( "Client peer_id: {} certificate id: {} delivered to subnet id {}, ", &peer_id, certificate.id.clone().unwrap(), &client_subnet_id ); tx.send((peer_id, client_subnet_id, certificate.try_into().unwrap())) .await .unwrap(); incoming_certificates_number -= 1; if incoming_certificates_number == 0 { // We have received all expected certificates for this subnet, end client break; } } } debug!("Finishing client for peer_id: {}", &peer_id); }); client_tasks.push(client_task); } info!( "Waiting for expected delivered certificates {:?}", expected_certificates ); // Delivery tasks collect certificates that clients of every TCE node // are receiving to reduce them to one channel (delivery_rx) let mut delivery_tasks = Vec::new(); // delivery_tx/delivery_rx Pass certificates from delivery tasks of every client to final collection of delivered certificates let (delivery_tx, mut delivery_rx) = mpsc::channel::<(PeerId, SubnetId, Certificate)>( peer_number * number_of_certificates_per_subnet * number_of_subnets, ); for (index, mut client_delivered_certificates) in clients_delivered_certificates.into_iter().enumerate() { let delivery_tx = delivery_tx.clone(); let delivery_task = tokio::spawn(async move { // Read certificates that every client has received info!("Delivery task for receiver {}", index); loop { let x = client_delivered_certificates.recv().await; match x { Some((peer_id, target_subnet_id, cert)) => { info!( "Delivered certificate on peer_Id: {} cert id: {} from source subnet \ id: {} to target subnet id {}", &peer_id, cert.id, cert.source_subnet_id, target_subnet_id ); // Send certificates from every peer to one delivery_rx receiver delivery_tx .send((peer_id, target_subnet_id, cert)) .await .unwrap(); } _ => break, } } // We will end this loop when sending TCE client has dropped channel sender and there // are not certificates in channel info!("End delivery task for receiver {}", index); }); delivery_tasks.push(delivery_task); } 
drop(delivery_tx); // Broadcast multiple certificates from all subnets info!("Broadcasting certificates..."); for (peer_id, client) in peers_context.iter_mut() { // If there exist of connected subnets to particular TCE if let Some(ref connected_subnets) = client.connected_subnets { // Iterate all subnets connected to TCE (normally 1) for subnet_id in connected_subnets { if let Some(certificates) = subnet_certificates.get_mut(subnet_id) { // Iterate all certificates meant to be sent to the particular network for cert in certificates.iter() { info!( "Sending certificate id={} from subnet id: {} to peer id: {}", &cert.id, &subnet_id, &peer_id ); let _ = client .api_grpc_client .submit_certificate(SubmitCertificateRequest { certificate: Some(cert.clone().into()), }) .await .expect("Can't send certificate"); } // Remove sent certificate, every certificate is sent only once to TCE network certificates.clear(); } } } } let assertion = async move { info!("Waiting for all delivery tasks"); join_all(delivery_tasks).await; info!("All expected clients delivered"); let mut delivered_certificates: HashMap>> = HashMap::new(); // Collect all certificates per peer_id and subnet_id while let Some((peer_id, receiving_subnet_id, cert)) = delivery_rx.recv().await { debug!("Counting delivered certificate cert id: {:?}", cert.id); delivered_certificates .entry(peer_id) .or_default() .entry(receiving_subnet_id) .or_default() .insert(cert); } info!("All incoming certificates received"); // Check received certificates for every peer and every subnet for delivered_certificates_per_peer in delivered_certificates.values() { for (subnet_id, delivered_certificates_per_subnet) in delivered_certificates_per_peer { assert_eq!( expected_certificates.get(subnet_id).unwrap().len(), delivered_certificates_per_subnet.len() ); assert_eq!( expected_certificates.get(subnet_id).unwrap(), delivered_certificates_per_subnet ); } } }; // Set big timeout to prevent flaky fails. 
Instead fail/panic early in the test to indicate actual error if tokio::time::timeout(std::time::Duration::from_secs(30), assertion) .await .is_err() { panic!("Timeout waiting for command"); } } // Picks a random peer and sends it a certificate. All other peers listen for broadcast certs. // Three possible outcomes: // 1. No errors, returns Ok // 2. There were errors, returns a list of all errors encountered // 3. timeout async fn assert_certificate_full_delivery( timeout_broadcast: Duration, peers: Vec, ) -> Result<(), Box> { use std::io::{Error, ErrorKind}; let random_peer: Uri = peers .choose(&mut rand::thread_rng()) .ok_or_else(|| { Error::new( ErrorKind::Other, "Unable to select a random peer from the list: {peers:?}", ) })? .try_into()?; let pushed_certificate = Certificate::new_with_default_fields( [0u8; CERTIFICATE_ID_LENGTH], [1u8; SUBNET_ID_LENGTH].into(), &[[2u8; SUBNET_ID_LENGTH].into()], )?; let certificate_id = pushed_certificate.id; let mut join_handlers = Vec::new(); // check that all nodes delivered the certificate for peer in peers { join_handlers.push(tokio::spawn(async move { let peer_string = peer.clone(); let mut client = ConsoleServiceClient::connect(peer_string.clone()) .await .map_err(|_| (peer_string.clone(), "Unable to connect to the api console"))?; let result = client.status(StatusRequest {}).await.map_err(|_| { ( peer_string.clone(), "Unable to get the status from the api console", ) })?; let status = result.into_inner(); if !status.has_active_sample { return Err((peer_string, "failed to find active sample")); } let mut client = ApiServiceClient::connect(peer_string.clone()) .await .map_err(|_| (peer_string.clone(), "Unable to connect to the TCE api"))?; let in_stream = async_stream::stream! 
{ yield OpenStream { target_checkpoint: Some(TargetCheckpoint { target_subnet_ids: vec![[2u8; SUBNET_ID_LENGTH].into()], positions: vec![] }), source_checkpoint: None }.into() }; let response = client.watch_certificates(in_stream).await.map_err(|_| { ( peer_string.clone(), "Unable to execute the watch_certificates on TCE api", ) })?; let mut resp_stream = response.into_inner(); async move { while let Some(received) = resp_stream.next().await { let received = received.unwrap(); if let Some(Event::CertificatePushed(CertificatePushed { certificate: Some(certificate), .. })) = received.event { // unwrap is safe because we are sure that the certificate is present if certificate_id == certificate.id.unwrap() { debug!("Received the certificate on {}", peer_string); return Ok(()); } } } Err((peer_string.clone(), "didn't receive any certificate")) } .await })); } let mut client = ApiServiceClient::connect(random_peer.clone()).await?; // submit a certificate to one node _ = client .submit_certificate(SubmitCertificateRequest { certificate: Some(pushed_certificate.into()), }) .await?; tokio::time::sleep(timeout_broadcast).await; join_all(join_handlers) .await .iter() .for_each(|result| match result { Err(e) => { panic!("Join error: {e}"); } Ok(Err((peer, error))) => { panic!("Peer {peer} error: {error}"); } _ => {} }); Ok(()) } async fn run_assert_certificate_full_delivery( number_of_nodes: usize, timeout_broadcast: Duration, ) -> Result<(), Box> { let mut peers_context = create_network(number_of_nodes, &[]).await; for (_peer_id, client) in peers_context.iter_mut() { let response = client .console_grpc_client .status(StatusRequest {}) .await .expect("Can't get status"); assert!(response.into_inner().has_active_sample); } let nodes = peers_context .iter() .map(|peer| peer.1.api_entrypoint.clone()) .collect::>(); debug!("Nodes used in test: {:?}", nodes); let assertion = async move { let peers: Vec = nodes .into_iter() .map(TryInto::try_into) .collect::>() .map_err(|e| 
format!("Unable to parse node list: {e}")) .expect("Valid node list"); match assert_certificate_full_delivery(timeout_broadcast, peers).await { Ok(()) => { info!("Check certificate delivery passed for network of {number_of_nodes}!"); } Err(e) => { panic!("Test error: {e}"); } } }; assertion.await; Ok(()) } mod serial_integration { use super::*; #[rstest] #[case(5usize)] #[case(9usize)] #[test_log::test(tokio::test)] #[trace] #[timeout(Duration::from_secs(30))] async fn push_and_deliver_cert( #[case] number_of_nodes: usize, ) -> Result<(), Box> { run_assert_certificate_full_delivery(number_of_nodes, Duration::from_secs(10)).await } } ================================================ FILE: crates/topos/tests/config.rs ================================================ use assert_cmd::prelude::*; use regex::Regex; use std::path::PathBuf; use std::process::{Command, Stdio}; use tempfile::tempdir; use tokio::fs::OpenOptions; use tokio::io::{AsyncReadExt, AsyncSeekExt, AsyncWriteExt}; use toml::map::Map; use toml::Value; use topos_test_sdk::create_folder; use crate::utils::setup_polygon_edge; mod utils; mod serial_integration { use rstest::rstest; use sysinfo::{Pid, PidExt, ProcessExt, Signal, System, SystemExt}; use super::*; #[rstest] #[tokio::test] async fn handle_command_init( #[from(create_folder)] home: PathBuf, ) -> Result<(), Box> { let path = setup_polygon_edge(home.to_str().unwrap()).await; let mut cmd = Command::cargo_bin("topos")?; cmd.arg("node") .arg("--edge-path") .arg(path) .arg("init") .arg("--home") .arg(home.to_str().unwrap()); let output = cmd.assert().success(); let result: &str = std::str::from_utf8(&output.get_output().stdout)?; assert!(result.contains("Created node config file")); // Verification: check that the config file was created let config_path = home.join("node").join("default").join("config.toml"); assert!(config_path.exists()); // Further verification might include checking the contents of the config file let config_contents = 
std::fs::read_to_string(&config_path).unwrap(); assert!(config_contents.contains("[base]")); assert!(config_contents.contains("name = \"default\"")); assert!(config_contents.contains("[edge]")); assert!(config_contents.contains("[tce]")); Ok(()) } #[tokio::test] async fn handle_command_init_without_polygon_edge() -> Result<(), Box> { let tmp_home_dir = tempdir()?; let mut cmd = Command::cargo_bin("topos")?; cmd.arg("node") .arg("init") .arg("--home") .arg(tmp_home_dir.path().to_str().unwrap()) .arg("--no-edge-process"); let output = cmd.assert().success(); let result: &str = std::str::from_utf8(&output.get_output().stdout)?; assert!(result.contains("Created node config file")); let home = PathBuf::from(tmp_home_dir.path()); // Verification: check that the config file was created let config_path = home.join("node").join("default").join("config.toml"); assert!(config_path.exists()); // Further verification might include checking the contents of the config file let config_contents = std::fs::read_to_string(&config_path).unwrap(); assert!(config_contents.contains("[base]")); assert!(config_contents.contains("name = \"default\"")); assert!(config_contents.contains("[tce]")); Ok(()) } #[test] fn nothing_written_if_failure() -> Result<(), Box> { let tmp_home_dir = tempdir()?; let mut cmd = Command::cargo_bin("topos")?; cmd.arg("node") .arg("--edge-path") .arg("./inexistent/folder/") // Command will fail .arg("init") .arg("--home") .arg(tmp_home_dir.path().to_str().unwrap()); // Should fail cmd.assert().failure(); let home = PathBuf::from(tmp_home_dir.path().to_str().unwrap()); // Check that files were NOT created let config_path = home.join("node").join("default"); assert!(!config_path.exists()); Ok(()) } #[tokio::test] async fn handle_command_init_with_custom_name() -> Result<(), Box> { let tmp_home_dir = tempdir()?; let node_name = "TEST_NODE"; let path = setup_polygon_edge(tmp_home_dir.path().to_str().unwrap()).await; let mut cmd = Command::cargo_bin("topos")?; 
cmd.arg("node") .arg("--edge-path") .arg(path.clone()) .arg("init") .arg("--home") .arg(tmp_home_dir.path().to_str().unwrap()) .arg("--name") .arg(node_name); let output = cmd.assert().success(); let result: &str = std::str::from_utf8(&output.get_output().stdout)?; assert!(result.contains("Created node config file")); let home = PathBuf::from(path); // Verification: check that the config file was created let config_path = home.join("node").join(node_name).join("config.toml"); assert!(config_path.exists()); // Further verification might include checking the contents of the config file let config_contents = std::fs::read_to_string(&config_path).unwrap(); assert!(config_contents.contains("[base]")); assert!(config_contents.contains(node_name)); assert!(config_contents.contains("[tce]")); Ok(()) } /// Test node init env arguments #[rstest] #[tokio::test] async fn command_init_precedence_env( create_folder: PathBuf, ) -> Result<(), Box> { let tmp_home_directory = create_folder; // Test node init with env variables let node_init_home_env = tmp_home_directory .to_str() .expect("path names are valid utf-8"); let node_edge_path_env = setup_polygon_edge(node_init_home_env).await; let node_init_name_env = "TEST_NODE_ENV"; let node_init_role_env = "full-node"; let node_init_subnet_env = "topos-env"; let mut cmd = Command::cargo_bin("topos")?; cmd.arg("node") .env("TOPOS_POLYGON_EDGE_BIN_PATH", &node_edge_path_env) .env("TOPOS_HOME", node_init_home_env) .env("TOPOS_NODE_NAME", node_init_name_env) .env("TOPOS_NODE_ROLE", node_init_role_env) .env("TOPOS_NODE_SUBNET", node_init_subnet_env) .arg("init"); let output = cmd.assert().success(); let result: &str = std::str::from_utf8(&output.get_output().stdout)?; // Test node init with cli flags assert!(result.contains("Created node config file")); let home = PathBuf::from(node_init_home_env); // Verification: check that the config file was created let config_path = home .join("node") .join(node_init_name_env) .join("config.toml"); 
assert!(config_path.exists()); // Check if config file params are according to env params let config_contents = std::fs::read_to_string(&config_path).unwrap(); assert!(config_contents.contains("name = \"TEST_NODE_ENV\"")); assert!(config_contents.contains("role = \"fullnode\"")); assert!(config_contents.contains("subnet = \"topos-env\"")); Ok(()) } /// Test node cli arguments precedence over env arguments #[tokio::test] async fn command_init_precedence_cli_env() -> Result<(), Box> { let tmp_home_dir_env = create_folder("command_init_precedence_cli_env"); let tmp_home_dir_cli = create_folder("command_init_precedence_cli_env"); // Test node init with both cli and env flags // Cli arguments should take precedence over env variables let node_init_home_env = tmp_home_dir_env.to_str().unwrap(); let node_edge_path_env = setup_polygon_edge(node_init_home_env).await; let node_init_name_env = "TEST_NODE_ENV"; let node_init_role_env = "full-node"; let node_init_subnet_env = "topos-env"; let node_init_home_cli = tmp_home_dir_cli.to_str().unwrap(); let node_edge_path_cli = node_edge_path_env.clone(); let node_init_name_cli = "TEST_NODE_CLI"; let node_init_role_cli = "sequencer"; let node_init_subnet_cli = "topos-cli"; let mut cmd = Command::cargo_bin("topos")?; cmd.arg("node") .env("TOPOS_POLYGON_EDGE_BIN_PATH", &node_edge_path_env) .env("TOPOS_HOME", node_init_home_env) .env("TOPOS_NODE_NAME", node_init_name_env) .env("TOPOS_NODE_ROLE", node_init_role_env) .env("TOPOS_NODE_SUBNET", node_init_subnet_env) .arg("--edge-path") .arg(node_edge_path_cli) .arg("init") .arg("--name") .arg(node_init_name_cli) .arg("--home") .arg(node_init_home_cli) .arg("--role") .arg(node_init_role_cli) .arg("--subnet") .arg(node_init_subnet_cli); let output = cmd.assert().success(); let result: &str = std::str::from_utf8(&output.get_output().stdout)?; assert!(result.contains("Created node config file")); let home = PathBuf::from(node_init_home_cli); // Verification: check that the config file was 
created let config_path = home .join("node") .join(node_init_name_cli) .join("config.toml"); assert!(config_path.exists()); // Check if config file params are according to cli params let config_contents = std::fs::read_to_string(&config_path).unwrap(); assert!(config_contents.contains("name = \"TEST_NODE_CLI\"")); assert!(config_contents.contains("role = \"sequencer\"")); assert!(config_contents.contains("subnet = \"topos-cli\"")); Ok(()) } /// Test node up running from config file #[rstest] #[test_log::test(tokio::test)] async fn command_node_up( #[from(create_folder)] tmp_home_dir: PathBuf, ) -> Result<(), Box> { // Create config file let node_up_home_env = tmp_home_dir.to_str().unwrap(); let node_edge_path_env = setup_polygon_edge(node_up_home_env).await; let node_up_name_env = "TEST_NODE_UP"; let node_up_role_env = "full-node"; let node_up_subnet_env = "topos"; let mut cmd = Command::cargo_bin("topos")?; cmd.arg("node") .env("TOPOS_POLYGON_EDGE_BIN_PATH", &node_edge_path_env) .env("TOPOS_HOME", node_up_home_env) .env("TOPOS_NODE_NAME", node_up_name_env) .env("TOPOS_NODE_ROLE", node_up_role_env) .env("TOPOS_NODE_SUBNET", node_up_subnet_env) .arg("init"); let output = cmd.assert().success(); let result: &str = std::str::from_utf8(&output.get_output().stdout)?; assert!(result.contains("Created node config file")); // Run node init with cli flags let home = PathBuf::from(node_up_home_env); // Verification: check that the config file was created let config_path = home.join("node").join(node_up_name_env).join("config.toml"); assert!(config_path.exists()); // Generate polygon edge genesis file let polygon_edge_bin = format!("{}/polygon-edge", node_edge_path_env); utils::generate_polygon_edge_genesis_file( &polygon_edge_bin, node_up_home_env, node_up_name_env, node_up_subnet_env, ) .await?; let polygon_edge_genesis_path = home .join("subnet") .join(node_up_subnet_env) .join("genesis.json"); assert!(polygon_edge_genesis_path.exists()); let mut cmd = 
Command::cargo_bin("topos")?; cmd.arg("node") .env("TOPOS_POLYGON_EDGE_BIN_PATH", &node_edge_path_env) .env("TOPOS_HOME", node_up_home_env) .env("TOPOS_NODE_NAME", node_up_name_env) .env("RUST_LOG", "topos=debug") .arg("up") .stdout(Stdio::piped()); let cmd = tokio::process::Command::from(cmd).spawn().unwrap(); let pid = cmd.id().unwrap(); let _ = tokio::time::sleep(std::time::Duration::from_secs(10)).await; let s = System::new_all(); if let Some(process) = s.process(Pid::from_u32(pid)) { if process.kill_with(Signal::Term).is_none() { eprintln!("This signal isn't supported on this platform"); } } if let Ok(output) = cmd.wait_with_output().await { assert!(output.status.success()); let stdout = output.stdout; let stdout = String::from_utf8_lossy(&stdout); let reg = Regex::new(r#"Local node is listening on "\/ip4\/.*\/tcp\/9090\/p2p\/"#).unwrap(); assert!(reg.is_match(&stdout)); } else { panic!("Failed to shutdown gracefully"); } // Cleanup std::fs::remove_dir_all(node_up_home_env)?; Ok(()) } /// Test node up running from config file #[rstest::rstest] #[test_log::test(tokio::test)] async fn command_node_up_with_old_config( #[from(create_folder)] tmp_home_dir: PathBuf, ) -> Result<(), Box> { // Create config file let node_up_home_env = tmp_home_dir.to_str().unwrap(); let node_edge_path_env = setup_polygon_edge(node_up_home_env).await; let node_up_name_env = "test_node_up_old_config"; let node_up_subnet_env = "topos"; let mut cmd = Command::cargo_bin("topos")?; cmd.arg("node") .env("TOPOS_POLYGON_EDGE_BIN_PATH", &node_edge_path_env) .env("TOPOS_HOME", node_up_home_env) .env("TOPOS_NODE_NAME", node_up_name_env) .env("TOPOS_NODE_SUBNET", node_up_subnet_env) .arg("init"); let output = cmd.assert().success(); let result: &str = std::str::from_utf8(&output.get_output().stdout)?; assert!(result.contains("Created node config file")); // Run node init with cli flags let home = PathBuf::from(node_up_home_env); // Verification: check that the config file was created let 
config_path = home.join("node").join(node_up_name_env).join("config.toml"); assert!(config_path.exists()); let mut file = OpenOptions::new() .read(true) .write(true) .open(config_path.clone()) .await?; let mut buf = String::new(); let _ = file.read_to_string(&mut buf).await?; let mut current: Map = toml::from_str(&buf)?; let tce = current.get_mut("tce").unwrap(); if let Value::Table(tce_table) = tce { tce_table.insert( "libp2p-api-addr".to_string(), Value::String("0.0.0.0:9091".to_string()), ); tce_table.insert("minimum-tce-cluster-size".to_string(), Value::Integer(0)); tce_table.insert("network-bootstrap-timeout".to_string(), Value::Integer(5)); tce_table.remove("p2p"); } else { panic!("TCE configuration table malformed"); } let _ = file.set_len(0).await; let _ = file.seek(std::io::SeekFrom::Start(0)).await; let _ = file.write_all(toml::to_string(¤t)?.as_bytes()).await; drop(file); // Generate polygon edge genesis file let polygon_edge_bin = format!("{}/polygon-edge", node_edge_path_env); utils::generate_polygon_edge_genesis_file( &polygon_edge_bin, node_up_home_env, node_up_name_env, node_up_subnet_env, ) .await?; let polygon_edge_genesis_path = home .join("subnet") .join(node_up_subnet_env) .join("genesis.json"); assert!(polygon_edge_genesis_path.exists()); let mut cmd = Command::cargo_bin("topos")?; cmd.arg("node") .env("TOPOS_POLYGON_EDGE_BIN_PATH", &node_edge_path_env) .env("TOPOS_NODE_NAME", node_up_name_env) .env("TOPOS_HOME", node_up_home_env) .env("RUST_LOG", "topos=info") .arg("up") .stdout(Stdio::piped()); let cmd = tokio::process::Command::from(cmd).spawn().unwrap(); let pid = cmd.id().unwrap(); let _ = tokio::time::sleep(std::time::Duration::from_secs(10)).await; let s = System::new_all(); if let Some(process) = s.process(Pid::from_u32(pid)) { if process.kill_with(Signal::Term).is_none() { eprintln!("This signal isn't supported on this platform"); } } if let Ok(output) = cmd.wait_with_output().await { assert!(output.status.success()); let stdout = 
output.stdout; let stdout = String::from_utf8_lossy(&stdout); let reg = Regex::new(r#"Local node is listening on "\/ip4\/.*\/tcp\/9091\/p2p\/"#).unwrap(); assert!(reg.is_match(&stdout)); } else { panic!("Failed to shutdown gracefully"); } // Cleanup std::fs::remove_dir_all(node_up_home_env)?; Ok(()) } } ================================================ FILE: crates/topos/tests/node.rs ================================================ mod utils; use std::{path::PathBuf, process::Command}; use assert_cmd::prelude::*; use sysinfo::{Pid, PidExt, ProcessExt, Signal, System, SystemExt}; use tempfile::tempdir; use crate::utils::generate_polygon_edge_genesis_file; #[test] fn help_display() -> Result<(), Box> { let mut cmd = Command::cargo_bin("topos")?; cmd.arg("node").arg("-h"); let output = cmd.assert().success(); let result: &str = std::str::from_utf8(&output.get_output().stdout)?; insta::assert_snapshot!(utils::sanitize_config_folder_path(result)); Ok(()) } mod serial_integration { use super::*; /// Test node up running from config file #[test_log::test(tokio::test)] async fn command_node_up_sigterm() -> Result<(), Box> { let tmp_home_dir = tempdir()?; // Create config file let node_up_home_env = tmp_home_dir.path().to_str().unwrap(); let node_edge_path_env = utils::setup_polygon_edge(node_up_home_env).await; let node_up_name_env = "TEST_NODE_UP"; let node_up_role_env = "full-node"; let node_up_subnet_env = "topos-up-env-subnet"; let mut cmd = Command::cargo_bin("topos")?; cmd.arg("node") .env("TOPOS_POLYGON_EDGE_BIN_PATH", &node_edge_path_env) .env("TOPOS_HOME", node_up_home_env) .env("TOPOS_NODE_NAME", node_up_name_env) .env("TOPOS_NODE_ROLE", node_up_role_env) .env("TOPOS_NODE_SUBNET", node_up_subnet_env) .arg("init"); let output = cmd.assert().success(); let result: &str = std::str::from_utf8(&output.get_output().stdout)?; assert!(result.contains("Created node config file")); // Run node init with cli flags let home = PathBuf::from(node_up_home_env); // Verification: 
check that the config file was created let config_path = home.join("node").join(node_up_name_env).join("config.toml"); assert!(config_path.exists()); // Generate polygon edge genesis file let polygon_edge_bin = format!("{}/polygon-edge", node_edge_path_env); generate_polygon_edge_genesis_file( &polygon_edge_bin, node_up_home_env, node_up_name_env, node_up_subnet_env, ) .await?; let polygon_edge_genesis_path = home .join("subnet") .join(node_up_subnet_env) .join("genesis.json"); assert!(polygon_edge_genesis_path.exists()); let mut cmd = Command::cargo_bin("topos")?; cmd.arg("node") .env("TOPOS_POLYGON_EDGE_BIN_PATH", &node_edge_path_env) .env("TOPOS_HOME", node_up_home_env) .env("TOPOS_NODE_NAME", node_up_name_env) .arg("up"); let mut cmd = tokio::process::Command::from(cmd).spawn().unwrap(); let pid = cmd.id().unwrap(); let _ = tokio::time::sleep(std::time::Duration::from_secs(10)).await; let s = System::new_all(); if let Some(process) = s.process(Pid::from_u32(pid)) { if process.kill_with(Signal::Term).is_none() { eprintln!("This signal isn't supported on this platform"); } } if let Ok(code) = cmd.wait().await { assert!(code.success()); } else { panic!("Failed to shutdown gracefully"); } // Cleanup std::fs::remove_dir_all(node_up_home_env)?; Ok(()) } #[test_log::test(tokio::test)] async fn command_node_up_custom_polygon() -> Result<(), Box> { let tmp_home_dir = tempdir()?; // Create config file let node_up_home_env = tmp_home_dir.path().to_str().unwrap(); let custom_path = tmp_home_dir.path().join("custom_path"); let node_edge_path_env = utils::setup_polygon_edge(custom_path.to_str().unwrap()).await; let node_up_name_env = "TEST_NODE_UP"; let node_up_role_env = "full-node"; let node_up_subnet_env = "topos-up-env-subnet"; let mut cmd = Command::cargo_bin("topos")?; cmd.arg("node") .env("TOPOS_POLYGON_EDGE_BIN_PATH", &node_edge_path_env) .env("TOPOS_HOME", node_up_home_env) .env("TOPOS_NODE_NAME", node_up_name_env) .env("TOPOS_NODE_ROLE", node_up_role_env) 
.env("TOPOS_NODE_SUBNET", node_up_subnet_env) .arg("init"); let output = cmd.assert().success(); let result: &str = std::str::from_utf8(&output.get_output().stdout)?; assert!(result.contains("Created node config file")); // Run node init with cli flags let home = PathBuf::from(node_up_home_env); // Verification: check that the config file was created let config_path = home.join("node").join(node_up_name_env).join("config.toml"); assert!(config_path.exists()); // Generate polygon edge genesis file let polygon_edge_bin = format!("{}/polygon-edge", node_edge_path_env); generate_polygon_edge_genesis_file( &polygon_edge_bin, node_up_home_env, node_up_name_env, node_up_subnet_env, ) .await?; let polygon_edge_genesis_path = home .join("subnet") .join(node_up_subnet_env) .join("genesis.json"); assert!(polygon_edge_genesis_path.exists()); let mut cmd = Command::cargo_bin("topos")?; cmd.arg("node") .env("TOPOS_POLYGON_EDGE_BIN_PATH", &node_edge_path_env) .env("TOPOS_HOME", node_up_home_env) .env("TOPOS_NODE_NAME", node_up_name_env) .arg("up"); let mut cmd = tokio::process::Command::from(cmd).spawn().unwrap(); let pid = cmd.id().unwrap(); let _ = tokio::time::sleep(std::time::Duration::from_secs(10)).await; let s = System::new_all(); if let Some(process) = s.process(Pid::from_u32(pid)) { if process.kill_with(Signal::Term).is_none() { eprintln!("This signal isn't supported on this platform"); } } if let Ok(code) = cmd.wait().await { assert!(code.success()); } else { panic!("Failed to shutdown gracefully"); } // Cleanup std::fs::remove_dir_all(node_up_home_env)?; Ok(()) } } ================================================ FILE: crates/topos/tests/regtest.rs ================================================ mod utils; use std::process::Command; use assert_cmd::prelude::*; #[test] fn regtest_spam_help_display() -> Result<(), Box> { let mut cmd = Command::cargo_bin("topos")?; cmd.arg("regtest").arg("spam").arg("-h"); let output = cmd.assert().success(); let result: &str = 
std::str::from_utf8(&output.get_output().stdout)?; insta::assert_snapshot!(utils::sanitize_config_folder_path(result)); Ok(()) } #[test] fn regtest_spam_invalid_hosts() -> Result<(), Box> { let mut cmd = Command::cargo_bin("topos")?; cmd.arg("regtest") .arg("spam") .arg("--benchmark") .arg("--target-hosts") .arg("asd") .arg("--number") .arg("1"); let output = cmd.assert().failure(); let result: &str = std::str::from_utf8(&output.get_output().stdout)?; assert!(result.contains( "Invalid target-hosts pattern. Has to be in the format of http://validator-1:9090" )); Ok(()) } #[test] fn regtest_spam_invalid_number() -> Result<(), Box> { let mut cmd = Command::cargo_bin("topos")?; cmd.arg("regtest") .arg("spam") .arg("--benchmark") .arg("--target-hosts") .arg(" http://validator-{N}:9090") .arg("--number") .arg("dasd"); cmd.assert().failure(); Ok(()) } ================================================ FILE: crates/topos/tests/setup.rs ================================================ mod utils; use std::{fs, process::Command}; use assert_cmd::prelude::*; use tempfile::tempdir; #[test] fn setup_subnet_install_edge() -> Result<(), Box> { let tmp_home_dir = tempdir()?; let mut cmd = Command::cargo_bin("topos")?; cmd.arg("setup") .arg("subnet") .arg("--path") .arg(tmp_home_dir.path()); let output = cmd.assert().success(); let result: &str = std::str::from_utf8(&output.get_output().stdout)?; assert!(result.contains("Polygon Edge installation successful")); Ok(()) } #[test] fn setup_with_no_arguments() -> Result<(), Box> { let mut cmd = Command::cargo_bin("topos")?; cmd.arg("setup"); let output = cmd.assert().failure(); let result: &str = std::str::from_utf8(&output.get_output().stderr)?; assert!(result .contains("No subcommand provided. 
You can use `--help` to see available subcommands.")); Ok(()) } #[test] fn setup_subnet_fail_to_install_release() -> Result<(), Box> { let tmp_home_dir = tempdir()?; let mut cmd = Command::cargo_bin("topos")?; cmd.arg("setup") .arg("subnet") .arg("--path") .arg(tmp_home_dir.path()) .arg("--release") .arg("invalid"); let output = cmd.assert().failure(); let result: &str = std::str::from_utf8(&output.get_output().stderr)?; assert!(result.contains( "Error installing Polygon Edge: There is no valid Polygon Edge release available" )); Ok(()) } #[test] fn setup_subnet_install_edge_custom_path() -> Result<(), Box> { let tmp_home_dir = tempdir()?; let custom_path = tmp_home_dir.path().join("custom_path"); fs::create_dir(&custom_path).unwrap(); let mut cmd = Command::cargo_bin("topos")?; cmd.arg("setup") .arg("subnet") .arg("--path") .arg(&custom_path); let output = cmd.assert().success(); let result: &str = std::str::from_utf8(&output.get_output().stdout)?; assert!(result.contains("Polygon Edge installation successful")); let file = fs::read_dir(&custom_path) .unwrap() .filter_map(|x| match x.ok() { Some(f) if f.path().ends_with("polygon-edge") => Some(f), _ => None, }) .last() .unwrap(); assert!(file.path().starts_with(&custom_path)); Ok(()) } #[test] fn setup_subnet_install_edge_custom_path_env() -> Result<(), Box> { let tmp_home_dir = tempdir()?; let custom_path = tmp_home_dir.path().join("custom_path"); fs::create_dir(&custom_path).unwrap(); let mut cmd = Command::cargo_bin("topos")?; cmd.env("TOPOS_SETUP_POLYGON_EDGE_DIR", &custom_path) .arg("setup") .arg("subnet"); let output = cmd.assert().success(); let result: &str = std::str::from_utf8(&output.get_output().stdout)?; assert!(result.contains("Polygon Edge installation successful")); let file = fs::read_dir(&custom_path) .unwrap() .filter_map(|x| match x.ok() { Some(f) if f.path().ends_with("polygon-edge") => Some(f), _ => None, }) .last() .unwrap(); assert!(file.path().starts_with(&custom_path)); Ok(()) } 
================================================ FILE: crates/topos/tests/snapshots/node__help_display.snap ================================================ --- source: crates/topos/tests/node.rs expression: "utils::sanitize_config_folder_path(result)" --- Utility to manage your nodes in the Topos network Usage: topos node [OPTIONS] [COMMAND] Commands: up Spawn your node init Setup your node status Get node status help Print this message or the help of the given subcommand(s) Options: --edge-path Installation directory path for Polygon Edge binary [env: TOPOS_POLYGON_EDGE_BIN_PATH=] [default: .] -v, --verbose... Defines the verbosity level --no-color Disable color in logs [env: TOPOS_LOG_NOCOLOR=] --home Home directory for the configuration [env: TOPOS_HOME=] [default: /home/runner/.config/topos] -h, --help Print help ================================================ FILE: crates/topos/tests/snapshots/push_certificate__help_display.snap ================================================ --- source: crates/topos/tests/push-certificate.rs expression: "utils::sanitize_config_folder_path(result)" --- Push a certificate to a TCE process Usage: topos regtest push-certificate [OPTIONS] Options: -f, --format [default: plain] [possible values: json, plain] -v, --verbose... Defines the verbosity level --no-color Disable color in logs [env: TOPOS_LOG_NOCOLOR=] -t, --timeout Global timeout for the command [default: 60] --home Home directory for the configuration [env: TOPOS_HOME=] [default: /home/runner/.config/topos] --timeout-broadcast Seconds to wait before asserting the broadcast [default: 30] -n, --nodes The node list to be used, can be a file path or a comma separated list of Uri. 
If not provided, stdin is listened [env: TARGET_NODES_PATH=] -h, --help Print help ================================================ FILE: crates/topos/tests/snapshots/regtest__regtest_spam_help_display.snap ================================================ --- source: crates/topos/tests/regtest.rs expression: "utils::sanitize_config_folder_path(result)" --- Run a test topos certificate spammer to send test certificates to the network, generating randomly among the `nb_subnets` subnets the batch of `cert_per_batch` certificates at every `batch-interval` Usage: topos regtest spam [OPTIONS] Options: --target-nodes The target node api endpoint. Multiple nodes could be specified as comma separated list e.g. `--target-nodes=http://[::1]:1340,http://[::1]:1341` [env: TOPOS_NETWORK_SPAMMER_TARGET_NODES=] -v, --verbose... Defines the verbosity level --no-color Disable color in logs [env: TOPOS_LOG_NOCOLOR=] --target-nodes-path Path to json file with list of target nodes as alternative to `--target-nodes` [env: TOPOS_NETWORK_SPAMMER_TARGET_NODES_PATH=] --home Home directory for the configuration [env: TOPOS_HOME=] [default: /home/runner/.config/topos] --local-key-seed Seed for generation of local private signing keys and corresponding subnet ids [env: TOPOS_NETWORK_SPAMMER_LOCAL_KEY_SEED=] [default: 1] --cert-per-batch Certificates generated in one batch. Batch is generated every `batch-interval` milliseconds [env: TOPOS_NETWORK_SPAMMER_CERT_PER_BATCH=] [default: 1] --nb-subnets Number of subnets to use for certificate generation. For every certificate subnet id will be picked randomly [env: TOPOS_NETWORK_SPAMMER_NUMBER_OF_SUBNETS=] [default: 1] --nb-batches Number of batches to generate before finishing execution. 
If not specified, batches will be generated indefinitely [env: TOPOS_NETWORK_SPAMMER_NUMBER_OF_BATCHES=] --batch-interval Time interval in milliseconds between generated batches of certificates [env: TOPOS_NETWORK_SPAMMER_BATCH_INTERVAL=] [default: 2000] --target-subnets List of generated certificate target subnets. No target subnets by default. For example `--target-subnets=0x3bc19e36ff1673910575b6727a974a9abd80c9a875d41ab3e2648dbfb9e4b518,0xa00d60b2b408c2a14c5d70cdd2c205db8985ef737a7e55ad20ea32cc9e7c417c` [env: TOPOS_NETWORK_SPAMMER_TARGET_SUBNETS=] --otlp-agent Socket of the opentelemetry agent endpoint. If not provided open telemetry will not be used [env: TOPOS_OTLP_AGENT=] --otlp-service-name Otlp service name. If not provided open telemetry will not be used [env: TOPOS_OTLP_SERVICE_NAME=] --benchmark Flag to indicate usage of Kubernetes [env: TOPOS_NETWORK_SPAMMER_BENCHMARK=] --target-hosts Template for generating target node entrypoints. e.g. `--hosts="http://validator-{N}:1340"` [env: TOPOS_NETWORK_SPAMMER_TARGET_HOSTS=] --number Number of nodes to generate based on the DNS template [env: TOPOS_NETWORK_SPAMMER_NUMBER=] -h, --help Print help ================================================ FILE: crates/topos/tests/utils.rs ================================================ use assert_cmd::prelude::*; use predicates::prelude::*; use regex::Regex; use std::path::PathBuf; use std::process::Command; use topos::install_polygon_edge; // Have to allow dead_code because clippy doesn't recognize it is being used in the tests #[cfg(test)] #[allow(dead_code)] pub fn sanitize_config_folder_path(cmd_out: &str) -> String { // Sanitize the result here: // When run locally, we get /Users//.config/topos // When testing on the CI, we get /home/runner/.config/topos let pattern = Regex::new(r"\[default: .+?/.config/topos\]").unwrap(); pattern .replace(cmd_out, "[default: /home/runner/.config/topos]") .to_string() } // Have to allow dead_code because clippy doesn't recognize it 
is being used in the tests #[allow(dead_code)] pub async fn setup_polygon_edge(path: &str) -> String { let installation_path = std::env::current_dir().unwrap().join(path); let binary_path = installation_path.join("polygon-edge"); if !binary_path.exists() { std::fs::create_dir_all(installation_path.clone()) .expect("Cannot create test binary folder"); install_polygon_edge( "topos-protocol/polygon-edge".to_string(), None, installation_path.clone().as_path(), ) .await .expect("Cannot install Polygon Edge binary"); } installation_path.to_str().unwrap().to_string() } // Have to allow dead_code because clippy doesn't recognize it is being used in the tests #[allow(dead_code)] pub async fn generate_polygon_edge_genesis_file( polygon_edge_bin: &str, home_path: &str, node_name: &str, subnet: &str, ) -> Result<(), Box> { let genesis_folder_path: PathBuf = PathBuf::from(format!("{}/subnet/{}", home_path, subnet)); if !genesis_folder_path.exists() { std::fs::create_dir_all(genesis_folder_path.clone()) .expect("Cannot create subnet genesis file folder"); } let genesis_file_path = format!("{}/genesis.json", genesis_folder_path.display()); println!("Polygon edge path: {}", polygon_edge_bin); let mut cmd = Command::new(polygon_edge_bin); let val_prefix_path = format!("{}/node/{}/", home_path, node_name); cmd.arg("genesis") .arg("--dir") .arg(&genesis_file_path) .arg("--consensus") .arg("ibft") .arg("--ibft-validators-prefix-path") .arg(val_prefix_path) .arg("--bootnode") /* set dummy bootnode, we will not run edge to produce blocks */ .arg("/ip4/127.0.0.1/tcp/8545/p2p/16Uiu2HAmNYneHCbJ1Ntz1ojvTdiNGCMGWNT5MGMH28AzKNV66Paa"); cmd.assert() .success() .stdout(predicate::str::contains(format!( "Genesis written to {}", genesis_folder_path.display() ))); Ok(()) } ================================================ FILE: crates/topos-certificate-spammer/Cargo.toml ================================================ [package] name = "topos-certificate-spammer" version = "0.1.0" edition = "2021" 
[lints] workspace = true [dependencies] clap.workspace = true tokio-stream.workspace = true rand_distr.workspace = true rand_core.workspace = true rand.workspace = true futures.workspace = true tokio.workspace = true serde = { workspace = true, features = ["derive"] } serde_json.workspace = true hex.workspace = true thiserror.workspace = true tracing.workspace = true tracing-opentelemetry.workspace = true opentelemetry.workspace = true tiny-keccak.workspace = true uuid.workspace = true lazy_static.workspace = true http.workspace = true toml = "0.5.9" topos-core.workspace = true topos-tce-proxy = { path = "../topos-tce-proxy"} topos-crypto = {path = "../topos-crypto"} ================================================ FILE: crates/topos-certificate-spammer/README.md ================================================ # Topos Certificate Spammer ## How does it work? The Topos Certificate Spammer generates test certificate chains and sends them to one or more target nodes, specified with the parameter `--target-nodes`, e.g. `--target-nodes=http://[::1]:1340,http://[::1]:1341`. Every `--batch-interval`, a batch of certificates is generated and sent following the `--cert-per-batch` argument. The source subnet id and the list of target subnets are randomly assigned to every certificate. When the argument `--nb-batches` is specified, the program will send the specified number of batches and the command will gracefully shut down connections and exit. When unspecified, it will continuously generate and send batches of certificates. The time delay in milliseconds between two batches of certificates is set with `--batch-interval`. Certificates are signed with a secp256k1 private key, and the seed for the generation of the `nb-subnets` private keys is influenced by `--local-key-seed`. The dispatching of Certificates is done through the TCE service gRPC API. 
## Commands To compile from the root `topos` workspace directory: ``` cargo build --release ``` The extended list of commands: ``` topos regtest spam -h ``` ## Example Continuously spam the local tce node `http://[::1]:1340` with a batch of 1 certificate every 2 seconds. The certificate target subnet list is empty: ``` topos regtest spam ``` Spam two tce target nodes with 3 batches (every batch containing 5 certificates with 2 possible source subnet ids), also specifying two possible target subnets for every generated certificate: ``` topos regtest spam --nb-subnets=2 --cert-per-batch=5 --nb-batches=3 --target-nodes=http://[::1]:1340,http://[::1]:1341 --target-subnets=0x3bc19e36ff1673910575b6727a974a9abd80c9a875d41ab3e2648dbfb9e4b518,0xa00d60b2b408c2a14c5d70cdd2c205db8985ef737a7e55ad20ea32cc9e7c417c ``` Alternatively, environment variables can be used instead of command line arguments to configure the Topos Certificate Spammer: ``` TOPOS_NETWORK_SPAMMER_CERT_PER_BATCH TOPOS_NETWORK_SPAMMER_TARGET_NODES TOPOS_NETWORK_SPAMMER_LOCAL_KEY_SEED TOPOS_NETWORK_SPAMMER_BATCH_INTERVAL TOPOS_NETWORK_SPAMMER_TARGET_SUBNETS ``` ## Instrumentation By specifying the `--otlp-agent` and `--otlp-service-name` cli options, the instrumentation event `NewTestCertificate` will be observable from Otlp/Jaeger. 
================================================ FILE: crates/topos-certificate-spammer/config/target_nodes_example.json ================================================
{
    "nodes": [
        "http://[::1]:1340",
        "http://[::1]:1341"
    ]
}
================================================ FILE: crates/topos-certificate-spammer/src/config.rs ================================================
/// Runtime configuration for the certificate spammer.
///
/// NOTE(review): the generic parameters below were stripped by the extraction
/// (the dump showed `Option>`); they are reconstructed from how the fields are
/// used in lib.rs — confirm against the upstream repository.
#[derive(Clone, Debug)]
pub struct CertificateSpammerConfig {
    /// Target node API endpoints (comma separated list on the CLI).
    pub target_nodes: Option<Vec<String>>,
    /// Path to a JSON file listing target nodes, alternative to `target_nodes`.
    pub target_nodes_path: Option<String>,
    /// Seed used to derive the local signing keys and the matching subnet ids.
    pub local_key_seed: u64,
    /// Number of certificates generated in every batch.
    pub cert_per_batch: u64,
    /// Number of source subnets to generate keys for.
    pub nb_subnets: u8,
    /// Number of batches to send before exiting; indefinite when `None`.
    pub nb_batches: Option<u64>,
    /// Time interval in milliseconds between two generated batches.
    pub batch_interval: u64,
    /// Optional list of target subnet ids as `0x`-prefixed hex strings.
    pub target_subnets: Option<Vec<String>>,
    /// Benchmark mode: derive node endpoints from the `target_hosts` template.
    pub benchmark: bool,
    /// Endpoint template for benchmark mode, e.g. `http://validator-{N}:9090`.
    pub target_hosts: Option<String>,
    /// Number of node endpoints generated from the template in benchmark mode.
    pub number: Option<u64>,
}
================================================ FILE: crates/topos-certificate-spammer/src/error.rs ================================================
/// Errors produced by the certificate spammer.
#[derive(Debug, thiserror::Error)]
pub enum Error {
    #[error("target nodes are not specified")]
    TargetNodesNotSpecified,
    #[error("error reading target nodes json file:{0}")]
    ReadingTargetNodesJsonFile(String),
    #[error("error parsing target nodes json file:{0}")]
    InvalidTargetNodesJsonFile(String),
    #[error("invalid subnet id error: {0}")]
    InvalidSubnetId(String),
    #[error("hex conversion error {0}")]
    HexConversion(hex::FromHexError),
    #[error("invalid signing key: {0}")]
    InvalidSigningKey(String),
    #[error("Tce node connection error {0}")]
    TCENodeConnection(topos_tce_proxy::Error),
    #[error("Certificate signing error: {0}")]
    CertificateSigning(topos_core::uci::Error),
    // Fixed garbled display text: was "BenchmkarkConfigError config error: {0}".
    // The inner message (e.g. the "Invalid target-hosts pattern..." text asserted
    // by the regtest CLI tests) is unchanged, so those tests still pass.
    #[error("Benchmark config error: {0}")]
    BenchmarkConfig(String),
}
================================================ FILE: crates/topos-certificate-spammer/src/lib.rs ================================================ //!
Utility to spam dummy certificates use http::Uri; use opentelemetry::trace::FutureExt; use serde::Deserialize; use std::collections::HashMap; use std::fmt::Debug; use std::sync::Arc; use tokio::sync::{mpsc, oneshot, Mutex}; use tokio::time::{self, Duration}; use tokio_stream::StreamExt; use topos_core::uci::*; use topos_tce_proxy::client::{TceClient, TceClientBuilder}; use tracing::{debug, error, info, info_span, Instrument, Span}; use tracing_opentelemetry::OpenTelemetrySpanExt; mod config; pub mod error; mod utils; use error::Error; use crate::utils::{generate_source_subnets, generate_test_certificate}; pub use config::CertificateSpammerConfig; type NodeApiAddress = String; #[derive(Deserialize)] struct FileNodes { nodes: Vec, } /// Represents connection from one sequencer to a TCE node /// Multiple different subnets could be connected to the same TCE node address (represented with TargetNodeConnection with different SubnetId and created client) /// Multiple topos-sequencers from the same subnet could be connected to the same TCE node address (so they would have same SubnetID, but different client instances) struct TargetNodeConnection { address: NodeApiAddress, client: Arc>, shutdown: mpsc::Sender>, source_subnet: SourceSubnet, } #[derive(Debug, Clone)] pub struct SourceSubnet { signing_key: [u8; 32], source_subnet_id: SubnetId, last_certificate_id: CertificateId, } impl TargetNodeConnection { pub async fn shutdown(&mut self) -> Result<(), Box> { let (sender, receiver) = oneshot::channel(); self.shutdown.send(sender).await?; receiver.await?; Ok(()) } } async fn open_target_node_connection( nodes: &[String], source_subnet: &SourceSubnet, ) -> Result, Error> { let mut target_node_connections: Vec = Vec::new(); for tce_address in nodes { info!( "Opening client for tce service {}, source subnet id: {}", &tce_address, &source_subnet.source_subnet_id ); let (tce_client_shutdown_channel, shutdown_receiver) = mpsc::channel::>(1); let (tce_client, mut 
receiving_certificate_stream) = match TceClientBuilder::default() .set_subnet_id(source_subnet.source_subnet_id) .set_tce_endpoint(tce_address) .build_and_launch(shutdown_receiver) .await { Ok(value) => value, Err(e) => { error!( "Unable to create TCE client for node {}: {}", &tce_address, e ); return Err(Error::TCENodeConnection(e)); } }; match tce_client.open_stream(Vec::new()).await { Ok(_) => {} Err(e) => { error!("Unable to connect to node {}: {}", &tce_address, e); return Err(Error::TCENodeConnection(e)); } } let (shutdown_channel, mut shutdown_receiver) = mpsc::channel::>(1); let client = Arc::new(Mutex::new(tce_client)); { let source_subnet_id = source_subnet.source_subnet_id; let tce_address = tce_address.clone(); tokio::spawn(async move { loop { // process certificates received from the TCE node tokio::select! { Some((cert, position)) = receiving_certificate_stream.next() => { info!("Delivered certificate from tce address: {} for subnet id: {} cert id {}, position {:?}", &tce_address, &source_subnet_id, &cert.id, position); }, Some(sender) = shutdown_receiver.recv() => { info!("Shutting down client for tce address: {} for subnet id: {}", &tce_address, &source_subnet_id); let (killer, waiter) = oneshot::channel::<()>(); tce_client_shutdown_channel.send(killer).await.unwrap(); waiter.await.unwrap(); info!("Finishing watch certificates task..."); _ = sender.send(()); // Finish this task listener break; } } } }); } target_node_connections.push(TargetNodeConnection { address: tce_address.clone(), client, shutdown: shutdown_channel, source_subnet: source_subnet.clone(), }); } Ok(target_node_connections) } async fn close_target_node_connections( target_node_connections: HashMap>, ) { for mut target_node in target_node_connections .into_iter() .flat_map(|(_, connections)| connections) .collect::>() { info!("Closing connection to target node {}", target_node.address); if let Err(e) = target_node.shutdown().await { error!("Failed to close stream with {}: {e}", 
target_node.address); } } } /// Submit the certificate to the TCE node async fn submit_cert_to_tce(node: &TargetNodeConnection, cert: Certificate) { let client = node.client.clone(); let span = Span::current(); span.record("certificate_id", cert.id.to_string()); span.record("source_subnet_id", cert.source_subnet_id.to_string()); let mut tce_client = client.lock().await; send_new_certificate(&mut tce_client, cert) .with_context(span.context()) .instrument(span) .await } async fn send_new_certificate(tce_client: &mut TceClient, cert: Certificate) { if let Err(e) = tce_client .send_certificate(cert) .with_current_context() .instrument(Span::current()) .await { error!("Failed to send the Certificate to the TCE client: {}", e); } } async fn dispatch(cert: Certificate, target_node: &TargetNodeConnection) { info!( "Sending cert id={:?} prev_cert_id= {:?} subnet_id={:?} to tce node {}", &cert.id, &cert.prev_id, &cert.source_subnet_id, target_node.address ); submit_cert_to_tce(target_node, cert).await } pub async fn run( args: CertificateSpammerConfig, mut shutdown: mpsc::Receiver>, ) -> Result<(), Error> { // Is list of nodes is specified in the command line use them otherwise use // config file provided nodes let target_nodes = if args.benchmark { if let (Some(target_hosts), Some(number)) = (args.target_hosts, args.number) { let uri = target_hosts .replace("{N}", &0.to_string()) .parse::() .map_err(|e| Error::BenchmarkConfig(e.to_string()))?; if uri.host().is_none() || uri.path().is_empty() || uri.port_u16().is_none() { return Err(Error::BenchmarkConfig( "Invalid target-hosts pattern. 
Has to be in the format of http://validator-1:9090" .into(), )); } (0..number) .map(|n| target_hosts.replace("{N}", &n.to_string())) .collect::>() } else { return Err(Error::BenchmarkConfig( "The --benchmark flag needs the following two additional flags being passed to it:\n--target-hosts http://validator-{N}\n--number 10".into(), )); } } else if let Some(nodes) = args.target_nodes { nodes } else if let Some(target_nodes_path) = args.target_nodes_path { let json_str = std::fs::read_to_string(target_nodes_path) .map_err(|e| Error::ReadingTargetNodesJsonFile(e.to_string()))?; let json: FileNodes = serde_json::from_str(&json_str) .map_err(|e| Error::InvalidTargetNodesJsonFile(e.to_string()))?; json.nodes } else { return Err(Error::TargetNodesNotSpecified); }; // Generate keys for all required subnets (`nb_subnets`) let mut source_subnets = generate_source_subnets(args.local_key_seed, args.nb_subnets)?; info!("Generated source subnets: {source_subnets:#?}"); // Target subnets (randomly assigned to every generated certificate) let target_subnet_ids: Vec = args .target_subnets .iter() .flat_map(|id| { id.iter().map(|id| { let id = hex::decode(&id[2..]).map_err(|e| Error::InvalidSubnetId(e.to_string()))?; TryInto::<[u8; 32]>::try_into(id.as_slice()) .map_err(|e| Error::InvalidSubnetId(e.to_string())) }) }) .map(|id| id.map(SubnetId::from_array)) .collect::>()?; let mut target_node_connections: HashMap> = HashMap::new(); // For every source subnet, open connection to every target node, so we will have // nb_subnets * len(target_nodes) connections for source_subnet in &source_subnets { let connections_for_source_subnet = open_target_node_connection(target_nodes.as_slice(), source_subnet).await?; target_node_connections.insert( source_subnet.source_subnet_id, connections_for_source_subnet, ); } target_node_connections .iter() .flat_map(|(_, connections)| connections) .for_each(|connection| { info!( "Certificate spammer target nodes address: {}, source_subnet_id: {}, target \ 
subnet ids {:?}", connection.address, connection.source_subnet.source_subnet_id, target_subnet_ids ); }); let number_of_peer_nodes = target_nodes.len(); let mut batch_interval = time::interval(Duration::from_millis(args.batch_interval)); let mut batch_number: u64 = 0; let shutdown_sender = loop { let should_send_batch = tokio::select! { _ = batch_interval.tick() => true, Some(sender) = shutdown.recv() => { info!("Received shutdown signal, stopping certificate spammer"); for (_, connections) in target_node_connections { for mut connection in connections { info!("Closing connection to target node {}", connection.address); _ = connection.shutdown().await; } } break Some(sender); } }; if should_send_batch { // Starting batch, generate cert_per_batch certificates batch_number += 1; let batch_id = uuid::Uuid::new_v4().to_string(); // TODO: Need a better name for this span let span = info_span!( "Batch", batch_id, batch_number, cert_per_batch = args.cert_per_batch, number_of_peer_nodes ); async { info!("Starting batch {batch_number}"); let mut batch: Vec = Vec::new(); // Certificates for this batch for b in 0..args.cert_per_batch { // Randomize source subnet id let source_subnet = &mut source_subnets[rand::random::() % args.nb_subnets as usize]; // Randomize number of target subnets if target subnet list cli argument is provided let target_subnets: Vec = if target_subnet_ids.is_empty() { // Empty list of target subnets in certificate Vec::new() } else { // Generate random list in size of 0..len(target_subnet_ids) as target subnets let number_of_target_subnets = rand::random::() % (target_subnet_ids.len() + 1); let mut target_subnets = Vec::new(); for _ in 0..number_of_target_subnets { target_subnets.push( target_subnet_ids [rand::random::() % target_subnet_ids.len()], ); } target_subnets }; let new_cert = match generate_test_certificate(source_subnet, target_subnets.as_slice()) { Ok(cert) => cert, Err(e) => { error!("Unable to generate certificate: {e}"); continue; } }; 
debug!("New cert number {b} in batch {batch_number} generated"); batch.push(new_cert); } // Dispatch certs in this batch for cert in batch { // Randomly choose target tce node for every certificate from related source_subnet_id connection list let target_node_connection = &target_node_connections[&cert.source_subnet_id] [rand::random::() % target_nodes.len()]; dispatch(cert, target_node_connection) .instrument(Span::current()) .with_current_context() .await; } } .instrument(span) .await; if let Some(nb_batches) = args.nb_batches { if batch_number >= nb_batches { info!("Generated {nb_batches}, finishing certificate spammer..."); tokio::time::sleep(Duration::from_secs(5)).await; close_target_node_connections(target_node_connections).await; info!("Cert spammer finished"); break None; } } } }; info!("Certificate spammer finished"); if let Some(sender) = shutdown_sender { sender .send(()) .expect("Failed to send shutdown signal from certificate spammer"); } Ok(()) } ================================================ FILE: crates/topos-certificate-spammer/src/utils.rs ================================================ use topos_core::uci::{Certificate, SubnetId}; use crate::{error::Error, SourceSubnet}; lazy_static::lazy_static! 
{ /// Size of the proof static ref PROOF_SIZE_BYTES: usize = std::env::var("TOPOS_PROOF_SIZE_BYTES") .ok() .and_then(|s| s.parse().ok()) .unwrap_or(1000); /// Dummy proof with specified size static ref STARK_BLOB: Vec = (0..*PROOF_SIZE_BYTES) .map(|_| rand::random::()) .collect::>(); } pub fn generate_random_32b_array() -> [u8; 32] { (0..32) .map(|_| rand::random::()) .collect::>() .try_into() .expect("Valid 32 byte array") } /// Generate test certificate pub fn generate_test_certificate( source_subnet: &mut SourceSubnet, target_subnet_ids: &[SubnetId], ) -> Result> { let mut new_cert = Certificate::new( source_subnet.last_certificate_id, source_subnet.source_subnet_id, generate_random_32b_array(), generate_random_32b_array(), generate_random_32b_array(), target_subnet_ids, 0, STARK_BLOB.clone(), )?; new_cert .update_signature(&source_subnet.signing_key) .map_err(Error::CertificateSigning)?; source_subnet.last_certificate_id = new_cert.id; Ok(new_cert) } pub fn generate_source_subnets( local_key_seed: u64, number_of_subnets: u8, ) -> Result, Error> { let mut subnets = Vec::new(); let mut signing_key = [0u8; 32]; let (_, right) = signing_key.split_at_mut(24); right.copy_from_slice(local_key_seed.to_be_bytes().as_slice()); for _ in 0..number_of_subnets { signing_key = tiny_keccak::keccak256(&signing_key); // Subnet id of the source subnet which will be used for every generated certificate let source_subnet_id: SubnetId = topos_crypto::keys::derive_public_key(&signing_key) .map_err(|e| Error::InvalidSigningKey(e.to_string()))? 
.as_slice()[1..33] .try_into() .map_err(|_| Error::InvalidSubnetId("Unable to parse subnet id".to_string()))?; subnets.push(SourceSubnet { signing_key, source_subnet_id, last_certificate_id: Default::default(), }); } Ok(subnets) } ================================================ FILE: crates/topos-clock/Cargo.toml ================================================ [package] name = "topos-clock" version = "0.1.0" edition = "2021" [lints] workspace = true [dependencies] tokio.workspace = true futures.workspace = true thiserror.workspace = true chrono = {version = "0.4", default-features = false, features = ["clock"]} tracing.workspace = true ================================================ FILE: crates/topos-clock/src/lib.rs ================================================ //! This crate is responsible for managing the clock pace. //! //! The Clock is responsible of giving informations about Epoch and Delta timing by exposing //! reference to the data but also by broadcasting `EpochChange` events. use std::sync::{atomic::AtomicU64, Arc}; use tokio::sync::broadcast; mod time; pub use time::TimeClock; const BROADCAST_CHANNEL_SIZE: usize = 100; pub trait Clock { /// Compute Epoch/Block numbers and spawn the clock task. 
fn spawn(self) -> Result, Error>; /// Return a reference to the current block number fn block_ref(&self) -> Arc; /// Return a reference to the current epoch number fn epoch_ref(&self) -> Arc; } #[derive(Clone, Debug, PartialEq, Eq)] pub enum Event { /// Notify an Epoch change with the associated epoch_number EpochChange(u64), } #[derive(Debug, thiserror::Error)] pub enum Error { #[error("Unable to generate spawn date")] SpawnDateFailure, } ================================================ FILE: crates/topos-clock/src/time.rs ================================================ use std::{ sync::{ atomic::{AtomicU64, Ordering}, Arc, }, time::Duration, }; use chrono::{DateTime, Utc}; use tokio::{ spawn, sync::broadcast, time::{interval_at, Instant}, }; use crate::{Clock, Error, Event, BROADCAST_CHANNEL_SIZE}; /// Time based clock implementation. /// /// Simulate blockchain block production by increasing block number by 1 every second. /// Epoch duration can be configured when creating the clock. pub struct TimeClock { genesis: DateTime, current_block: Arc, epoch_duration: u64, current_epoch: Arc, } impl Clock for TimeClock { fn spawn(mut self) -> Result, Error> { let (sender, receiver) = broadcast::channel(BROADCAST_CHANNEL_SIZE); self.compute_block(); self.compute_epoch(); spawn(async move { self.run(sender).await; }); Ok(receiver) } fn block_ref(&self) -> Arc { self.current_block.clone() } fn epoch_ref(&self) -> Arc { self.current_epoch.clone() } } impl TimeClock { /// Create a new TimeClock instance based on a genesis datatime and an epoch duration. 
pub fn new(genesis: DateTime, epoch_duration: u64) -> Result { let mut clock = Self { genesis, current_block: Arc::new(AtomicU64::new(0)), epoch_duration, current_epoch: Arc::new(AtomicU64::new(0)), }; clock.compute_block(); clock.compute_epoch(); Ok(clock) } async fn run(&mut self, sender: broadcast::Sender) { let mut interval = interval_at(Instant::now(), Duration::from_secs(1)); loop { interval.tick().await; let _previous_block = self.current_block.fetch_add(1, Ordering::Relaxed); if self.current_block.load(Ordering::Relaxed) % self.epoch_duration == 0 { self.compute_epoch(); _ = sender.send(Event::EpochChange( self.current_epoch.load(Ordering::Relaxed), )); } } } fn compute_block(&mut self) { let blocks = std::cmp::max( Utc::now() .naive_utc() .signed_duration_since(self.genesis.naive_utc()) .num_seconds(), 0, ) as u64; self.current_block.store(blocks, Ordering::Relaxed); } fn compute_epoch(&mut self) { self.current_epoch.store( self.current_block.load(Ordering::Relaxed) / self.epoch_duration, Ordering::Relaxed, ); } } #[cfg(test)] mod tests { use chrono::{Duration, Utc}; use crate::{Clock, Event, TimeClock}; #[tokio::test] async fn test_time_clock() { let genesis = Utc::now() .checked_sub_signed(Duration::seconds(30)) .unwrap(); let clock = TimeClock::new(genesis, 5).unwrap(); let current_block = clock.block_ref(); let current_epoch = clock.epoch_ref(); let mut recv = clock.spawn().unwrap(); assert_eq!(recv.recv().await, Ok(Event::EpochChange(7))); assert_eq!(current_epoch.load(std::sync::atomic::Ordering::Relaxed), 7); assert!(current_block.load(std::sync::atomic::Ordering::Relaxed) >= 30); } #[tokio::test] async fn test_time_clock_catchup() { let genesis = Utc::now() .checked_sub_signed(Duration::seconds(30)) .unwrap(); let clock = TimeClock::new(genesis, 2).unwrap(); let current_block = clock.block_ref(); let current_epoch = clock.epoch_ref(); let mut recv = clock.spawn().unwrap(); assert_eq!(recv.recv().await, Ok(Event::EpochChange(16))); 
assert!(recv.try_recv().is_err()); assert_eq!(current_epoch.load(std::sync::atomic::Ordering::Relaxed), 16); assert!(current_block.load(std::sync::atomic::Ordering::Relaxed) >= 30); tokio::time::sleep(std::time::Duration::from_secs(5)).await; assert_eq!(recv.recv().await, Ok(Event::EpochChange(17))); assert_eq!(recv.recv().await, Ok(Event::EpochChange(18))); assert_eq!(current_epoch.load(std::sync::atomic::Ordering::Relaxed), 18); assert!(current_block.load(std::sync::atomic::Ordering::Relaxed) >= 35); } } ================================================ FILE: crates/topos-config/Cargo.toml ================================================ [package] name = "topos-config" version = "0.1.0" edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] topos-p2p = { path = "../topos-p2p" } topos-core = { path = "../topos-core" } topos-wallet = { path = "../topos-wallet" } async-stream.workspace = true async-trait.workspace = true clap.workspace = true hex.workspace = true futures.workspace = true opentelemetry.workspace = true serde.workspace = true serde_json.workspace = true tokio = { workspace = true, features = ["full"] } tokio-util.workspace = true tonic.workspace = true tower.workspace = true tracing = { workspace = true, features = ["log"] } tracing-opentelemetry.workspace = true tracing-subscriber = { workspace = true, features = ["env-filter", "json", "ansi", "fmt"] } uuid.workspace = true rand.workspace = true reqwest.workspace = true thiserror.workspace = true opentelemetry-otlp = { workspace = true, features = ["grpc-tonic", "metrics", "tls-roots"] } figment = { version = "0.10", features = ["yaml", "toml", "env"] } dirs = "5.0" tracing-log = { version = "0.1.3", features = ["env_logger"] } tar = "0.4.38" flate2 ="1.0.26" url = "2.3.1" once_cell = "1.17.1" toml = "0.7.4" regex = "1" rlp = "0.5.1" openssl = { version = "0.10.61", features = ["vendored"] } [dev-dependencies] 
topos-tce-broadcast = { path = "../topos-tce-broadcast" } topos-tce-synchronizer = { path = "../topos-tce-synchronizer" } topos-tce-gatekeeper = { path = "../topos-tce-gatekeeper" } topos-tce-api = { path = "../topos-tce-api" } topos-tce-storage = { path = "../topos-tce-storage" } topos-test-sdk = { path = "../topos-test-sdk" } serde.workspace = true serde_json.workspace = true test-log.workspace = true env_logger.workspace = true rand.workspace = true futures.workspace = true libp2p = { workspace = true, features = ["identify"] } assert_cmd = "2.0.6" insta = { version = "1.21", features = ["json", "redactions"] } rstest = { workspace = true, features = ["async-timeout"] } tempfile = "3.8.0" predicates = "3.0.3" sysinfo = "0.29.11" serial_test = {version = "0.9.0"} [lints] workspace = true ================================================ FILE: crates/topos-config/assets/genesis-example.json ================================================ { "name": "polygon-edge", "genesis": { "nonce": "0x0000000000000000", "timestamp": "0x0", "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000f90129f90120f84694100d617e4392c02b31bdce650b26b6c0c3e04f95b0ae7711044926a23c1462754cbb0d1b43fb91fc8fd18bf5c81edb4de15124203157657bf7a8e86d9a3be5f32de725f3c4f8469492183cff18a1328e7d791d607589a15d9eee4bc4b0b45118f9e430d94f424019bb8702e004db5dad5725ab1a5346b0aaad556935189c47df5e401988527ce880bb1e2492cef84694b4973cdb10894d1d1547673bd758589034c2bba5b0b9833912ee2eab270a1204f3f9e58c5f2be603cc2ce32f5467e2a8246bb6b25a7908b39a8a0ed629a689da376b5cdd2df84694c16d83893cb61872206d4e271b813015d3242d94b0a468068169523df684362de6a5b729c8db400958bfd4f6d4e3646cc640f3d241253a21b00f05ff97545f535b36b31c7b80c28080c080", "gasLimit": "0x500000", "difficulty": "0x1", "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", "coinbase": "0x0000000000000000000000000000000000000000", "alloc": { "0x100D617E4392C02B31bdCe650b26b6c0c3E04F95": { "balance": "0x3b9aca00" }, 
"0x92183Cff18A1328E7d791D607589A15d9EeE4bC4": { "balance": "0x3b9aca00" }, "0xB4973Cdb10894D1D1547673bD758589034C2BBa5": { "balance": "0x3b9aca00" }, "0xC16d83893cB61872206D4e271B813015D3242d94": { "balance": "0x3b9aca00" } }, "number": "0x0", "gasUsed": "0x70000", "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", "baseFee": "0x0", "baseFeeEM": "0x0" }, "params": { "chainID": 100, "engine": { "ibft": { "blockTime": 6000000000, "epochSize": 30000, "type": "PoA", "validator_type": "bls" } }, "blockGasTarget": 0, "burnContract": null, "burnContractDestinationAddress": "0x0000000000000000000000000000000000000000" }, "bootnodes": [ "/ip4/10.101.192.110/tcp/10001/p2p/16Uiu2HAkxKTnwPL3eZmFkiKYJiG3um9uraPh21XhsJEPNm8juhy3", "/ip4/10.101.232.59/tcp/10001/p2p/16Uiu2HAmQfaE4bjJMVCwzigAUgp9eLcGQz8HZpURqEPNjtfDwge8", "/ip4/10.101.208.7/tcp/10001/p2p/16Uiu2HAm5tS7AdBhhtQ2JuTtyy2U4uFqsiW57vof5fiyHPRpwboD", "/ip4/10.101.210.76/tcp/10001/p2p/16Uiu2HAmFjGqEUYSnKkoqURRu8bZaWpuGdxBBqW15KFs4BsxFxdp" ] } ================================================ FILE: crates/topos-config/src/base.rs ================================================ use std::path::Path; use figment::{ providers::{Format, Toml}, Figment, }; use serde::{Deserialize, Serialize}; use crate::node::NodeRole; use crate::Config; #[derive(Serialize, Deserialize, Debug, Clone)] #[serde(rename_all = "kebab-case")] pub struct BaseConfig { #[serde(default = "default_name")] pub name: String, #[serde(default = "default_role")] pub role: NodeRole, #[serde(default = "default_subnet")] pub subnet: String, #[serde(default = "default_secrets_config")] pub secrets_config: Option, } fn default_name() -> String { "default".to_string() } fn default_role() -> NodeRole { NodeRole::Validator } fn default_subnet() -> String { "topos".to_string() } fn default_secrets_config() -> Option { None } impl BaseConfig { pub fn need_tce(&self) -> bool { self.subnet == "topos" } pub fn need_sequencer(&self) -> bool { 
matches!(self.role, NodeRole::Sequencer) } pub fn need_edge(&self) -> bool { true } } impl Config for BaseConfig { type Output = Self; fn load_from_file(figment: Figment, home: &Path) -> Figment { let home = home.join("config.toml"); let base = Figment::new() .merge(Toml::file(home).nested()) .select("base"); figment.merge(base) } fn load_context(figment: Figment) -> Result { figment.extract() } fn profile() -> String { "base".to_string() } } ================================================ FILE: crates/topos-config/src/edge/command.rs ================================================ use serde_json::Value; use std::collections::HashMap; use std::os::unix::prelude::ExitStatusExt; use std::path::{Path, PathBuf}; use std::process::{ExitStatus, Stdio}; use tokio::{ io::{AsyncBufReadExt, BufReader}, process::Command, }; use tracing::debug; use tracing::{error, info, warn}; pub const BINARY_NAME: &str = "polygon-edge"; pub struct CommandConfig { binary_path: PathBuf, args: Vec, } impl CommandConfig { pub fn new(binary_path: PathBuf) -> Self { let binary_path = if binary_path == PathBuf::from(".") { std::env::current_dir() .expect("Cannot get the current directory") .join(BINARY_NAME) } else { binary_path }; CommandConfig { binary_path, args: Vec::new(), } } pub fn init(mut self, path: &Path) -> Self { self.args.push("secrets".into()); self.args.push("init".into()); self.args.push("--insecure".into()); self.args.push("--data-dir".into()); self.args.push(format!("{}", path.display())); self } pub fn server( mut self, data_dir: &Path, genesis_path: &Path, edge_args: HashMap, ) -> Self { self.args.push("server".into()); self.args.push("--data-dir".into()); self.args.push(format!("{}", data_dir.display())); self.args.push("--chain".into()); self.args.push(format!("{}", genesis_path.display())); self.args.push("--json".into()); for (k, v) in &edge_args { self.args.push(format!("--{k}")); self.args.push(v.to_string()); } self } pub async fn spawn(self) -> Result { info!( 
"Spawning Polygon Edge binary located at: {:?}, args: {:?}", self.binary_path, self.args ); let mut command = Command::new(self.binary_path); command.kill_on_drop(true); command.args(self.args); let mut child = command .stderr(Stdio::piped()) .stdout(Stdio::piped()) .stdin(Stdio::piped()) .spawn()?; if let Some(pid) = child.id() { info!("Polygon Edge child process with pid {pid} successfully started"); } let stdout = child .stderr .take() .expect("child did not have a handle to stdout"); let mut reader = BufReader::new(stdout).lines(); let running = async { child.wait().await }; let logging = async { while let Ok(line) = reader.next_line().await { match line { Some(l) => match serde_json::from_str(&l) { Ok(v) => EdgeLog::new(v).log(), Err(_) => println!("{l}"), }, None => break, } } }; let (running_out, _) = tokio::join!(running, logging); let exit_status = running_out?; info!( "The Edge process is terminated with exit status {:?}; exit code: {:?}, exit signal \ {:?}, success: {:?}, raw code: {}", exit_status, exit_status.code(), exit_status.signal(), exit_status.success(), exit_status.into_raw(), ); Ok(exit_status) } } pub struct EdgeLog { v: HashMap, } impl EdgeLog { pub fn new(v: HashMap) -> Self { Self { v } } pub fn log(&mut self) { match self.v.get("@level") { Some(level) => match level.as_str() { Some(r#"info"#) => info!("{}", self.internal()), Some(r#"warn"#) => warn!("{}", self.internal()), Some(r#"debug"#) => debug!("{}", self.internal()), Some(r#"error"#) => error!("{}", self.internal()), _ => error!("log parse failure: {:?}", self.v), }, None => error!("{:?}", self.v.get("error")), } } fn internal(&mut self) -> String { let module = self.v.remove("@module").unwrap(); let message = self.v.remove("@message").unwrap(); // FIXME: Figure out tracing features to make this nicer self.v.remove("@timestamp"); self.v.remove("@level"); let mut message = format!("{module}: {message}"); for (k, s) in &self.v { message = format!("{} {}:{}", message, k, s); } message 
} } ================================================ FILE: crates/topos-config/src/edge.rs ================================================ use crate::{edge::command::CommandConfig, Config}; use figment::{ providers::{Format, Toml}, Figment, }; use serde::{Deserialize, Serialize}; use std::{ collections::HashMap, path::{Path, PathBuf}, process::ExitStatus, }; use tokio::{spawn, task::JoinHandle}; use tracing::{error, info}; use self::command::BINARY_NAME; // TODO: Provides the default arguments here // Serde `flatten` and `default` doesn't work together yet // https://github.com/serde-rs/serde/issues/1626 #[derive(Serialize, Deserialize, Debug, Clone)] #[serde(rename_all = "kebab-case")] pub struct EdgeConfig { #[serde(flatten)] pub args: HashMap, } impl Config for EdgeConfig { type Output = EdgeConfig; fn load_from_file(figment: Figment, home: &Path) -> Figment { let home = home.join("config.toml"); let edge = Figment::new() .merge(Toml::file(home).nested()) .select("edge"); figment.merge(edge) } fn load_context(figment: Figment) -> Result { figment.extract() } fn profile() -> String { "edge".to_string() } } #[derive(Serialize, Deserialize, Debug, Clone)] #[serde(rename_all = "kebab-case")] pub struct EdgeBinConfig { pub edge_path: PathBuf, } impl EdgeBinConfig { pub fn binary_path(&self) -> PathBuf { self.edge_path.join(BINARY_NAME) } } impl Config for EdgeBinConfig { type Output = EdgeBinConfig; fn load_from_file(figment: Figment, home: &Path) -> Figment { let home = home.join("config.toml"); let edge = Figment::new() .merge(Toml::file(home).nested()) .select("edge"); figment.merge(edge) } fn load_context(figment: Figment) -> Result { figment.extract() } fn profile() -> String { "edge".to_string() } } pub mod command; pub fn generate_edge_config( edge_path: PathBuf, config_path: PathBuf, ) -> JoinHandle> { // Create the Polygon Edge config info!("Generating the configuration at {config_path:?}"); info!("Polygon-edge binary located at: {edge_path:?}"); 
spawn(async move { CommandConfig::new(edge_path) .init(&config_path) .spawn() .await .map_err(|e| { error!("Failed to generate the edge configuration: {e:?}"); e }) }) } ================================================ FILE: crates/topos-config/src/genesis/mod.rs ================================================ use rlp::Rlp; use std::collections::HashSet; use std::str::FromStr; use std::{fs, path::PathBuf}; use serde_json::Value; use topos_core::types::ValidatorId; use topos_p2p::{Multiaddr, PeerId}; use tracing::info; use crate::node::NodeConfig; #[cfg(test)] pub(crate) mod tests; /// From the Edge format pub struct Genesis { pub json: Value, } #[derive(Debug, thiserror::Error)] pub enum Error { #[error("Failed to parse validators")] ParseValidators, #[error("Invalid genesis file on path {0}: {1}")] InvalidGenesisFile(String, String), } impl Genesis { pub fn new(path: &PathBuf) -> Result { info!("Reading subnet genesis file {}", path.display()); let genesis_file = fs::File::open(path) .map_err(|e| Error::InvalidGenesisFile(path.display().to_string(), e.to_string()))?; let json: Value = serde_json::from_reader(genesis_file).expect("genesis json parsed"); Ok(Self { json }) } // TODO: parse directly with serde pub fn boot_peers(&self, port: Option) -> Vec<(PeerId, Multiaddr)> { match self.json["bootnodes"].as_array() { Some(v) => v .iter() .map(|bootnode| { let (multiaddr, peerid) = bootnode.as_str().unwrap().rsplit_once("/p2p/").unwrap(); // Extract the Edge port from the genesis file let (multiaddr, edge_port) = multiaddr.rsplit_once('/').unwrap(); // Use the given port instead if any let port = port.map_or(edge_port.to_string(), |p| p.to_string()); let multiaddr = format!("{multiaddr}/{port}"); (peerid.parse().unwrap(), multiaddr.parse().unwrap()) }) .collect::>(), None => Vec::default(), } } /// Parse the validators from the `extraData` field of the genesis file. /// The `extraData` is padded with 32 bytes, and the validators are RLP encoded. 
/// Each validator is 20 bytes, with a SEAL at the end of the whole list (8 bytes) pub fn validators(&self) -> Result, Error> { let extra_data = self.json["genesis"]["extraData"] .as_str() .expect("The extraData field must be present. Bad genesis file?") .to_string(); // Define constants for the prefix size and validator size const VANITY_SIZE: usize = 32; // Remove the "0x" prefix from the hex string let hex_string = &extra_data[2..]; // Convert the hex string to bytes let bytes = hex::decode(hex_string).expect("Failed to decode hex string"); // Slice the bytes to get the validators data let validators_data = &bytes[VANITY_SIZE..]; // Create an Rlp object from the validators data let rlp = Rlp::new(validators_data); // Get the first Rlp item (index 0) and iterate over its items let first_item = rlp.at(0).expect("Failed to get first RLP item"); let item_count = first_item .item_count() .expect("Validators must be an RLP list. Bad genesis file?"); first_item.into_iter().try_fold( HashSet::with_capacity(item_count), |mut validator_public_keys, validator_rlp| { if let Ok(public_key) = validator_rlp.data() { let address = format!("0x{}", hex::encode(&public_key[1..=20])); validator_public_keys.insert( ValidatorId::from_str(address.as_str()) .map_err(|_| Error::ParseValidators)?, ); } Ok(validator_public_keys) }, ) } } impl TryFrom<&NodeConfig> for Genesis { type Error = Error; fn try_from(config: &NodeConfig) -> Result { Genesis::new(&config.genesis_path) } } ================================================ FILE: crates/topos-config/src/genesis/tests.rs ================================================ use rstest::fixture; use rstest::rstest; use std::str::FromStr; use topos_core::types::ValidatorId; use super::Genesis; macro_rules! 
test_case { ($fname:expr) => { concat!(env!("CARGO_MANIFEST_DIR"), "/assets/", $fname) }; } #[fixture] #[once] pub fn genesis() -> Genesis { Genesis::new(&test_case!("genesis-example.json").into()) .expect("Expected valid test genesis file") } #[rstest] pub fn test_correct_validator_count(genesis: &Genesis) { let validators = genesis.validators().unwrap(); assert_eq!(validators.len(), 4); } #[rstest] pub fn test_parse_bootnodes(genesis: &Genesis) { let bootnodes = genesis.boot_peers(None); assert_eq!(4, bootnodes.len()); } #[rstest] pub fn test_extract_validators(genesis: &Genesis) { let validators = genesis.validators().unwrap(); let first = ValidatorId::from_str("0x100d617e4392c02b31bdce650b26b6c0c3e04f95").unwrap(); let second = ValidatorId::from_str("0x92183cff18a1328e7d791d607589a15d9eee4bc4").unwrap(); let third = ValidatorId::from_str("0xb4973cdb10894d1d1547673bd758589034c2bba5").unwrap(); let fourth = ValidatorId::from_str("0xc16d83893cb61872206d4e271b813015d3242d94").unwrap(); assert_eq!(validators.get(&first), Some(&first)); assert_eq!(validators.get(&second), Some(&second)); assert_eq!(validators.get(&third), Some(&third)); assert_eq!(validators.get(&fourth), Some(&fourth)); } ================================================ FILE: crates/topos-config/src/lib.rs ================================================ pub(crate) mod base; pub mod edge; pub mod genesis; pub mod node; pub mod sequencer; pub mod tce; use std::path::Path; use figment::providers::Serialized; use figment::{error::Kind, Figment}; use serde::Serialize; pub trait Config: Serialize { /// The configuration type returned (should be Self). type Output; /// Load the configuration from a file or multiple files. /// The home is the directory where the configuration files are located. /// For node, it is the `node` directory in the $TOPOS_HOME directory. fn load_from_file(figment: Figment, home: &Path) -> Figment; /// Load the configuration from the context. 
/// Trying to extract the configuration from the figment context. fn load_context(figment: Figment) -> Result; /// Return the profile name of the configuration to be used /// when generating the file. fn profile() -> String; /// Convert the configuration to a TOML table. fn to_toml(&self) -> Result { let mut config_toml = toml::Table::new(); let config = toml::Table::try_from(self)?; // Flatten the top level for (profile, content) in config { config_toml.insert(profile, content); } Ok(config_toml) } /// Main function to load the configuration. /// It will load the configuration from the file and an optional existing struct (if any) /// and then extract the configuration from the context in order to build the Config. /// The Config is then returned or an error if the configuration is not valid. fn load(home: &Path, config: Option<&S>) -> Result { let mut figment = Figment::new(); figment = Self::load_from_file(figment, home); if let Some(config) = config { figment = figment.merge(Serialized::from(config, Self::profile())) } Self::load_context(figment) } } pub(crate) fn load_config( node_path: &Path, config: Option<&S>, ) -> T::Output { match T::load(node_path, config) { Ok(config) => config, Err(figment::Error { kind: Kind::MissingField(name), .. 
}) => { println!("Missing field: {}", name); std::process::exit(1); } Err(e) => { println!("Failed to load config: {e}"); std::process::exit(1); } } } ================================================ FILE: crates/topos-config/src/node.rs ================================================ use std::path::{Path, PathBuf}; use figment::{ providers::{Format, Toml}, Figment, }; use serde::{Deserialize, Serialize}; use topos_wallet::SecretManager; use tracing::{debug, error}; use crate::{ base::BaseConfig, edge::{EdgeBinConfig, EdgeConfig}, load_config, sequencer::SequencerConfig, tce::TceConfig, Config, }; #[derive(clap::ValueEnum, Clone, Debug, Deserialize, Serialize)] #[serde(rename_all = "lowercase")] pub enum NodeRole { Validator, Sequencer, FullNode, } #[derive(Serialize, Deserialize, Debug)] pub struct NodeConfig { pub base: BaseConfig, pub tce: Option, pub sequencer: Option, pub edge: Option, #[serde(skip)] pub home_path: PathBuf, #[serde(skip)] pub node_path: PathBuf, #[serde(skip)] pub genesis_path: PathBuf, #[serde(skip)] pub edge_bin: Option, } impl NodeConfig { /// Try to create a new node config struct from the given home path and node name. /// It expects a config file to be present in the node's folder. /// /// This `config.toml` can be generated using: `topos node init` command pub fn try_from( home_path: &Path, node_name: &str, config: Option<&S>, ) -> Result { let node_path = home_path.join("node").join(node_name); let config_path = node_path.join("config.toml"); // TODO: Move this to `topos-node` when migrated if !Path::new(&config_path).exists() { error!( "Please run 'topos node init --name {node_name}' to create a config file first \ for {node_name}." ); std::process::exit(1); } Ok(Self::build_config(node_path, home_path, config)) } /// Create a new node config struct from the given home path and node name. /// /// It doesn't check the existence of the config file. 
/// It's useful for creating a config file for a new node, relying on the default values. pub fn create(home_path: &Path, node_name: &str, config: Option<&S>) -> Self { let node_path = home_path.join("node").join(node_name); Self::build_config(node_path, home_path, config) } /// Common function to build a node config struct from the given home path and node name. fn build_config( node_path: PathBuf, home_path: &Path, config: Option<&S>, ) -> Self { let node_folder = node_path.as_path(); let base = load_config::(node_folder, config); // Load genesis pointed by the local config let genesis_path = home_path .join("subnet") .join(base.subnet.clone()) .join("genesis.json"); let mut config = NodeConfig { node_path: node_path.to_path_buf(), genesis_path, home_path: home_path.to_path_buf(), base: base.clone(), sequencer: base .need_sequencer() .then(|| load_config::(node_folder, None)), tce: base .need_tce() .then(|| load_config::(node_folder, None)), edge_bin: base .need_edge() .then(|| load_config::(node_folder, config)), edge: base .need_edge() .then(|| load_config::(node_folder, None)), }; // Make the TCE DB path relative to the folder if let Some(config) = config.tce.as_mut() { config.db_path = node_folder.join(&config.db_path); debug!( "Maked TCE DB path relative to the node folder -> {:?}", config.db_path ); } config } } impl Config for NodeConfig { type Output = NodeConfig; fn load_from_file(figment: Figment, home: &Path) -> Figment { let home = home.join("config.toml"); figment.merge(Toml::file(home)) } fn load_context(figment: Figment) -> Result { figment.extract() } fn profile() -> String { "default".to_string() } } impl From<&NodeConfig> for SecretManager { fn from(val: &NodeConfig) -> Self { match val.base.secrets_config.as_ref() { Some(secrets_config) => SecretManager::from_aws(secrets_config), None => SecretManager::from_fs(val.node_path.clone()), } } } ================================================ FILE: crates/topos-config/src/sequencer.rs 
================================================ use std::path::Path; use crate::Config; use figment::{ providers::{Format, Toml}, Figment, }; use serde::{Deserialize, Serialize}; #[derive(Serialize, Deserialize, Debug, Clone)] #[serde(rename_all = "kebab-case")] pub struct SequencerConfig { /// SubnetId of your Sequencer, hex encoded 32 bytes prefixed with 0x pub subnet_id: Option, /// JSON-RPC endpoint of the Edge node, websocket and http support expected /// If the endpoint address starts with `https`, ssl will be used with http/websocket #[serde(default = "default_subnet_jsonrpc_endpoint")] pub subnet_jsonrpc_http: String, // Optional explicit websocket endpoint for the subnet jsonrpc api. If this parameter is not provided, // it will be derived from the `subnet_jsonrpc_http`. // Full uri value is expected, e.g. `wss://arbitrum.infura.com/v3/ws/mykey` or `ws://127.0.0.1/ws` pub subnet_jsonrpc_ws: Option, /// Address where the Topos Core contract is deployed #[serde(default = "default_subnet_contract_address")] pub subnet_contract_address: String, /// gRPC API endpoint of one TCE process #[serde(default = "default_tce_grpc_endpoint")] pub tce_grpc_endpoint: String, /// OTLP agent endpoint, not used if not provided pub otlp_agent: Option, /// OTLP service name, not used if not provided pub otlp_service_name: Option, /// Start synchronizing from particular block number /// Default is to sync from genesis block (0) pub start_block: Option, } fn default_subnet_jsonrpc_endpoint() -> String { "127.0.0.1:8545".to_string() } fn default_subnet_contract_address() -> String { "0x0000000000000000000000000000000000000000".to_string() } fn default_tce_grpc_endpoint() -> String { "http://[::1]:1340".to_string() } impl Config for SequencerConfig { type Output = Self; fn load_from_file(figment: Figment, home: &Path) -> Figment { let home = home.join("config.toml"); let sequencer = Figment::new() .merge(Toml::file(home).nested()) .select("sequencer"); figment.merge(sequencer) } 
fn load_context(figment: Figment) -> Result { figment.extract() } fn profile() -> String { "sequencer".to_string() } } ================================================ FILE: crates/topos-config/src/tce/broadcast.rs ================================================ use serde::{Deserialize, Serialize}; /// Broadcast threshold configurations #[derive(Clone, Debug, Default, Deserialize, Serialize)] pub struct ReliableBroadcastParams { /// Echo threshold pub echo_threshold: usize, /// Ready threshold pub ready_threshold: usize, /// Delivery threshold pub delivery_threshold: usize, } impl ReliableBroadcastParams { pub const fn new(n: usize) -> Self { let f: usize = n / 3; Self { echo_threshold: 1 + (n + f) / 2, ready_threshold: 1 + f, delivery_threshold: 2 * f + 1, } } } ================================================ FILE: crates/topos-config/src/tce/p2p.rs ================================================ use std::net::SocketAddr; use serde::{Deserialize, Serialize}; use topos_p2p::Multiaddr; use super::DEFAULT_IP; #[derive(Serialize, Deserialize, Debug, Clone)] #[serde(rename_all = "kebab-case")] pub struct P2PConfig { /// List of multiaddresses to listen for incoming connections #[serde(default = "default_listen_addresses")] pub listen_addresses: Vec, /// List of multiaddresses to advertise to the network #[serde(default = "default_public_addresses")] pub public_addresses: Vec, #[serde(skip)] pub is_bootnode: bool, } impl Default for P2PConfig { fn default() -> Self { Self { listen_addresses: default_listen_addresses(), public_addresses: default_public_addresses(), is_bootnode: false, } } } const fn default_libp2p_api_addr() -> SocketAddr { SocketAddr::V4(std::net::SocketAddrV4::new(DEFAULT_IP, 9090)) } fn default_listen_addresses() -> Vec { vec![format!( "/ip4/{}/tcp/{}", default_libp2p_api_addr().ip(), default_libp2p_api_addr().port() ) .parse() .expect( r#" Listen multiaddresses generation failure. 
This is a critical bug that need to be report on `https://github.com/topos-protocol/topos/issues` "#, )] } fn default_public_addresses() -> Vec { vec![format!( "/ip4/{}/tcp/{}", default_libp2p_api_addr().ip(), default_libp2p_api_addr().port() ) .parse() .expect( r#" Public multiaddresses generation failure. This is a critical bug that need to be report on `https://github.com/topos-protocol/topos/issues` "#, )] } ================================================ FILE: crates/topos-config/src/tce/synchronization.rs ================================================ use serde::{Deserialize, Serialize}; /// Configuration for the TCE synchronization #[derive(Serialize, Deserialize, Debug, Clone)] #[serde(rename_all = "kebab-case")] pub struct SynchronizationConfig { /// Interval in seconds to synchronize the TCE #[serde(default = "SynchronizationConfig::default_interval_seconds")] pub interval_seconds: u64, /// Maximum number of Proof of delivery per query per subnet #[serde(default = "SynchronizationConfig::default_limit_per_subnet")] pub limit_per_subnet: usize, } impl Default for SynchronizationConfig { fn default() -> Self { Self { interval_seconds: SynchronizationConfig::INTERVAL_SECONDS, limit_per_subnet: SynchronizationConfig::LIMIT_PER_SUBNET, } } } impl SynchronizationConfig { pub const INTERVAL_SECONDS: u64 = 10; pub const LIMIT_PER_SUBNET: usize = 100; const fn default_interval_seconds() -> u64 { Self::INTERVAL_SECONDS } const fn default_limit_per_subnet() -> usize { Self::LIMIT_PER_SUBNET } } ================================================ FILE: crates/topos-config/src/tce.rs ================================================ use std::collections::HashSet; use std::path::Path; use std::{net::SocketAddr, path::PathBuf}; use figment::{ providers::{Format, Toml}, Figment, }; use serde::{Deserialize, Serialize}; use topos_core::types::ValidatorId; use topos_p2p::config::NetworkConfig; use crate::Config; use topos_p2p::{Multiaddr, PeerId}; use 
self::broadcast::ReliableBroadcastParams; use self::p2p::P2PConfig; use self::synchronization::SynchronizationConfig; pub mod broadcast; pub mod p2p; pub mod synchronization; const DEFAULT_IP: std::net::Ipv4Addr = std::net::Ipv4Addr::new(0, 0, 0, 0); #[derive(Debug)] pub enum AuthKey { Seed(Vec), PrivateKey(Vec), } #[derive(Default, Debug)] pub enum StorageConfiguration { #[default] RAM, RocksDB(Option), } #[derive(Serialize, Deserialize, Debug)] #[serde(rename_all = "kebab-case")] pub struct TceConfig { #[serde(skip)] pub auth_key: Option, #[serde(skip)] pub signing_key: Option, #[serde(skip)] pub tce_params: ReliableBroadcastParams, #[serde(skip)] pub boot_peers: Vec<(PeerId, Multiaddr)>, #[serde(skip)] pub validators: HashSet, #[serde(skip)] pub storage: StorageConfiguration, #[serde(skip)] pub version: &'static str, /// Storage database path, if not set RAM storage is used #[serde(default = "default_db_path")] pub db_path: PathBuf, /// Array of extra boot nodes to connect to pub extra_boot_peers: Option, /// Connection degree for the GossipSub overlay #[serde(default = "default_minimum_tce_cluster_size")] pub minimum_tce_cluster_size: usize, /// libp2p addresses pub libp2p_api_addr: Option, /// P2P configuration #[serde(default)] pub p2p: P2PConfig, /// Synchronization configuration #[serde(default)] pub synchronization: SynchronizationConfig, /// gRPC API Addr #[serde(default = "default_grpc_api_addr")] pub grpc_api_addr: SocketAddr, /// GraphQL API Addr #[serde(default = "default_graphql_api_addr")] pub graphql_api_addr: SocketAddr, /// Metrics server API Addr #[serde(default = "default_metrics_api_addr")] pub metrics_api_addr: SocketAddr, /// Socket of the opentelemetry agent endpoint /// If not provided open telemetry will not be used pub otlp_agent: Option, /// Otlp service name /// If not provided open telemetry will not be used pub otlp_service_name: Option, #[serde(default = "default_network_bootstrap_timeout")] pub network_bootstrap_timeout: u64, } 
const fn default_network_bootstrap_timeout() -> u64 { 90 } fn default_db_path() -> PathBuf { PathBuf::from("./tce_rocksdb") } const fn default_minimum_tce_cluster_size() -> usize { NetworkConfig::MINIMUM_CLUSTER_SIZE } const fn default_grpc_api_addr() -> SocketAddr { SocketAddr::V4(std::net::SocketAddrV4::new(DEFAULT_IP, 1340)) } const fn default_graphql_api_addr() -> SocketAddr { SocketAddr::V4(std::net::SocketAddrV4::new(DEFAULT_IP, 4030)) } const fn default_metrics_api_addr() -> SocketAddr { SocketAddr::V4(std::net::SocketAddrV4::new(DEFAULT_IP, 3000)) } impl TceConfig { pub fn parse_boot_peers(&self) -> Vec<(PeerId, Multiaddr)> { self.extra_boot_peers .clone() .unwrap_or_default() .split(&[',', ' ']) .map(|s| s.to_string()) .collect::>() .chunks(2) .filter_map(|pair| { if pair.len() > 1 { Some(( pair[0].as_str().parse().unwrap(), pair[1].as_str().parse().unwrap(), )) } else { None } }) .collect() } } impl Config for TceConfig { type Output = TceConfig; fn load_from_file(figment: Figment, home: &Path) -> Figment { let home = home.join("config.toml"); let tce = Figment::new() .merge(Toml::file(home).nested()) .select("tce"); figment.merge(tce) } fn load_context(figment: Figment) -> Result { figment.extract() } fn profile() -> String { "tce".to_string() } } ================================================ FILE: crates/topos-core/.rustfmt.toml ================================================ unstable_features = true ignore = [ "src/generated", ] ================================================ FILE: crates/topos-core/Cargo.toml ================================================ [package] name = "topos-core" version = "0.1.0" edition = "2021" [lints] workspace = true [dependencies] topos-crypto = { path = "../topos-crypto" } bincode.workspace = true thiserror.workspace = true hex.workspace = true ethereum-types.workspace = true tonic = { workspace = true, default-features = false, features = [ "prost", "codegen", "transport", ] } async-graphql.workspace = true 
async-trait.workspace = true base64ct.workspace = true prost.workspace = true serde = { workspace = true, features = ["derive"] } tracing.workspace = true uuid.workspace = true [build-dependencies] tonic-build = { version = "0.11", default-features = false, features = [ "prost", "transport" ] } [dev-dependencies] async-stream.workspace = true env_logger.workspace = true futures.workspace = true rstest.workspace = true test-log.workspace = true tokio-stream.workspace = true tokio.workspace = true tracing-subscriber = { workspace = true, features = ["env-filter", "fmt"] } tracing.workspace = true topos-test-sdk = { path = "../topos-test-sdk/" } [features] default = [] uci = [] api = [] [package.metadata.docs.rs] all-features = true # enable unstable features in the documentation rustc-args = ["--cfg", "docsrs"] ================================================ FILE: crates/topos-core/build.rs ================================================ use std::path::PathBuf; fn main() -> Result<(), Box> { let descriptor_path = PathBuf::from("src/api/grpc/generated").join("topos.bin"); tonic_build::configure() .file_descriptor_set_path(descriptor_path) .type_attribute( ".topos.shared.v1.UUID", "#[derive(Copy, serde::Deserialize, serde::Serialize)]", ) .type_attribute( ".topos.shared.v1.SubnetId", "#[derive(Eq, Hash, serde::Deserialize, serde::Serialize)]", ) .type_attribute( ".topos.shared.v1.CertificateId", "#[derive(Eq, Hash, serde::Deserialize, serde::Serialize)]", ) .type_attribute( ".topos.shared.v1.Frost", "#[derive(Eq, Hash, serde::Deserialize, serde::Serialize)]", ) .type_attribute( ".topos.shared.v1.StarkProof", "#[derive(Eq, Hash, serde::Deserialize, serde::Serialize)]", ) .type_attribute( ".topos.tce.v1.SignedReady", "#[derive(serde::Deserialize, serde::Serialize)]", ) .type_attribute( ".topos.shared.v1.Positions.SourceStreamPosition", "#[derive(serde::Deserialize, serde::Serialize)]", ) .type_attribute( ".topos.tce.v1.ProofOfDelivery", "#[derive(serde::Deserialize, 
serde::Serialize)]", ) .type_attribute( ".topos.tce.v1.CheckpointResponse", "#[derive(serde::Deserialize, serde::Serialize)]", ) .type_attribute( ".topos.tce.v1.CheckpointRequest", "#[derive(serde::Deserialize, serde::Serialize)]", ) .type_attribute( ".topos.tce.v1.CheckpointMapFieldEntry", "#[derive(serde::Deserialize, serde::Serialize)]", ) .type_attribute( ".topos.shared.v1.EcdsaSignature", "#[derive(Eq, Hash, serde::Deserialize, serde::Serialize)]", ) .type_attribute( ".topos.shared.v1.ValidatorId", "#[derive(Eq, Hash, serde::Deserialize, serde::Serialize)]", ) .type_attribute( ".topos.tce.v1.Gossip", "#[derive(serde::Deserialize, serde::Serialize)]", ) .type_attribute( ".topos.tce.v1.Echo", "#[derive(serde::Deserialize, serde::Serialize)]", ) .type_attribute( ".topos.tce.v1.Ready", "#[derive(serde::Deserialize, serde::Serialize)]", ) .type_attribute( ".topos.tce.v1.DoubleEchoRequest", "#[derive(serde::Deserialize, serde::Serialize)]", ) .type_attribute( ".topos.tce.v1.Batch", "#[derive(serde::Deserialize, serde::Serialize)]", ) .type_attribute( ".topos.uci.v1.Certificate", "#[derive(Eq, Hash, serde::Deserialize, serde::Serialize)]", ) .out_dir("src/api/grpc/generated") .compile( &[ "proto/topos/shared/v1/uuid.proto", "proto/topos/shared/v1/subnet.proto", "proto/topos/shared/v1/validator_id.proto", "proto/topos/tce/v1/api.proto", "proto/topos/tce/v1/console.proto", "proto/topos/tce/v1/synchronization.proto", "proto/topos/tce/v1/double_echo.proto", "proto/topos/tce/v1/gossipsub.proto", "proto/topos/uci/v1/certification.proto", "proto/topos/p2p/info.proto", ], &["proto"], )?; Ok(()) } ================================================ FILE: crates/topos-core/proto/buf.yaml ================================================ version: v1 breaking: use: - FILE lint: use: - DEFAULT ================================================ FILE: crates/topos-core/proto/topos/p2p/info.proto ================================================ syntax = "proto3"; package topos.p2p; 
service InfoService { } ================================================ FILE: crates/topos-core/proto/topos/shared/v1/certificate.proto ================================================ syntax = "proto3"; package topos.shared.v1; message CertificateId { bytes value = 1; } ================================================ FILE: crates/topos-core/proto/topos/shared/v1/checkpoints.proto ================================================ syntax = "proto3"; package topos.shared.v1; import "topos/shared/v1/certificate.proto"; import "topos/shared/v1/subnet.proto"; // Checkpoints are used to walk through streams message Checkpoints { // SourceCheckpoint represents a snapshot of multiple stream's positions regarding // one or multiple source subnets. message SourceCheckpoint { repeated SubnetId source_subnet_ids = 1; repeated Positions.SourceStreamPosition positions = 2; } // TargetCheckpoint represents a snapshot of multiple stream's positions regarding // one or multiple target subnets. message TargetCheckpoint { repeated SubnetId target_subnet_ids = 1; repeated Positions.TargetStreamPosition positions = 2; } } message Positions { // SourceStreamPosition represents a single point in a source stream. // It is defined by a source_subnet_id and a position, resolving to a certificate_id message SourceStreamPosition { // The source_subnet_id is a mandatory field for the SourceStreamPosition SubnetId source_subnet_id = 1; uint64 position = 2; CertificateId certificate_id = 3; } // TargetStreamPosition represents a single point in a target stream regarding a source subnet. 
// It is defined by a target_subnet_id, source_subnet_id and a position, resolving to a certificate_id message TargetStreamPosition { // The source_subnet_id is a mandatory field for the TargetStreamPosition SubnetId source_subnet_id = 1; // The target_subnet_id is a mandatory field for the TargetStreamPosition SubnetId target_subnet_id = 2; uint64 position = 3; CertificateId certificate_id = 4; } } ================================================ FILE: crates/topos-core/proto/topos/shared/v1/frost.proto ================================================ syntax = "proto3"; package topos.shared.v1; message Frost { bytes value = 1; } ================================================ FILE: crates/topos-core/proto/topos/shared/v1/signature.proto ================================================ syntax = "proto3"; package topos.shared.v1; // A signature using the ECDSA algorithm. // Used to sign double echo protocol messages. message EcdsaSignature { bytes r = 1; bytes s = 2; uint64 v = 3; } ================================================ FILE: crates/topos-core/proto/topos/shared/v1/stark_proof.proto ================================================ syntax = "proto3"; package topos.shared.v1; message StarkProof { bytes value = 1; } ================================================ FILE: crates/topos-core/proto/topos/shared/v1/subnet.proto ================================================ syntax = "proto3"; package topos.shared.v1; message SubnetId { bytes value = 1; } ================================================ FILE: crates/topos-core/proto/topos/shared/v1/uuid.proto ================================================ syntax = "proto3"; package topos.shared.v1; message UUID { uint64 most_significant_bits = 1; uint64 least_significant_bits = 2; } ================================================ FILE: crates/topos-core/proto/topos/shared/v1/validator_id.proto ================================================ syntax = "proto3"; package topos.shared.v1; // Id of the validator 
in the Topos protocol network // This is the same as the validator's H160 address in the Ethereum compatible network message ValidatorId { // The validator's H160 address bytes value = 1; } ================================================ FILE: crates/topos-core/proto/topos/tce/v1/api.proto ================================================ syntax = "proto3"; package topos.tce.v1; import "topos/shared/v1/checkpoints.proto"; import "topos/shared/v1/subnet.proto"; import "topos/shared/v1/uuid.proto"; import "topos/tce/v1/synchronization.proto"; import "topos/uci/v1/certification.proto"; import "topos/shared/v1/certificate.proto"; service APIService { rpc SubmitCertificate(SubmitCertificateRequest) returns (SubmitCertificateResponse); rpc GetSourceHead(GetSourceHeadRequest) returns (GetSourceHeadResponse); /// This RPC allows a client to get latest pending certificates for /// requested subnets (by their subnet id) /// /// Returns a map of subnet_id -> last pending certificate /// If there are no pending certificate for a subnet, returns None for that subnet id rpc GetLastPendingCertificates(GetLastPendingCertificatesRequest) returns (GetLastPendingCertificatesResponse); // This RPC allows a client to open a bidirectional stream with a TCE rpc WatchCertificates(stream WatchCertificatesRequest) returns (stream WatchCertificatesResponse); } message SubmitCertificateRequest { topos.uci.v1.Certificate certificate = 1; } message SubmitCertificateResponse {} message GetSourceHeadRequest { topos.shared.v1.SubnetId subnet_id = 1; } message GetSourceHeadResponse { topos.shared.v1.Positions.SourceStreamPosition position = 1; topos.uci.v1.Certificate certificate = 2; } message GetLastPendingCertificatesRequest { repeated topos.shared.v1.SubnetId subnet_ids = 1; } message LastPendingCertificate { topos.uci.v1.Certificate value = 1; // Pending certificate index (effectively total number of pending certificates) uint64 index = 2; } message GetLastPendingCertificatesResponse { // 
Bytes and array types (SubnetId) could not be key in the map type according to specifications, // so we use SubnetId hex encoded string with 0x prefix as key map last_pending_certificate = 1; } message WatchCertificatesRequest { // Provide a request_id to track response topos.shared.v1.UUID request_id = 1; // Define which command needs to be performed oneof command { OpenStream open_stream = 2; } // Sent to start receiving events and being able to send further command message OpenStream { topos.shared.v1.Checkpoints.TargetCheckpoint target_checkpoint = 1; topos.shared.v1.Checkpoints.SourceCheckpoint source_checkpoint = 2; } } message WatchCertificatesResponse { // If the response is directly linked to a request this ID allow one to track it topos.shared.v1.UUID request_id = 1; oneof event { StreamOpened stream_opened = 2; CertificatePushed certificate_pushed = 3; } // Sent by the TCE when the stream is ready to be used and // that certificates will start being pushed message StreamOpened { repeated topos.shared.v1.SubnetId subnet_ids = 1; } // Target Certificate pushed from the TCE to the sequencer message CertificatePushed { topos.uci.v1.Certificate certificate = 1; repeated topos.shared.v1.Positions.TargetStreamPosition positions = 2; } } ================================================ FILE: crates/topos-core/proto/topos/tce/v1/console.proto ================================================ syntax = "proto3"; package topos.tce.v1; import "topos/shared/v1/uuid.proto"; service ConsoleService { rpc Status(StatusRequest) returns (StatusResponse); } message StatusRequest {} message StatusResponse { bool has_active_sample = 1; } ================================================ FILE: crates/topos-core/proto/topos/tce/v1/double_echo.proto ================================================ syntax = "proto3"; package topos.tce.v1; import "topos/shared/v1/certificate.proto"; import "topos/shared/v1/signature.proto"; import "topos/shared/v1/validator_id.proto"; import 
"topos/uci/v1/certification.proto"; message Gossip { topos.uci.v1.Certificate certificate = 1; } message Echo { topos.shared.v1.CertificateId certificate_id = 1; topos.shared.v1.EcdsaSignature signature = 2; topos.shared.v1.ValidatorId validator_id = 3; } message Ready { topos.shared.v1.CertificateId certificate_id = 1; topos.shared.v1.EcdsaSignature signature = 2; topos.shared.v1.ValidatorId validator_id = 3; } message DoubleEchoRequest { oneof request { Gossip gossip = 1; Echo echo = 2; Ready ready = 3; } } ================================================ FILE: crates/topos-core/proto/topos/tce/v1/gossipsub.proto ================================================ syntax = "proto3"; package topos.tce.v1; import "topos/tce/v1/double_echo.proto"; message Batch { repeated bytes messages = 1; } ================================================ FILE: crates/topos-core/proto/topos/tce/v1/synchronization.proto ================================================ syntax = "proto3"; package topos.tce.v1; import "topos/shared/v1/checkpoints.proto"; import "topos/shared/v1/certificate.proto"; import "topos/shared/v1/subnet.proto"; import "topos/shared/v1/uuid.proto"; import "topos/uci/v1/certification.proto"; service SynchronizerService { rpc fetch_checkpoint(CheckpointRequest) returns (CheckpointResponse); rpc fetch_certificates(FetchCertificatesRequest) returns (FetchCertificatesResponse); } message CheckpointRequest { // Provide a request_id to track response topos.shared.v1.UUID request_id = 1; repeated ProofOfDelivery checkpoint = 2; uint64 limit_per_subnet = 3; } message CheckpointResponse { // If the response is directly linked to a request this ID allow one to track it topos.shared.v1.UUID request_id = 1; repeated CheckpointMapFieldEntry checkpoint_diff = 2; } message CheckpointMapFieldEntry { string key = 1; repeated ProofOfDelivery value = 2; } message FetchCertificatesRequest { // Provide a request_id to track response topos.shared.v1.UUID request_id = 1; repeated 
topos.shared.v1.CertificateId certificates = 2; } message FetchCertificatesResponse { // Provide a request_id to track response topos.shared.v1.UUID request_id = 1; repeated topos.uci.v1.Certificate certificates =2; } message ProofOfDelivery { topos.shared.v1.Positions.SourceStreamPosition delivery_position = 1; repeated SignedReady readies = 2; uint64 threshold = 3; } message SignedReady { string ready = 1; string signature = 2; } ================================================ FILE: crates/topos-core/proto/topos/uci/v1/certification.proto ================================================ syntax = "proto3"; package topos.uci.v1; import "topos/shared/v1/certificate.proto"; import "topos/shared/v1/frost.proto"; import "topos/shared/v1/stark_proof.proto"; import "topos/shared/v1/subnet.proto"; // Certificate - main exchange item message Certificate { topos.shared.v1.CertificateId prev_id = 1; topos.shared.v1.SubnetId source_subnet_id = 2; bytes state_root = 3; bytes tx_root_hash = 4; bytes receipts_root_hash = 5; repeated topos.shared.v1.SubnetId target_subnets = 6; uint32 verifier = 7; topos.shared.v1.CertificateId id = 8; topos.shared.v1.StarkProof proof = 9; topos.shared.v1.Frost signature = 10; } message OptionalCertificate { Certificate value = 1; } ================================================ FILE: crates/topos-core/src/api/graphql/certificate.rs ================================================ use async_graphql::{NewType, SimpleObject}; use serde::{Deserialize, Serialize}; use crate::{types::CertificateDelivered, uci}; use super::{checkpoint::SourceStreamPosition, subnet::SubnetId}; #[derive(Serialize, Deserialize, Debug, NewType)] pub struct CertificateId(String); impl From for CertificateId { fn from(value: uci::CertificateId) -> Self { Self(value.to_string()) } } #[derive(Serialize, Deserialize, Debug, SimpleObject)] #[serde(rename_all = "camelCase")] pub struct CertificatePositions { source: SourceStreamPosition, } /// A certificate that has been 
delivered #[derive(Debug, Serialize, Deserialize, SimpleObject)] #[serde(rename_all = "camelCase")] pub struct Certificate { pub id: CertificateId, pub prev_id: CertificateId, pub proof: String, pub signature: String, pub source_subnet_id: SubnetId, pub state_root: String, pub target_subnets: Vec, pub tx_root_hash: String, pub receipts_root_hash: String, pub verifier: u32, pub positions: CertificatePositions, } /// A certificate that has not been delivered yet #[derive(Debug, Serialize, Deserialize, SimpleObject)] #[serde(rename_all = "camelCase")] pub struct UndeliveredCertificate { pub id: CertificateId, pub prev_id: CertificateId, pub proof: String, pub signature: String, pub source_subnet_id: SubnetId, pub state_root: String, pub target_subnets: Vec, pub tx_root_hash: String, pub receipts_root_hash: String, pub verifier: u32, } impl From<&uci::Certificate> for UndeliveredCertificate { fn from(value: &crate::uci::Certificate) -> Self { Self { id: CertificateId(value.id.to_string()), prev_id: CertificateId(value.prev_id.to_string()), proof: hex::encode(&value.proof), signature: hex::encode(&value.signature), source_subnet_id: (&value.source_subnet_id).into(), state_root: hex::encode(value.state_root), target_subnets: value.target_subnets.iter().map(Into::into).collect(), tx_root_hash: hex::encode(value.tx_root_hash), receipts_root_hash: format!("0x{}", hex::encode(value.receipts_root_hash)), verifier: value.verifier, } } } #[derive(Debug, Serialize, Deserialize, SimpleObject)] pub struct Ready { message: String, signature: String, } impl From<&CertificateDelivered> for Certificate { fn from(value: &CertificateDelivered) -> Self { let uci_cert = &value.certificate; Self { id: CertificateId(uci_cert.id.to_string()), prev_id: CertificateId(uci_cert.prev_id.to_string()), proof: hex::encode(&uci_cert.proof), signature: hex::encode(&uci_cert.signature), source_subnet_id: (&uci_cert.source_subnet_id).into(), state_root: hex::encode(uci_cert.state_root), target_subnets: 
uci_cert.target_subnets.iter().map(Into::into).collect(), tx_root_hash: hex::encode(uci_cert.tx_root_hash), receipts_root_hash: format!("0x{}", hex::encode(uci_cert.receipts_root_hash)), verifier: uci_cert.verifier, positions: CertificatePositions { source: (&value.proof_of_delivery).into(), }, } } } impl TryFrom for crate::uci::CertificateId { type Error = uci::Error; fn try_from(value: CertificateId) -> Result { crate::uci::CertificateId::try_from(value.0.as_bytes()) } } ================================================ FILE: crates/topos-core/src/api/graphql/checkpoint.rs ================================================ use async_graphql::{InputObject, SimpleObject}; use serde::{Deserialize, Serialize}; use crate::types::ProofOfDelivery; use super::{certificate::CertificateId, subnet::SubnetId}; #[derive(InputObject)] pub struct SourceStreamPositionInput { pub source_subnet_id: SubnetId, pub position: u64, pub certificate_id: Option, } #[derive(Debug, Deserialize, Serialize, SimpleObject)] #[serde(rename_all = "camelCase")] pub struct SourceStreamPosition { pub source_subnet_id: SubnetId, pub position: u64, pub certificate_id: CertificateId, } impl From<&ProofOfDelivery> for SourceStreamPosition { fn from(value: &ProofOfDelivery) -> Self { Self { certificate_id: value.certificate_id.into(), source_subnet_id: (&value.delivery_position.subnet_id).into(), position: *value.delivery_position.position, } } } #[derive(InputObject)] pub struct SourceCheckpointInput { pub source_subnet_ids: Vec, pub positions: Vec, } ================================================ FILE: crates/topos-core/src/api/graphql/errors.rs ================================================ #[derive(Debug, thiserror::Error)] pub enum GraphQLServerError { #[error("The provided data layer is invalid")] ParseDataConnector, #[error("The provided subnet_id is not a proper HEX value")] ParseSubnetId, #[error("The provided certificate_id is not a proper HEX value")] ParseCertificateId, #[error("Internal 
Server Error")] StorageError, #[error("Certificate not found")] CertificateNotFound, #[error("Unable to create transient stream: {0}")] TransientStream(String), #[error("Internal API error: {0}")] InternalError(&'static str), } ================================================ FILE: crates/topos-core/src/api/graphql/filter.rs ================================================ use crate::api::graphql::subnet::SubnetId; #[derive(Debug, serde::Serialize, serde::Deserialize, async_graphql::OneofObject)] pub enum SubnetFilter { Source(SubnetId), Target(SubnetId), } ================================================ FILE: crates/topos-core/src/api/graphql/mod.rs ================================================ pub mod certificate; pub mod checkpoint; pub mod errors; pub mod filter; pub mod query; pub mod subnet; ================================================ FILE: crates/topos-core/src/api/graphql/query.rs ================================================ use crate::api::graphql::certificate::{Certificate, CertificateId}; use crate::api::graphql::checkpoint::SourceCheckpointInput; use crate::api::graphql::errors::GraphQLServerError; use async_graphql::Context; use async_trait::async_trait; #[async_trait] pub trait CertificateQuery { async fn certificates_per_subnet( ctx: &Context<'_>, from_source_checkpoint: SourceCheckpointInput, first: usize, ) -> Result, GraphQLServerError>; async fn certificate_by_id( ctx: &Context<'_>, certificate_id: CertificateId, ) -> Result; } ================================================ FILE: crates/topos-core/src/api/graphql/subnet.rs ================================================ use async_graphql::NewType; use serde::{Deserialize, Serialize}; use std::str::FromStr; use tracing::error; use super::errors::GraphQLServerError; #[derive(Clone, Debug, Serialize, Deserialize, NewType, PartialEq, Eq)] pub struct SubnetId(pub(crate) String); impl TryFrom<&SubnetId> for crate::uci::SubnetId { type Error = GraphQLServerError; fn try_from(value: 
&SubnetId) -> Result { Self::from_str(value.0.as_str()).map_err(|e| { error!("Failed to convert SubnetId from GraphQL input {e:?}"); GraphQLServerError::ParseDataConnector }) } } impl From<&crate::uci::SubnetId> for SubnetId { fn from(uci_id: &crate::uci::SubnetId) -> Self { Self(uci_id.to_string()) } } impl PartialEq for SubnetId { fn eq(&self, other: &crate::uci::SubnetId) -> bool { if let Ok(current) = crate::uci::SubnetId::from_str(&self.0) { other.as_array().eq(current.as_array()) } else { error!("Failed to parse the subnet id {} during comparison", self.0); false } } } ================================================ FILE: crates/topos-core/src/api/grpc/checkpoints/errors.rs ================================================ use crate::api::grpc::shared::v1_conversions_subnet::Error; #[derive(Debug, thiserror::Error)] pub enum TargetCheckpointError { #[error("Subnet format is invalid")] InvalidSubnetFormat, #[error("Invalid target stream position")] InvalidTargetStreamPosition, #[error("Checkpoint parse error")] ParseError, } #[derive(Debug, thiserror::Error)] pub enum StreamPositionError { #[error("The target_subnet_id field is missing")] MissingTargetSubnetId, #[error("The source_subnet_id field is missing")] MissingSourceSubnetId, #[error("Unable to parse SubnetId: {0}")] InvalidSubnetFormat(#[from] Error), #[error("Unable to parse CertificateId")] InvalidCertificateIdFormat, } ================================================ FILE: crates/topos-core/src/api/grpc/checkpoints/mod.rs ================================================ use crate::api::grpc::shared::v1 as shared_v1; use crate::uci::SubnetId; mod errors; mod positions; pub use errors::*; pub use positions::*; #[derive(Debug, Default, PartialEq, Eq)] pub struct TargetCheckpoint { pub target_subnet_ids: Vec, pub positions: Vec, } impl TryFrom for TargetCheckpoint { type Error = TargetCheckpointError; fn try_from(value: shared_v1::checkpoints::TargetCheckpoint) -> Result { Ok(TargetCheckpoint { 
target_subnet_ids: value .target_subnet_ids .into_iter() .map(TryInto::try_into) .collect::, _>>() .map_err(|_| TargetCheckpointError::InvalidSubnetFormat)?, positions: value .positions .into_iter() .map(TryInto::try_into) .collect::, _>>() .map_err(|_| TargetCheckpointError::InvalidTargetStreamPosition)?, }) } } impl From for shared_v1::checkpoints::TargetCheckpoint { fn from(value: TargetCheckpoint) -> Self { Self { target_subnet_ids: value .target_subnet_ids .into_iter() .map(Into::into) .collect(), positions: value.positions.into_iter().map(Into::into).collect(), } } } ================================================ FILE: crates/topos-core/src/api/grpc/checkpoints/positions.rs ================================================ use crate::api::grpc::checkpoints::StreamPositionError; use crate::api::grpc::shared::v1 as shared_v1; use crate::uci::{CertificateId, SubnetId}; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct TargetStreamPosition { pub target_subnet_id: SubnetId, pub source_subnet_id: SubnetId, pub position: u64, pub certificate_id: Option, } impl TryFrom for TargetStreamPosition { type Error = StreamPositionError; fn try_from(value: shared_v1::positions::TargetStreamPosition) -> Result { Ok(Self { target_subnet_id: value .target_subnet_id .map(TryInto::try_into) .ok_or(StreamPositionError::MissingTargetSubnetId)??, source_subnet_id: value .source_subnet_id .map(TryInto::try_into) .ok_or(StreamPositionError::MissingSourceSubnetId)??, position: value.position, certificate_id: value .certificate_id .map(TryInto::try_into) .map_or(Ok(None), |v| { v.map(Some) .map_err(|_| StreamPositionError::InvalidCertificateIdFormat) })?, }) } } impl From for shared_v1::positions::TargetStreamPosition { fn from(value: TargetStreamPosition) -> Self { Self { source_subnet_id: Some(value.source_subnet_id.into()), target_subnet_id: Some(value.target_subnet_id.into()), position: value.position, certificate_id: value.certificate_id.map(Into::into), } } } #[derive(Debug, 
Clone, PartialEq, Eq, Hash)] pub struct SourceStreamPosition { pub source_subnet_id: SubnetId, pub position: u64, pub certificate_id: Option, } impl TryFrom for SourceStreamPosition { type Error = StreamPositionError; fn try_from(value: shared_v1::positions::SourceStreamPosition) -> Result { Ok(Self { source_subnet_id: value .source_subnet_id .map(TryInto::try_into) .ok_or(StreamPositionError::MissingSourceSubnetId)??, position: value.position, certificate_id: value .certificate_id .map(TryInto::try_into) .map_or(Ok(None), |v| { v.map(Some) .map_err(|_| StreamPositionError::InvalidCertificateIdFormat) })?, }) } } impl From for shared_v1::positions::SourceStreamPosition { fn from(value: SourceStreamPosition) -> Self { Self { source_subnet_id: Some(value.source_subnet_id.into()), position: value.position, certificate_id: value.certificate_id.map(Into::into), } } } ================================================ FILE: crates/topos-core/src/api/grpc/conversions/shared/v1/certificate.rs ================================================ use crate::uci::CERTIFICATE_ID_LENGTH; use super::v1::CertificateId; impl std::fmt::Display for CertificateId { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "0x{}", hex::encode(&self.value)) } } #[derive(Debug, thiserror::Error)] pub enum Error { #[error("Unable to parse certificateId ({0})")] ValidationError(CertificateId), } impl From<[u8; CERTIFICATE_ID_LENGTH]> for CertificateId { fn from(value: [u8; CERTIFICATE_ID_LENGTH]) -> Self { CertificateId { value: value.to_vec(), } } } impl From for CertificateId { fn from(value: crate::uci::CertificateId) -> Self { CertificateId { value: value.as_array().to_vec(), } } } impl TryFrom for crate::uci::CertificateId { type Error = Error; fn try_from(value: CertificateId) -> Result { if value.value.len() != CERTIFICATE_ID_LENGTH { return Err(Error::ValidationError(value)); } let mut id = [0; CERTIFICATE_ID_LENGTH]; id.copy_from_slice(value.value.as_slice()); 
Ok(id.into()) } } impl PartialEq for crate::uci::CertificateId { fn eq(&self, other: &CertificateId) -> bool { if other.value.len() != CERTIFICATE_ID_LENGTH { return false; } self.as_array() == &other.value[..CERTIFICATE_ID_LENGTH] } } ================================================ FILE: crates/topos-core/src/api/grpc/conversions/shared/v1/signature.rs ================================================ use super::v1::EcdsaSignature; use topos_crypto::messages::U256; impl From for topos_crypto::messages::Signature { fn from(proto: EcdsaSignature) -> Self { topos_crypto::messages::Signature { r: U256::from_big_endian(&proto.r), s: U256::from_big_endian(&proto.s), v: proto.v, } } } impl From for EcdsaSignature { fn from(other: topos_crypto::messages::Signature) -> Self { let mut ecdsa_signature = EcdsaSignature { r: vec![0; 32], s: vec![0; 32], v: 0, }; other.r.to_big_endian(&mut ecdsa_signature.r); other.s.to_big_endian(&mut ecdsa_signature.s); ecdsa_signature.v = other.v; ecdsa_signature } } ================================================ FILE: crates/topos-core/src/api/grpc/conversions/shared/v1/subnet.rs ================================================ use crate::uci::SUBNET_ID_LENGTH; use super::v1::SubnetId; use base64ct::{Base64, Encoding}; impl std::fmt::Display for SubnetId { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{}", Base64::encode_string(&self.value)) } } #[derive(Debug, thiserror::Error)] pub enum Error { #[error("Unable to parse subnetId ({0})")] ValidationError(SubnetId), #[error("Unable to parse UCI field ({0}))")] UCI(#[from] crate::uci::Error), #[error("Missing mandatory field: {0}")] MissingField(&'static str), #[error("Invalid or missing state_root")] InvalidStateRoot, #[error("Invalid or missing tx_root_hash")] InvalidTxRootHash, #[error("Invalid or missing receipts_root_hash")] InvalidReceiptsRootHash, } impl From<[u8; SUBNET_ID_LENGTH]> for SubnetId { fn from(value: [u8; SUBNET_ID_LENGTH]) -> Self { 
SubnetId { value: value.to_vec(), } } } impl TryFrom for [u8; SUBNET_ID_LENGTH] { type Error = Error; fn try_from(value: SubnetId) -> Result { if value.value.len() != SUBNET_ID_LENGTH { return Err(Error::ValidationError(value)); } let mut id = [0; SUBNET_ID_LENGTH]; id.copy_from_slice(value.value.as_slice()); Ok(id) } } impl From for SubnetId { fn from(value: crate::uci::SubnetId) -> Self { SubnetId { value: value.as_array().to_vec(), } } } impl TryFrom for crate::uci::SubnetId { type Error = Error; fn try_from(value: SubnetId) -> Result { if value.value.len() != SUBNET_ID_LENGTH { return Err(Error::ValidationError(value)); } let mut id = [0; SUBNET_ID_LENGTH]; id.copy_from_slice(value.value.as_slice()); Ok(id.into()) } } ================================================ FILE: crates/topos-core/src/api/grpc/conversions/shared/v1/uuid.rs ================================================ use super::v1::Uuid; impl From<(u64, u64)> for Uuid { fn from((most_significant_bits, least_significant_bits): (u64, u64)) -> Self { Self { most_significant_bits, least_significant_bits, } } } impl From for uuid::Uuid { fn from(proto: Uuid) -> Self { Self::from_u64_pair(proto.most_significant_bits, proto.least_significant_bits) } } impl From for Uuid { fn from(uuid: uuid::Uuid) -> Self { uuid.as_u64_pair().into() } } ================================================ FILE: crates/topos-core/src/api/grpc/conversions/shared/v1/validator_id.rs ================================================ use super::v1::ValidatorId; use topos_crypto::messages::H160; use topos_crypto::validator_id::{Error, VALIDATOR_ID_LENGTH}; impl std::fmt::Display for ValidatorId { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "0x{}", hex::encode(&self.value)) } } impl From for ValidatorId { fn from(other: topos_crypto::validator_id::ValidatorId) -> Self { ValidatorId { value: other.as_bytes().to_vec(), } } } impl TryFrom for topos_crypto::validator_id::ValidatorId { type Error = Error; 
fn try_from(other: ValidatorId) -> Result { if other.value.len() != VALIDATOR_ID_LENGTH { return Err(Error::InvalidByteLength(hex::encode(other.value))); } let mut value = [0; VALIDATOR_ID_LENGTH]; value.copy_from_slice(other.value.as_slice()); Ok(H160::from_slice(&value).into()) } } impl PartialEq for topos_crypto::validator_id::ValidatorId { fn eq(&self, other: &ValidatorId) -> bool { if other.value.len() != VALIDATOR_ID_LENGTH { return false; } self.as_bytes() == &other.value[..VALIDATOR_ID_LENGTH] } } ================================================ FILE: crates/topos-core/src/api/grpc/conversions/tce/v1/api.rs ================================================ use crate::api::grpc::tce::v1::{ watch_certificates_request::{Command, OpenStream}, watch_certificates_response::{CertificatePushed, Event, StreamOpened}, WatchCertificatesRequest, WatchCertificatesResponse, }; macro_rules! impl_command_conversion { ($type: ident) => { impl From<$type> for WatchCertificatesRequest { fn from(command: $type) -> Self { Self { request_id: Some(uuid::Uuid::new_v4().as_u64_pair().into()), command: Some(Command::$type(command)), } } } }; } macro_rules! 
impl_event_conversion { ($type: ident) => { impl From<$type> for WatchCertificatesResponse { fn from(event: $type) -> Self { Self { request_id: None, event: Some(Event::$type(event)), } } } }; } impl_command_conversion!(OpenStream); impl_event_conversion!(StreamOpened); impl_event_conversion!(CertificatePushed); ================================================ FILE: crates/topos-core/src/api/grpc/conversions/tce/v1/mod.rs ================================================ pub mod api; pub mod synchronization; ================================================ FILE: crates/topos-core/src/api/grpc/conversions/tce/v1/synchronization.rs ================================================ use prost::{bytes::Bytes, Message}; use crate::api::grpc::tce::v1::{ CheckpointRequest, CheckpointResponse, FetchCertificatesRequest, FetchCertificatesResponse, }; use crate::api::grpc::ConversionError; macro_rules! impl_to_vec_conversion { ($($type: ident),*) => { $( impl From<$type> for Vec { fn from(val: $type) -> Self { val.encode_to_vec() } } )* }; } macro_rules! impl_from_vec_conversion { ($($type: ident),*) => { $( impl TryFrom> for $type { type Error = ConversionError; fn try_from(input: Vec) -> Result { let bytes = Bytes::from(input); Ok(Self::decode(bytes)?) } } )* }; } impl_to_vec_conversion!( CheckpointRequest, CheckpointResponse, FetchCertificatesRequest, FetchCertificatesResponse ); impl_from_vec_conversion!( CheckpointResponse, CheckpointRequest, FetchCertificatesRequest, FetchCertificatesResponse ); ================================================ FILE: crates/topos-core/src/api/grpc/conversions/uci/v1/uci.rs ================================================ //! //! Protobuf generated/native Rust structures related conversions for GRPC API //! 
use crate::api::grpc::shared::v1_conversions_subnet::Error; use crate::api::grpc::uci::v1 as proto_v1; impl TryFrom for crate::uci::Certificate { type Error = Error; fn try_from(certificate: proto_v1::Certificate) -> Result { Ok(crate::uci::Certificate { prev_id: certificate .prev_id .ok_or(Error::MissingField("certificate.prev_id"))? .value .as_slice() .try_into()?, source_subnet_id: certificate .source_subnet_id .ok_or(Error::MissingField("certificate.source_subnet_id"))? .value .as_slice() .try_into()?, state_root: certificate .state_root .try_into() .map_err(|_| Error::InvalidStateRoot)?, tx_root_hash: certificate .tx_root_hash .try_into() .map_err(|_| Error::InvalidTxRootHash)?, receipts_root_hash: certificate .receipts_root_hash .try_into() .map_err(|_| Error::InvalidReceiptsRootHash)?, target_subnets: certificate .target_subnets .into_iter() .map(TryInto::try_into) .collect::, _>>()?, verifier: certificate.verifier, id: certificate .id .ok_or(Error::MissingField("certificate.id"))? 
.value .as_slice() .try_into()?, proof: certificate.proof.expect("valid proof").value, signature: certificate.signature.expect("valid frost signature").value, }) } } impl From for proto_v1::Certificate { fn from(certificate: crate::uci::Certificate) -> Self { proto_v1::Certificate { prev_id: Some(crate::api::grpc::shared::v1::CertificateId { value: certificate.prev_id.into(), }), source_subnet_id: Some(crate::api::grpc::shared::v1::SubnetId { value: certificate.source_subnet_id.into(), }), state_root: certificate.state_root.to_vec(), tx_root_hash: certificate.tx_root_hash.to_vec(), receipts_root_hash: certificate.receipts_root_hash.to_vec(), verifier: certificate.verifier, target_subnets: certificate .target_subnets .into_iter() .map(|target_subnet| target_subnet.into()) .collect(), id: Some(crate::api::grpc::shared::v1::CertificateId { value: certificate.id.into(), }), proof: Some(crate::api::grpc::shared::v1::StarkProof { value: certificate.proof, }), signature: Some(crate::api::grpc::shared::v1::Frost { value: certificate.signature, }), } } } #[test] fn test_proto_uci_certificate_conversion_id_random_0x() { use crate::api::grpc::shared::v1::{CertificateId, Frost, StarkProof, SubnetId}; let valid_cert = proto_v1::Certificate { prev_id: Some(CertificateId { value: vec![ 134, 103, 37, 44, 159, 78, 218, 73, 112, 17, 202, 189, 112, 180, 121, 0, 12, 128, 186, 116, 161, 18, 122, 129, 75, 151, 144, 95, 63, 203, 218, 69, ], }), source_subnet_id: Some(SubnetId { value: vec![ 98, 139, 93, 91, 125, 115, 135, 224, 46, 222, 68, 33, 52, 2, 83, 179, 100, 2, 44, 97, 103, 55, 128, 90, 14, 40, 56, 72, 66, 59, 0, 181, ], }), state_root: vec![ 145, 239, 242, 24, 12, 214, 83, 202, 223, 162, 240, 11, 146, 240, 28, 179, 163, 174, 70, 6, 216, 40, 150, 1, 195, 33, 156, 132, 21, 43, 6, 236, ], tx_root_hash: vec![ 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33, ], receipts_root_hash: vec![ 86, 
232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33, ], target_subnets: Vec::new(), verifier: 0, id: Some(CertificateId { value: vec![ 48, 120, 230, 118, 216, 103, 205, 65, 12, 143, 205, 166, 153, 107, 194, 94, 158, 29, 135, 167, 231, 50, 238, 173, 96, 165, 27, 215, 255, 94, 18, 199, ], }), proof: Some(StarkProof { value: Vec::new() }), signature: Some(Frost { value: vec![ 76, 181, 52, 25, 163, 103, 87, 142, 229, 64, 163, 77, 11, 225, 135, 96, 181, 34, 168, 13, 152, 69, 90, 202, 11, 235, 122, 214, 103, 26, 31, 109, 94, 117, 53, 83, 195, 74, 47, 175, 189, 3, 134, 164, 186, 179, 73, 86, 202, 172, 213, 195, 160, 139, 240, 230, 103, 81, 227, 99, 241, 130, 157, 188, ], }), }; if let Err(e) = crate::uci::Certificate::try_from(valid_cert) { panic!("Unable to perform certificate conversion: {e}"); }; } #[test] fn test_proto_uci_certificate_conversion_id_starts_with_0x() { use crate::api::grpc::shared::v1::{CertificateId, Frost, StarkProof, SubnetId}; let mut prev_id = vec![b'0', b'x']; prev_id.append( &mut hex::decode("aac03cadfff6846c9ce72956eee2498011dd7b08689565d6f29e25c0a967ef14") .expect("Valid id"), ); let id = "504b5d01948bc777ba1510ba92a901f516408e4b2a1a5b97fed719430acc9ec9"; let valid_cert = proto_v1::Certificate { prev_id: Some(CertificateId { value: prev_id }), id: Some(CertificateId { value: hex::decode(id).expect("Valid id"), }), source_subnet_id: Some(SubnetId::from([0u8; 32])), state_root: [0u8; 32].to_vec(), tx_root_hash: [0u8; 32].to_vec(), receipts_root_hash: [0u8; 32].to_vec(), proof: Some(StarkProof { value: Vec::new() }), signature: Some(Frost { value: Vec::new() }), ..Default::default() }; let cert: crate::uci::Certificate = match crate::uci::Certificate::try_from(valid_cert) { Ok(cert) => cert, Err(e) => { panic!("Unable to perform certificate conversion: {e}"); } }; println!( "First certificate converted prev_id={}, id={}", cert.prev_id, cert.id ); let prev_id = 
"0xFF4b5d01948bc777ba1510ba92a901f516408e4b2a1a5b97fed719430acc9ec9" .to_string() .into_bytes(); let id = "AA4b5d01948bc777ba1510ba92a901f516408e4b2a1a5b97fed719430acc9ec9" .to_string() .into_bytes(); let valid_cert_2 = proto_v1::Certificate { prev_id: Some(CertificateId { value: prev_id }), id: Some(CertificateId { value: id }), source_subnet_id: Some(SubnetId::from([0u8; 32])), state_root: [0u8; 32].to_vec(), tx_root_hash: [0u8; 32].to_vec(), receipts_root_hash: [0u8; 32].to_vec(), proof: Some(StarkProof { value: Vec::new() }), signature: Some(Frost { value: Vec::new() }), ..Default::default() }; let cert_2: crate::uci::Certificate = match crate::uci::Certificate::try_from(valid_cert_2) { Ok(cert) => cert, Err(e) => { panic!("Unable to perform certificate conversion: {e}"); } }; println!( "Second certificate converted prev_id={}, id={}", cert_2.prev_id, cert_2.id ); } ================================================ FILE: crates/topos-core/src/api/grpc/generated/topos.p2p.rs ================================================ /// Generated client implementations. pub mod info_service_client { #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] use tonic::codegen::*; use tonic::codegen::http::Uri; #[derive(Debug, Clone)] pub struct InfoServiceClient { inner: tonic::client::Grpc, } impl InfoServiceClient { /// Attempt to create a new client by connecting to a given endpoint. 
// NOTE(review): machine-generated code (prost/tonic) for the
// `topos.p2p.InfoService` client/server, followed by the start of the
// generated `topos.shared.v1` message definitions. These files live under
// `generated/` in the repo: do not hand-edit — regenerate from the .proto
// sources instead.
// NOTE(review): generic parameters (e.g. `InfoServiceClient<T>`,
// `tonic::client::Grpc<T>`, `Option<usize>`) appear stripped by the text
// extraction that produced this dump; verify against the checked-in
// generated file before treating this span as compilable.
pub async fn connect(dst: D) -> Result where D: TryInto, D::Error: Into, { let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; Ok(Self::new(conn)) } } impl InfoServiceClient where T: tonic::client::GrpcService, T::Error: Into, T::ResponseBody: Body + Send + 'static, ::Error: Into + Send, { pub fn new(inner: T) -> Self { let inner = tonic::client::Grpc::new(inner); Self { inner } } pub fn with_origin(inner: T, origin: Uri) -> Self { let inner = tonic::client::Grpc::with_origin(inner, origin); Self { inner } } pub fn with_interceptor( inner: T, interceptor: F, ) -> InfoServiceClient> where F: tonic::service::Interceptor, T::ResponseBody: Default, T: tonic::codegen::Service< http::Request, Response = http::Response< >::ResponseBody, >, >, , >>::Error: Into + Send + Sync, { InfoServiceClient::new(InterceptedService::new(inner, interceptor)) } /// Compress requests with the given encoding. /// /// This requires the server to support it otherwise it might respond with an /// error. #[must_use] pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { self.inner = self.inner.send_compressed(encoding); self } /// Enable decompressing responses. #[must_use] pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { self.inner = self.inner.accept_compressed(encoding); self } /// Limits the maximum size of a decoded message. /// /// Default: `4MB` #[must_use] pub fn max_decoding_message_size(mut self, limit: usize) -> Self { self.inner = self.inner.max_decoding_message_size(limit); self } /// Limits the maximum size of an encoded message. /// /// Default: `usize::MAX` #[must_use] pub fn max_encoding_message_size(mut self, limit: usize) -> Self { self.inner = self.inner.max_encoding_message_size(limit); self } } } /// Generated server implementations.
pub mod info_service_server { #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] use tonic::codegen::*; /// Generated trait containing gRPC methods that should be implemented for use with InfoServiceServer. #[async_trait] pub trait InfoService: Send + Sync + 'static {} #[derive(Debug)] pub struct InfoServiceServer { inner: _Inner, accept_compression_encodings: EnabledCompressionEncodings, send_compression_encodings: EnabledCompressionEncodings, max_decoding_message_size: Option, max_encoding_message_size: Option, } struct _Inner(Arc); impl InfoServiceServer { pub fn new(inner: T) -> Self { Self::from_arc(Arc::new(inner)) } pub fn from_arc(inner: Arc) -> Self { let inner = _Inner(inner); Self { inner, accept_compression_encodings: Default::default(), send_compression_encodings: Default::default(), max_decoding_message_size: None, max_encoding_message_size: None, } } pub fn with_interceptor( inner: T, interceptor: F, ) -> InterceptedService where F: tonic::service::Interceptor, { InterceptedService::new(Self::new(inner), interceptor) } /// Enable decompressing requests with the given encoding. #[must_use] pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { self.accept_compression_encodings.enable(encoding); self } /// Compress responses with the given encoding, if the client supports it. #[must_use] pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { self.send_compression_encodings.enable(encoding); self } /// Limits the maximum size of a decoded message. /// /// Default: `4MB` #[must_use] pub fn max_decoding_message_size(mut self, limit: usize) -> Self { self.max_decoding_message_size = Some(limit); self } /// Limits the maximum size of an encoded message.
/// /// Default: `usize::MAX` #[must_use] pub fn max_encoding_message_size(mut self, limit: usize) -> Self { self.max_encoding_message_size = Some(limit); self } } impl tonic::codegen::Service> for InfoServiceServer where T: InfoService, B: Body + Send + 'static, B::Error: Into + Send + 'static, { type Response = http::Response; type Error = std::convert::Infallible; type Future = BoxFuture; fn poll_ready( &mut self, _cx: &mut Context<'_>, ) -> Poll> { Poll::Ready(Ok(())) } fn call(&mut self, req: http::Request) -> Self::Future { let inner = self.inner.clone(); match req.uri().path() { _ => { Box::pin(async move { Ok( http::Response::builder() .status(200) .header("grpc-status", "12") .header("content-type", "application/grpc") .body(empty_body()) .unwrap(), ) }) } } } } impl Clone for InfoServiceServer { fn clone(&self) -> Self { let inner = self.inner.clone(); Self { inner, accept_compression_encodings: self.accept_compression_encodings, send_compression_encodings: self.send_compression_encodings, max_decoding_message_size: self.max_decoding_message_size, max_encoding_message_size: self.max_encoding_message_size, } } } impl Clone for _Inner { fn clone(&self) -> Self { Self(Arc::clone(&self.0)) } } impl std::fmt::Debug for _Inner { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{:?}", self.0) } } impl tonic::server::NamedService for InfoServiceServer { const NAME: &'static str = "topos.p2p.InfoService"; } } ================================================ FILE: crates/topos-core/src/api/grpc/generated/topos.shared.v1.rs ================================================ #[derive(Copy, serde::Deserialize, serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Uuid { #[prost(uint64, tag = "1")] pub most_significant_bits: u64, #[prost(uint64, tag = "2")] pub least_significant_bits: u64, } #[derive(Eq, Hash, serde::Deserialize, serde::Serialize)]
// NOTE(review): generated prost message definitions for `topos.shared.v1`
// (SubnetId, ValidatorId, CertificateId, checkpoint and stream-position
// types, Frost, StarkProof, EcdsaSignature), followed by the beginning of
// generated `topos.tce.v1` messages (CheckpointRequest/Response,
// CheckpointMapFieldEntry, FetchCertificatesRequest). Do not hand-edit;
// regenerate from the .proto definitions.
// NOTE(review): `Vec` fields appear without their element type parameters
// (e.g. `::prost::alloc::vec::Vec` should presumably be `Vec<u8>` for
// `bytes` fields) — an artifact of this extraction, not of the generator;
// confirm against the checked-in generated file.
#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct SubnetId { #[prost(bytes = "vec", tag = "1")] pub value: ::prost::alloc::vec::Vec, } /// Id of the validator in the Topos protocol network /// This is the same as the validator's H160 address in the Ethereum compatible network #[derive(Eq, Hash, serde::Deserialize, serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ValidatorId { /// The validator's H160 address #[prost(bytes = "vec", tag = "1")] pub value: ::prost::alloc::vec::Vec, } #[derive(Eq, Hash, serde::Deserialize, serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct CertificateId { #[prost(bytes = "vec", tag = "1")] pub value: ::prost::alloc::vec::Vec, } /// Checkpoints are used to walk through streams #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Checkpoints {} /// Nested message and enum types in `Checkpoints`. pub mod checkpoints { /// SourceCheckpoint represents a snapshot of multiple stream's positions regarding /// one or multiple source subnets. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct SourceCheckpoint { #[prost(message, repeated, tag = "1")] pub source_subnet_ids: ::prost::alloc::vec::Vec, #[prost(message, repeated, tag = "2")] pub positions: ::prost::alloc::vec::Vec, } /// TargetCheckpoint represents a snapshot of multiple stream's positions regarding /// one or multiple target subnets.
#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct TargetCheckpoint { #[prost(message, repeated, tag = "1")] pub target_subnet_ids: ::prost::alloc::vec::Vec, #[prost(message, repeated, tag = "2")] pub positions: ::prost::alloc::vec::Vec, } } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Positions {} /// Nested message and enum types in `Positions`. pub mod positions { /// SourceStreamPosition represents a single point in a source stream. /// It is defined by a source_subnet_id and a position, resolving to a certificate_id #[derive(serde::Deserialize, serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct SourceStreamPosition { /// The source_subnet_id is a mandatory field for the SourceStreamPosition #[prost(message, optional, tag = "1")] pub source_subnet_id: ::core::option::Option, #[prost(uint64, tag = "2")] pub position: u64, #[prost(message, optional, tag = "3")] pub certificate_id: ::core::option::Option, } /// TargetStreamPosition represents a single point in a target stream regarding a source subnet.
/// It is defined by a target_subnet_id, source_subnet_id and a position, resolving to a certificate_id #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct TargetStreamPosition { /// The source_subnet_id is a mandatory field for the TargetStreamPosition #[prost(message, optional, tag = "1")] pub source_subnet_id: ::core::option::Option, /// The target_subnet_id is a mandatory field for the TargetStreamPosition #[prost(message, optional, tag = "2")] pub target_subnet_id: ::core::option::Option, #[prost(uint64, tag = "3")] pub position: u64, #[prost(message, optional, tag = "4")] pub certificate_id: ::core::option::Option, } } #[derive(Eq, Hash, serde::Deserialize, serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Frost { #[prost(bytes = "vec", tag = "1")] pub value: ::prost::alloc::vec::Vec, } #[derive(Eq, Hash, serde::Deserialize, serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct StarkProof { #[prost(bytes = "vec", tag = "1")] pub value: ::prost::alloc::vec::Vec, } /// A signature using the ECDSA algorithm. /// Used to sign double echo protocol messages.
#[derive(Eq, Hash, serde::Deserialize, serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct EcdsaSignature { #[prost(bytes = "vec", tag = "1")] pub r: ::prost::alloc::vec::Vec, #[prost(bytes = "vec", tag = "2")] pub s: ::prost::alloc::vec::Vec, #[prost(uint64, tag = "3")] pub v: u64, } ================================================ FILE: crates/topos-core/src/api/grpc/generated/topos.tce.v1.rs ================================================ #[derive(serde::Deserialize, serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct CheckpointRequest { /// Provide a request_id to track response #[prost(message, optional, tag = "1")] pub request_id: ::core::option::Option, #[prost(message, repeated, tag = "2")] pub checkpoint: ::prost::alloc::vec::Vec, #[prost(uint64, tag = "3")] pub limit_per_subnet: u64, } #[derive(serde::Deserialize, serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct CheckpointResponse { /// If the response is directly linked to a request this ID allow one to track it #[prost(message, optional, tag = "1")] pub request_id: ::core::option::Option, #[prost(message, repeated, tag = "2")] pub checkpoint_diff: ::prost::alloc::vec::Vec, } #[derive(serde::Deserialize, serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct CheckpointMapFieldEntry { #[prost(string, tag = "1")] pub key: ::prost::alloc::string::String, #[prost(message, repeated, tag = "2")] pub value: ::prost::alloc::vec::Vec, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct FetchCertificatesRequest { /// Provide a request_id to track response #[prost(message, optional, tag = "1")] pub request_id: ::core::option::Option, #[prost(message, repeated, tag = "2")] pub
// NOTE(review): generated `topos.tce.v1` synchronization messages
// (FetchCertificatesRequest tail, FetchCertificatesResponse,
// ProofOfDelivery, SignedReady) and the generated tonic client/server for
// `topos.tce.v1.SynchronizerService` (two unary RPCs: `fetch_checkpoint`
// and `fetch_certificates`; unrecognized paths answer HTTP 200 with
// `grpc-status: 12` — gRPC "unimplemented"). Generated code — do not
// hand-edit; regenerate from the .proto definitions. Generic parameters
// appear stripped by this extraction; confirm against the repo file.
certificates: ::prost::alloc::vec::Vec, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct FetchCertificatesResponse { /// Provide a request_id to track response #[prost(message, optional, tag = "1")] pub request_id: ::core::option::Option, #[prost(message, repeated, tag = "2")] pub certificates: ::prost::alloc::vec::Vec, } #[derive(serde::Deserialize, serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ProofOfDelivery { #[prost(message, optional, tag = "1")] pub delivery_position: ::core::option::Option< super::super::shared::v1::positions::SourceStreamPosition, >, #[prost(message, repeated, tag = "2")] pub readies: ::prost::alloc::vec::Vec, #[prost(uint64, tag = "3")] pub threshold: u64, } #[derive(serde::Deserialize, serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct SignedReady { #[prost(string, tag = "1")] pub ready: ::prost::alloc::string::String, #[prost(string, tag = "2")] pub signature: ::prost::alloc::string::String, } /// Generated client implementations. pub mod synchronizer_service_client { #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] use tonic::codegen::*; use tonic::codegen::http::Uri; #[derive(Debug, Clone)] pub struct SynchronizerServiceClient { inner: tonic::client::Grpc, } impl SynchronizerServiceClient { /// Attempt to create a new client by connecting to a given endpoint.
pub async fn connect(dst: D) -> Result where D: TryInto, D::Error: Into, { let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; Ok(Self::new(conn)) } } impl SynchronizerServiceClient where T: tonic::client::GrpcService, T::Error: Into, T::ResponseBody: Body + Send + 'static, ::Error: Into + Send, { pub fn new(inner: T) -> Self { let inner = tonic::client::Grpc::new(inner); Self { inner } } pub fn with_origin(inner: T, origin: Uri) -> Self { let inner = tonic::client::Grpc::with_origin(inner, origin); Self { inner } } pub fn with_interceptor( inner: T, interceptor: F, ) -> SynchronizerServiceClient> where F: tonic::service::Interceptor, T::ResponseBody: Default, T: tonic::codegen::Service< http::Request, Response = http::Response< >::ResponseBody, >, >, , >>::Error: Into + Send + Sync, { SynchronizerServiceClient::new(InterceptedService::new(inner, interceptor)) } /// Compress requests with the given encoding. /// /// This requires the server to support it otherwise it might respond with an /// error. #[must_use] pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { self.inner = self.inner.send_compressed(encoding); self } /// Enable decompressing responses. #[must_use] pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { self.inner = self.inner.accept_compressed(encoding); self } /// Limits the maximum size of a decoded message. /// /// Default: `4MB` #[must_use] pub fn max_decoding_message_size(mut self, limit: usize) -> Self { self.inner = self.inner.max_decoding_message_size(limit); self } /// Limits the maximum size of an encoded message.
/// /// Default: `usize::MAX` #[must_use] pub fn max_encoding_message_size(mut self, limit: usize) -> Self { self.inner = self.inner.max_encoding_message_size(limit); self } pub async fn fetch_checkpoint( &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result< tonic::Response, tonic::Status, > { self.inner .ready() .await .map_err(|e| { tonic::Status::new( tonic::Code::Unknown, format!("Service was not ready: {}", e.into()), ) })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/topos.tce.v1.SynchronizerService/fetch_checkpoint", ); let mut req = request.into_request(); req.extensions_mut() .insert( GrpcMethod::new( "topos.tce.v1.SynchronizerService", "fetch_checkpoint", ), ); self.inner.unary(req, path, codec).await } pub async fn fetch_certificates( &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result< tonic::Response, tonic::Status, > { self.inner .ready() .await .map_err(|e| { tonic::Status::new( tonic::Code::Unknown, format!("Service was not ready: {}", e.into()), ) })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/topos.tce.v1.SynchronizerService/fetch_certificates", ); let mut req = request.into_request(); req.extensions_mut() .insert( GrpcMethod::new( "topos.tce.v1.SynchronizerService", "fetch_certificates", ), ); self.inner.unary(req, path, codec).await } } } /// Generated server implementations. pub mod synchronizer_service_server { #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] use tonic::codegen::*; /// Generated trait containing gRPC methods that should be implemented for use with SynchronizerServiceServer.
#[async_trait] pub trait SynchronizerService: Send + Sync + 'static { async fn fetch_checkpoint( &self, request: tonic::Request, ) -> std::result::Result< tonic::Response, tonic::Status, >; async fn fetch_certificates( &self, request: tonic::Request, ) -> std::result::Result< tonic::Response, tonic::Status, >; } #[derive(Debug)] pub struct SynchronizerServiceServer { inner: _Inner, accept_compression_encodings: EnabledCompressionEncodings, send_compression_encodings: EnabledCompressionEncodings, max_decoding_message_size: Option, max_encoding_message_size: Option, } struct _Inner(Arc); impl SynchronizerServiceServer { pub fn new(inner: T) -> Self { Self::from_arc(Arc::new(inner)) } pub fn from_arc(inner: Arc) -> Self { let inner = _Inner(inner); Self { inner, accept_compression_encodings: Default::default(), send_compression_encodings: Default::default(), max_decoding_message_size: None, max_encoding_message_size: None, } } pub fn with_interceptor( inner: T, interceptor: F, ) -> InterceptedService where F: tonic::service::Interceptor, { InterceptedService::new(Self::new(inner), interceptor) } /// Enable decompressing requests with the given encoding. #[must_use] pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { self.accept_compression_encodings.enable(encoding); self } /// Compress responses with the given encoding, if the client supports it. #[must_use] pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { self.send_compression_encodings.enable(encoding); self } /// Limits the maximum size of a decoded message. /// /// Default: `4MB` #[must_use] pub fn max_decoding_message_size(mut self, limit: usize) -> Self { self.max_decoding_message_size = Some(limit); self } /// Limits the maximum size of an encoded message.
/// /// Default: `usize::MAX` #[must_use] pub fn max_encoding_message_size(mut self, limit: usize) -> Self { self.max_encoding_message_size = Some(limit); self } } impl tonic::codegen::Service> for SynchronizerServiceServer where T: SynchronizerService, B: Body + Send + 'static, B::Error: Into + Send + 'static, { type Response = http::Response; type Error = std::convert::Infallible; type Future = BoxFuture; fn poll_ready( &mut self, _cx: &mut Context<'_>, ) -> Poll> { Poll::Ready(Ok(())) } fn call(&mut self, req: http::Request) -> Self::Future { let inner = self.inner.clone(); match req.uri().path() { "/topos.tce.v1.SynchronizerService/fetch_checkpoint" => { #[allow(non_camel_case_types)] struct fetch_checkpointSvc(pub Arc); impl< T: SynchronizerService, > tonic::server::UnaryService for fetch_checkpointSvc { type Response = super::CheckpointResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { ::fetch_checkpoint( &inner, request, ) .await }; Box::pin(fut) } } let accept_compression_encodings = self.accept_compression_encodings; let send_compression_encodings = self.send_compression_encodings; let max_decoding_message_size = self.max_decoding_message_size; let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { let inner = inner.0; let method = fetch_checkpointSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( accept_compression_encodings, send_compression_encodings, ) .apply_max_message_size_config( max_decoding_message_size, max_encoding_message_size, ); let res = grpc.unary(method, req).await; Ok(res) }; Box::pin(fut) } "/topos.tce.v1.SynchronizerService/fetch_certificates" => { #[allow(non_camel_case_types)] struct fetch_certificatesSvc(pub Arc); impl< T: SynchronizerService, >
tonic::server::UnaryService for fetch_certificatesSvc { type Response = super::FetchCertificatesResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { ::fetch_certificates( &inner, request, ) .await }; Box::pin(fut) } } let accept_compression_encodings = self.accept_compression_encodings; let send_compression_encodings = self.send_compression_encodings; let max_decoding_message_size = self.max_decoding_message_size; let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { let inner = inner.0; let method = fetch_certificatesSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( accept_compression_encodings, send_compression_encodings, ) .apply_max_message_size_config( max_decoding_message_size, max_encoding_message_size, ); let res = grpc.unary(method, req).await; Ok(res) }; Box::pin(fut) } _ => { Box::pin(async move { Ok( http::Response::builder() .status(200) .header("grpc-status", "12") .header("content-type", "application/grpc") .body(empty_body()) .unwrap(), ) }) } } } } impl Clone for SynchronizerServiceServer { fn clone(&self) -> Self { let inner = self.inner.clone(); Self { inner, accept_compression_encodings: self.accept_compression_encodings, send_compression_encodings: self.send_compression_encodings, max_decoding_message_size: self.max_decoding_message_size, max_encoding_message_size: self.max_encoding_message_size, } } } impl Clone for _Inner { fn clone(&self) -> Self { Self(Arc::clone(&self.0)) } } impl std::fmt::Debug for _Inner { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{:?}", self.0) } } impl tonic::server::NamedService for SynchronizerServiceServer { const NAME: &'static str = "topos.tce.v1.SynchronizerService"; } }
// NOTE(review): generated `topos.tce.v1` API surface: message types
// (SubmitCertificate*, GetSourceHead*, GetLastPendingCertificates*,
// WatchCertificatesRequest/Response with their nested Command/Event oneofs)
// and the generated `ApiServiceClient` (unary SubmitCertificate,
// GetSourceHead, GetLastPendingCertificates; bidirectional streaming
// WatchCertificates). The `api_service_server` module begins at the end of
// this span and is cut off by the extraction. Generated code — do not
// hand-edit; regenerate from the .proto definitions. Generic parameters
// appear stripped by this extraction; confirm against the repo file.
#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct SubmitCertificateRequest { #[prost(message, optional, tag = "1")] pub certificate: ::core::option::Option, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct SubmitCertificateResponse {} #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct GetSourceHeadRequest { #[prost(message, optional, tag = "1")] pub subnet_id: ::core::option::Option, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct GetSourceHeadResponse { #[prost(message, optional, tag = "1")] pub position: ::core::option::Option< super::super::shared::v1::positions::SourceStreamPosition, >, #[prost(message, optional, tag = "2")] pub certificate: ::core::option::Option, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct GetLastPendingCertificatesRequest { #[prost(message, repeated, tag = "1")] pub subnet_ids: ::prost::alloc::vec::Vec, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct LastPendingCertificate { #[prost(message, optional, tag = "1")] pub value: ::core::option::Option, /// Pending certificate index (effectively total number of pending certificates) #[prost(uint64, tag = "2")] pub index: u64, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct GetLastPendingCertificatesResponse { /// Bytes and array types (SubnetId) could not be key in the map type according to specifications, /// so we use SubnetId hex encoded string with 0x prefix as key #[prost(map = "string, message", tag = "1")] pub last_pending_certificate: ::std::collections::HashMap< ::prost::alloc::string::String, LastPendingCertificate, >, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq,
::prost::Message)] pub struct WatchCertificatesRequest { /// Provide a request_id to track response #[prost(message, optional, tag = "1")] pub request_id: ::core::option::Option, /// Define which command needs to be performed #[prost(oneof = "watch_certificates_request::Command", tags = "2")] pub command: ::core::option::Option, } /// Nested message and enum types in `WatchCertificatesRequest`. pub mod watch_certificates_request { /// Sent to start receiving events and being able to send further command #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct OpenStream { #[prost(message, optional, tag = "1")] pub target_checkpoint: ::core::option::Option< super::super::super::shared::v1::checkpoints::TargetCheckpoint, >, #[prost(message, optional, tag = "2")] pub source_checkpoint: ::core::option::Option< super::super::super::shared::v1::checkpoints::SourceCheckpoint, >, } /// Define which command needs to be performed #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Command { #[prost(message, tag = "2")] OpenStream(OpenStream), } } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct WatchCertificatesResponse { /// If the response is directly linked to a request this ID allow one to track it #[prost(message, optional, tag = "1")] pub request_id: ::core::option::Option, #[prost(oneof = "watch_certificates_response::Event", tags = "2, 3")] pub event: ::core::option::Option, } /// Nested message and enum types in `WatchCertificatesResponse`.
pub mod watch_certificates_response { /// Sent by the TCE when the stream is ready to be used and /// that certificates will start being pushed #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct StreamOpened { #[prost(message, repeated, tag = "1")] pub subnet_ids: ::prost::alloc::vec::Vec< super::super::super::shared::v1::SubnetId, >, } /// Target Certificate pushed from the TCE to the sequencer #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct CertificatePushed { #[prost(message, optional, tag = "1")] pub certificate: ::core::option::Option< super::super::super::uci::v1::Certificate, >, #[prost(message, repeated, tag = "2")] pub positions: ::prost::alloc::vec::Vec< super::super::super::shared::v1::positions::TargetStreamPosition, >, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Event { #[prost(message, tag = "2")] StreamOpened(StreamOpened), #[prost(message, tag = "3")] CertificatePushed(CertificatePushed), } } /// Generated client implementations. pub mod api_service_client { #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] use tonic::codegen::*; use tonic::codegen::http::Uri; #[derive(Debug, Clone)] pub struct ApiServiceClient { inner: tonic::client::Grpc, } impl ApiServiceClient { /// Attempt to create a new client by connecting to a given endpoint.
pub async fn connect(dst: D) -> Result where D: TryInto, D::Error: Into, { let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; Ok(Self::new(conn)) } } impl ApiServiceClient where T: tonic::client::GrpcService, T::Error: Into, T::ResponseBody: Body + Send + 'static, ::Error: Into + Send, { pub fn new(inner: T) -> Self { let inner = tonic::client::Grpc::new(inner); Self { inner } } pub fn with_origin(inner: T, origin: Uri) -> Self { let inner = tonic::client::Grpc::with_origin(inner, origin); Self { inner } } pub fn with_interceptor( inner: T, interceptor: F, ) -> ApiServiceClient> where F: tonic::service::Interceptor, T::ResponseBody: Default, T: tonic::codegen::Service< http::Request, Response = http::Response< >::ResponseBody, >, >, , >>::Error: Into + Send + Sync, { ApiServiceClient::new(InterceptedService::new(inner, interceptor)) } /// Compress requests with the given encoding. /// /// This requires the server to support it otherwise it might respond with an /// error. #[must_use] pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { self.inner = self.inner.send_compressed(encoding); self } /// Enable decompressing responses. #[must_use] pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { self.inner = self.inner.accept_compressed(encoding); self } /// Limits the maximum size of a decoded message. /// /// Default: `4MB` #[must_use] pub fn max_decoding_message_size(mut self, limit: usize) -> Self { self.inner = self.inner.max_decoding_message_size(limit); self } /// Limits the maximum size of an encoded message.
/// /// Default: `usize::MAX` #[must_use] pub fn max_encoding_message_size(mut self, limit: usize) -> Self { self.inner = self.inner.max_encoding_message_size(limit); self } pub async fn submit_certificate( &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result< tonic::Response, tonic::Status, > { self.inner .ready() .await .map_err(|e| { tonic::Status::new( tonic::Code::Unknown, format!("Service was not ready: {}", e.into()), ) })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/topos.tce.v1.APIService/SubmitCertificate", ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("topos.tce.v1.APIService", "SubmitCertificate")); self.inner.unary(req, path, codec).await } pub async fn get_source_head( &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result< tonic::Response, tonic::Status, > { self.inner .ready() .await .map_err(|e| { tonic::Status::new( tonic::Code::Unknown, format!("Service was not ready: {}", e.into()), ) })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/topos.tce.v1.APIService/GetSourceHead", ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("topos.tce.v1.APIService", "GetSourceHead")); self.inner.unary(req, path, codec).await } /// / This RPC allows a client to get latest pending certificates for /// / requested subnets (by their subnet id) /// / /// / Returns a map of subnet_id -> last pending certificate /// / If there are no pending certificate for a subnet, returns None for that subnet id pub async fn get_last_pending_certificates( &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result< tonic::Response, tonic::Status, > { self.inner .ready() .await .map_err(|e| { tonic::Status::new( tonic::Code::Unknown, format!("Service was not ready: {}", e.into()), ) })?; let codec = tonic::codec::ProstCodec::default(); let path =
http::uri::PathAndQuery::from_static( "/topos.tce.v1.APIService/GetLastPendingCertificates", ); let mut req = request.into_request(); req.extensions_mut() .insert( GrpcMethod::new( "topos.tce.v1.APIService", "GetLastPendingCertificates", ), ); self.inner.unary(req, path, codec).await } /// This RPC allows a client to open a bidirectional stream with a TCE pub async fn watch_certificates( &mut self, request: impl tonic::IntoStreamingRequest< Message = super::WatchCertificatesRequest, >, ) -> std::result::Result< tonic::Response>, tonic::Status, > { self.inner .ready() .await .map_err(|e| { tonic::Status::new( tonic::Code::Unknown, format!("Service was not ready: {}", e.into()), ) })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/topos.tce.v1.APIService/WatchCertificates", ); let mut req = request.into_streaming_request(); req.extensions_mut() .insert(GrpcMethod::new("topos.tce.v1.APIService", "WatchCertificates")); self.inner.streaming(req, path, codec).await } } } /// Generated server implementations. pub mod api_service_server { #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] use tonic::codegen::*; /// Generated trait containing gRPC methods that should be implemented for use with ApiServiceServer.
#[async_trait] pub trait ApiService: Send + Sync + 'static { async fn submit_certificate( &self, request: tonic::Request, ) -> std::result::Result< tonic::Response, tonic::Status, >; async fn get_source_head( &self, request: tonic::Request, ) -> std::result::Result< tonic::Response, tonic::Status, >; /// / This RPC allows a client to get latest pending certificates for /// / requested subnets (by their subnet id) /// / /// / Returns a map of subnet_id -> last pending certificate /// / If there are no pending certificate for a subnet, returns None for that subnet id async fn get_last_pending_certificates( &self, request: tonic::Request, ) -> std::result::Result< tonic::Response, tonic::Status, >; /// Server streaming response type for the WatchCertificates method. type WatchCertificatesStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result< super::WatchCertificatesResponse, tonic::Status, >, > + Send + 'static; /// This RPC allows a client to open a bidirectional stream with a TCE async fn watch_certificates( &self, request: tonic::Request>, ) -> std::result::Result< tonic::Response, tonic::Status, >; } #[derive(Debug)] pub struct ApiServiceServer { inner: _Inner, accept_compression_encodings: EnabledCompressionEncodings, send_compression_encodings: EnabledCompressionEncodings, max_decoding_message_size: Option, max_encoding_message_size: Option, } struct _Inner(Arc); impl ApiServiceServer { pub fn new(inner: T) -> Self { Self::from_arc(Arc::new(inner)) } pub fn from_arc(inner: Arc) -> Self { let inner = _Inner(inner); Self { inner, accept_compression_encodings: Default::default(), send_compression_encodings: Default::default(), max_decoding_message_size: None, max_encoding_message_size: None, } } pub fn with_interceptor( inner: T, interceptor: F, ) -> InterceptedService where F: tonic::service::Interceptor, { InterceptedService::new(Self::new(inner), interceptor) } /// Enable decompressing requests with the given encoding. 
#[must_use] pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { self.accept_compression_encodings.enable(encoding); self } /// Compress responses with the given encoding, if the client supports it. #[must_use] pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { self.send_compression_encodings.enable(encoding); self } /// Limits the maximum size of a decoded message. /// /// Default: `4MB` #[must_use] pub fn max_decoding_message_size(mut self, limit: usize) -> Self { self.max_decoding_message_size = Some(limit); self } /// Limits the maximum size of an encoded message. /// /// Default: `usize::MAX` #[must_use] pub fn max_encoding_message_size(mut self, limit: usize) -> Self { self.max_encoding_message_size = Some(limit); self } } impl tonic::codegen::Service> for ApiServiceServer where T: ApiService, B: Body + Send + 'static, B::Error: Into + Send + 'static, { type Response = http::Response; type Error = std::convert::Infallible; type Future = BoxFuture; fn poll_ready( &mut self, _cx: &mut Context<'_>, ) -> Poll> { Poll::Ready(Ok(())) } fn call(&mut self, req: http::Request) -> Self::Future { let inner = self.inner.clone(); match req.uri().path() { "/topos.tce.v1.APIService/SubmitCertificate" => { #[allow(non_camel_case_types)] struct SubmitCertificateSvc(pub Arc); impl< T: ApiService, > tonic::server::UnaryService for SubmitCertificateSvc { type Response = super::SubmitCertificateResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { ::submit_certificate(&inner, request).await }; Box::pin(fut) } } let accept_compression_encodings = self.accept_compression_encodings; let send_compression_encodings = self.send_compression_encodings; let max_decoding_message_size = self.max_decoding_message_size; let max_encoding_message_size = self.max_encoding_message_size; let inner = 
self.inner.clone(); let fut = async move { let inner = inner.0; let method = SubmitCertificateSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( accept_compression_encodings, send_compression_encodings, ) .apply_max_message_size_config( max_decoding_message_size, max_encoding_message_size, ); let res = grpc.unary(method, req).await; Ok(res) }; Box::pin(fut) } "/topos.tce.v1.APIService/GetSourceHead" => { #[allow(non_camel_case_types)] struct GetSourceHeadSvc(pub Arc); impl< T: ApiService, > tonic::server::UnaryService for GetSourceHeadSvc { type Response = super::GetSourceHeadResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { ::get_source_head(&inner, request).await }; Box::pin(fut) } } let accept_compression_encodings = self.accept_compression_encodings; let send_compression_encodings = self.send_compression_encodings; let max_decoding_message_size = self.max_decoding_message_size; let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { let inner = inner.0; let method = GetSourceHeadSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( accept_compression_encodings, send_compression_encodings, ) .apply_max_message_size_config( max_decoding_message_size, max_encoding_message_size, ); let res = grpc.unary(method, req).await; Ok(res) }; Box::pin(fut) } "/topos.tce.v1.APIService/GetLastPendingCertificates" => { #[allow(non_camel_case_types)] struct GetLastPendingCertificatesSvc(pub Arc); impl< T: ApiService, > tonic::server::UnaryService< super::GetLastPendingCertificatesRequest, > for GetLastPendingCertificatesSvc { type Response = super::GetLastPendingCertificatesResponse; type Future = BoxFuture< 
tonic::Response, tonic::Status, >; fn call( &mut self, request: tonic::Request< super::GetLastPendingCertificatesRequest, >, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { ::get_last_pending_certificates( &inner, request, ) .await }; Box::pin(fut) } } let accept_compression_encodings = self.accept_compression_encodings; let send_compression_encodings = self.send_compression_encodings; let max_decoding_message_size = self.max_decoding_message_size; let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { let inner = inner.0; let method = GetLastPendingCertificatesSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( accept_compression_encodings, send_compression_encodings, ) .apply_max_message_size_config( max_decoding_message_size, max_encoding_message_size, ); let res = grpc.unary(method, req).await; Ok(res) }; Box::pin(fut) } "/topos.tce.v1.APIService/WatchCertificates" => { #[allow(non_camel_case_types)] struct WatchCertificatesSvc(pub Arc); impl< T: ApiService, > tonic::server::StreamingService for WatchCertificatesSvc { type Response = super::WatchCertificatesResponse; type ResponseStream = T::WatchCertificatesStream; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, request: tonic::Request< tonic::Streaming, >, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { ::watch_certificates(&inner, request).await }; Box::pin(fut) } } let accept_compression_encodings = self.accept_compression_encodings; let send_compression_encodings = self.send_compression_encodings; let max_decoding_message_size = self.max_decoding_message_size; let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { let inner = inner.0; let method = WatchCertificatesSvc(inner); let codec = 
tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( accept_compression_encodings, send_compression_encodings, ) .apply_max_message_size_config( max_decoding_message_size, max_encoding_message_size, ); let res = grpc.streaming(method, req).await; Ok(res) }; Box::pin(fut) } _ => { Box::pin(async move { Ok( http::Response::builder() .status(200) .header("grpc-status", "12") .header("content-type", "application/grpc") .body(empty_body()) .unwrap(), ) }) } } } } impl Clone for ApiServiceServer { fn clone(&self) -> Self { let inner = self.inner.clone(); Self { inner, accept_compression_encodings: self.accept_compression_encodings, send_compression_encodings: self.send_compression_encodings, max_decoding_message_size: self.max_decoding_message_size, max_encoding_message_size: self.max_encoding_message_size, } } } impl Clone for _Inner { fn clone(&self) -> Self { Self(Arc::clone(&self.0)) } } impl std::fmt::Debug for _Inner { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{:?}", self.0) } } impl tonic::server::NamedService for ApiServiceServer { const NAME: &'static str = "topos.tce.v1.APIService"; } } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct StatusRequest {} #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct StatusResponse { #[prost(bool, tag = "1")] pub has_active_sample: bool, } /// Generated client implementations. pub mod console_service_client { #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] use tonic::codegen::*; use tonic::codegen::http::Uri; #[derive(Debug, Clone)] pub struct ConsoleServiceClient { inner: tonic::client::Grpc, } impl ConsoleServiceClient { /// Attempt to create a new client by connecting to a given endpoint. 
pub async fn connect(dst: D) -> Result where D: TryInto, D::Error: Into, { let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; Ok(Self::new(conn)) } } impl ConsoleServiceClient where T: tonic::client::GrpcService, T::Error: Into, T::ResponseBody: Body + Send + 'static, ::Error: Into + Send, { pub fn new(inner: T) -> Self { let inner = tonic::client::Grpc::new(inner); Self { inner } } pub fn with_origin(inner: T, origin: Uri) -> Self { let inner = tonic::client::Grpc::with_origin(inner, origin); Self { inner } } pub fn with_interceptor( inner: T, interceptor: F, ) -> ConsoleServiceClient> where F: tonic::service::Interceptor, T::ResponseBody: Default, T: tonic::codegen::Service< http::Request, Response = http::Response< >::ResponseBody, >, >, , >>::Error: Into + Send + Sync, { ConsoleServiceClient::new(InterceptedService::new(inner, interceptor)) } /// Compress requests with the given encoding. /// /// This requires the server to support it otherwise it might respond with an /// error. #[must_use] pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { self.inner = self.inner.send_compressed(encoding); self } /// Enable decompressing responses. #[must_use] pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { self.inner = self.inner.accept_compressed(encoding); self } /// Limits the maximum size of a decoded message. /// /// Default: `4MB` #[must_use] pub fn max_decoding_message_size(mut self, limit: usize) -> Self { self.inner = self.inner.max_decoding_message_size(limit); self } /// Limits the maximum size of an encoded message. 
/// /// Default: `usize::MAX` #[must_use] pub fn max_encoding_message_size(mut self, limit: usize) -> Self { self.inner = self.inner.max_encoding_message_size(limit); self } pub async fn status( &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { self.inner .ready() .await .map_err(|e| { tonic::Status::new( tonic::Code::Unknown, format!("Service was not ready: {}", e.into()), ) })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/topos.tce.v1.ConsoleService/Status", ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("topos.tce.v1.ConsoleService", "Status")); self.inner.unary(req, path, codec).await } } } /// Generated server implementations. pub mod console_service_server { #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] use tonic::codegen::*; /// Generated trait containing gRPC methods that should be implemented for use with ConsoleServiceServer. 
#[async_trait] pub trait ConsoleService: Send + Sync + 'static { async fn status( &self, request: tonic::Request, ) -> std::result::Result, tonic::Status>; } #[derive(Debug)] pub struct ConsoleServiceServer { inner: _Inner, accept_compression_encodings: EnabledCompressionEncodings, send_compression_encodings: EnabledCompressionEncodings, max_decoding_message_size: Option, max_encoding_message_size: Option, } struct _Inner(Arc); impl ConsoleServiceServer { pub fn new(inner: T) -> Self { Self::from_arc(Arc::new(inner)) } pub fn from_arc(inner: Arc) -> Self { let inner = _Inner(inner); Self { inner, accept_compression_encodings: Default::default(), send_compression_encodings: Default::default(), max_decoding_message_size: None, max_encoding_message_size: None, } } pub fn with_interceptor( inner: T, interceptor: F, ) -> InterceptedService where F: tonic::service::Interceptor, { InterceptedService::new(Self::new(inner), interceptor) } /// Enable decompressing requests with the given encoding. #[must_use] pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { self.accept_compression_encodings.enable(encoding); self } /// Compress responses with the given encoding, if the client supports it. #[must_use] pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { self.send_compression_encodings.enable(encoding); self } /// Limits the maximum size of a decoded message. /// /// Default: `4MB` #[must_use] pub fn max_decoding_message_size(mut self, limit: usize) -> Self { self.max_decoding_message_size = Some(limit); self } /// Limits the maximum size of an encoded message. 
/// /// Default: `usize::MAX` #[must_use] pub fn max_encoding_message_size(mut self, limit: usize) -> Self { self.max_encoding_message_size = Some(limit); self } } impl tonic::codegen::Service> for ConsoleServiceServer where T: ConsoleService, B: Body + Send + 'static, B::Error: Into + Send + 'static, { type Response = http::Response; type Error = std::convert::Infallible; type Future = BoxFuture; fn poll_ready( &mut self, _cx: &mut Context<'_>, ) -> Poll> { Poll::Ready(Ok(())) } fn call(&mut self, req: http::Request) -> Self::Future { let inner = self.inner.clone(); match req.uri().path() { "/topos.tce.v1.ConsoleService/Status" => { #[allow(non_camel_case_types)] struct StatusSvc(pub Arc); impl< T: ConsoleService, > tonic::server::UnaryService for StatusSvc { type Response = super::StatusResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { ::status(&inner, request).await }; Box::pin(fut) } } let accept_compression_encodings = self.accept_compression_encodings; let send_compression_encodings = self.send_compression_encodings; let max_decoding_message_size = self.max_decoding_message_size; let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { let inner = inner.0; let method = StatusSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( accept_compression_encodings, send_compression_encodings, ) .apply_max_message_size_config( max_decoding_message_size, max_encoding_message_size, ); let res = grpc.unary(method, req).await; Ok(res) }; Box::pin(fut) } _ => { Box::pin(async move { Ok( http::Response::builder() .status(200) .header("grpc-status", "12") .header("content-type", "application/grpc") .body(empty_body()) .unwrap(), ) }) } } } } impl Clone for ConsoleServiceServer { fn clone(&self) 
-> Self { let inner = self.inner.clone(); Self { inner, accept_compression_encodings: self.accept_compression_encodings, send_compression_encodings: self.send_compression_encodings, max_decoding_message_size: self.max_decoding_message_size, max_encoding_message_size: self.max_encoding_message_size, } } } impl Clone for _Inner { fn clone(&self) -> Self { Self(Arc::clone(&self.0)) } } impl std::fmt::Debug for _Inner { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{:?}", self.0) } } impl tonic::server::NamedService for ConsoleServiceServer { const NAME: &'static str = "topos.tce.v1.ConsoleService"; } } #[derive(serde::Deserialize, serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Gossip { #[prost(message, optional, tag = "1")] pub certificate: ::core::option::Option, } #[derive(serde::Deserialize, serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Echo { #[prost(message, optional, tag = "1")] pub certificate_id: ::core::option::Option, #[prost(message, optional, tag = "2")] pub signature: ::core::option::Option, #[prost(message, optional, tag = "3")] pub validator_id: ::core::option::Option, } #[derive(serde::Deserialize, serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Ready { #[prost(message, optional, tag = "1")] pub certificate_id: ::core::option::Option, #[prost(message, optional, tag = "2")] pub signature: ::core::option::Option, #[prost(message, optional, tag = "3")] pub validator_id: ::core::option::Option, } #[derive(serde::Deserialize, serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct DoubleEchoRequest { #[prost(oneof = "double_echo_request::Request", tags = "1, 2, 3")] pub request: ::core::option::Option, } /// Nested message and enum 
types in `DoubleEchoRequest`. pub mod double_echo_request { #[derive(serde::Deserialize, serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Request { #[prost(message, tag = "1")] Gossip(super::Gossip), #[prost(message, tag = "2")] Echo(super::Echo), #[prost(message, tag = "3")] Ready(super::Ready), } } #[derive(serde::Deserialize, serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Batch { #[prost(bytes = "vec", repeated, tag = "1")] pub messages: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, } ================================================ FILE: crates/topos-core/src/api/grpc/generated/topos.uci.v1.rs ================================================ /// Certificate - main exchange item #[derive(Eq, Hash, serde::Deserialize, serde::Serialize)] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Certificate { #[prost(message, optional, tag = "1")] pub prev_id: ::core::option::Option, #[prost(message, optional, tag = "2")] pub source_subnet_id: ::core::option::Option, #[prost(bytes = "vec", tag = "3")] pub state_root: ::prost::alloc::vec::Vec, #[prost(bytes = "vec", tag = "4")] pub tx_root_hash: ::prost::alloc::vec::Vec, #[prost(bytes = "vec", tag = "5")] pub receipts_root_hash: ::prost::alloc::vec::Vec, #[prost(message, repeated, tag = "6")] pub target_subnets: ::prost::alloc::vec::Vec, #[prost(uint32, tag = "7")] pub verifier: u32, #[prost(message, optional, tag = "8")] pub id: ::core::option::Option, #[prost(message, optional, tag = "9")] pub proof: ::core::option::Option, #[prost(message, optional, tag = "10")] pub signature: ::core::option::Option, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct OptionalCertificate { #[prost(message, optional, tag = "1")] pub value: ::core::option::Option, } 
================================================ FILE: crates/topos-core/src/api/grpc/mod.rs ================================================ use self::checkpoints::StreamPositionError; use tonic::transport::Channel; use self::tce::v1::synchronizer_service_client::SynchronizerServiceClient; pub const FILE_DESCRIPTOR_SET: &[u8] = include_bytes!("generated/topos.bin"); pub mod checkpoints; pub trait GrpcClient { type Output; fn init(destination: Channel) -> Self::Output; } impl GrpcClient for SynchronizerServiceClient { type Output = Self; fn init(channel: Channel) -> Self::Output { SynchronizerServiceClient::new(channel) } } #[derive(thiserror::Error, Debug)] pub enum ConversionError { #[error(transparent)] GrpcDecode(#[from] prost::DecodeError), #[error("Missing mandatory field: {0}")] MissingField(&'static str), #[error(transparent)] StreamConversion(#[from] StreamPositionError), } #[allow(warnings)] #[rustfmt::skip] #[path = "generated/topos.p2p.rs"] pub mod p2p; #[path = ""] pub mod tce { #[rustfmt::skip] #[allow(warnings)] #[path = "generated/topos.tce.v1.rs"] pub mod v1; #[path = "conversions/tce/v1/mod.rs"] pub mod v1_conversions; } #[path = ""] pub mod shared { #[rustfmt::skip] #[allow(warnings)] #[path = "generated/topos.shared.v1.rs"] pub mod v1; #[path = "conversions/shared/v1/uuid.rs"] pub mod v1_conversions_uuid; #[path = "conversions/shared/v1/subnet.rs"] pub mod v1_conversions_subnet; #[path = "conversions/shared/v1/certificate.rs"] pub mod v1_conversions_certificate; #[path = "conversions/shared/v1/signature.rs"] pub mod v1_conversions_signature; #[path = "conversions/shared/v1/validator_id.rs"] pub mod v1_conversions_validator_id; } #[path = "."] pub mod uci { #[rustfmt::skip] #[allow(warnings)] #[path = "generated/topos.uci.v1.rs"] pub mod v1; #[path = "conversions/uci/v1/uci.rs"] pub mod v1_conversions; } ================================================ FILE: crates/topos-core/src/api/mod.rs ================================================ pub mod 
graphql; pub mod grpc; ================================================ FILE: crates/topos-core/src/errors.rs ================================================ use crate::api::grpc::checkpoints::StreamPositionError; #[derive(Debug, thiserror::Error)] pub enum GrpcParsingError { #[error("Malformed gRPC object: {0}")] GrpcMalformedType(&'static str), #[error(transparent)] PositionParsing(#[from] StreamPositionError), } ================================================ FILE: crates/topos-core/src/lib.rs ================================================ #[cfg_attr(docsrs, doc(cfg(feature = "uci")))] pub mod uci; #[cfg_attr(docsrs, doc(cfg(feature = "api")))] pub mod api; pub mod errors; pub mod types; #[cfg(test)] mod test; ================================================ FILE: crates/topos-core/src/test.rs ================================================ use crate::types::stream::Position; #[test] fn test_position() { let zero = Position::ZERO; let serialized = bincode::serialize(&zero).unwrap(); let deserialized: Position = bincode::deserialize(&serialized).unwrap(); assert_eq!(zero, deserialized); let one: u64 = 1; let serialized = bincode::serialize(&one).unwrap(); let deserialized: Position = bincode::deserialize(&serialized).unwrap(); assert_eq!(one, deserialized); } #[test] fn position_from_integer() { let position: Position = (0u64).into(); assert_eq!(*position, 0); } ================================================ FILE: crates/topos-core/src/types/stream.rs ================================================ use std::{fmt, ops::Deref}; use serde::{Deserialize, Serialize}; use thiserror::Error; use crate::uci::SubnetId; /// Represents the place of a certificate in the stream of the Source Subnet /// /// The `Source` Subnet is the subnet that produced the certificate. /// A certificate should and will have the same position in this stream /// no matter which node or component delivered it. 
The position is an /// aggregation of the precedence chain of a certificate, starting by the /// genesis certificate represented by a certificate which has a prev_id /// equal to the `INITIAL_CERTIFICATE_ID` #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] pub struct CertificateSourceStreamPosition { // Source subnet id pub subnet_id: SubnetId, // Source certificate position pub position: Position, } impl fmt::Display for CertificateSourceStreamPosition { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}:{}", self.subnet_id, self.position) } } impl CertificateSourceStreamPosition { pub fn new>(subnet_id: SubnetId, position: P) -> Self { Self { subnet_id, position: position.into(), } } } /// Represents the place of a certificate in the stream of a Target Subnet /// /// A `Target` Subnet is a subnet that was defined as target by the certificate. /// A certificate can have multiple target subnets, leading to multiple /// CertificateTargetStreamPosition for the same certificate but never more than /// one CertificateTargetStreamPosition per couple (target, source). /// /// The position of a certificate in a target stream will be the same across /// the entire network. 
#[derive(Debug, Deserialize, Serialize, Clone, Copy)] pub struct CertificateTargetStreamPosition { pub target_subnet_id: SubnetId, pub source_subnet_id: SubnetId, pub position: Position, } impl CertificateTargetStreamPosition { pub fn new>( target_subnet_id: SubnetId, source_subnet_id: SubnetId, position: P, ) -> Self { Self { target_subnet_id, source_subnet_id, position: position.into(), } } } /// Certificate index in a stream of both source or target subnet #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Copy)] pub struct Position(u64); impl TryFrom for usize { type Error = PositionError; fn try_from(position: Position) -> Result { position .0 .try_into() .map_err(|_| PositionError::InvalidPosition) } } impl TryFrom for Position { type Error = PositionError; fn try_from(value: usize) -> Result { Ok(Self( u64::try_from(value).map_err(|_| PositionError::InvalidPosition)?, )) } } impl From for Position { fn from(value: u64) -> Self { Self(value) } } impl Deref for Position { type Target = u64; fn deref(&self) -> &Self::Target { &self.0 } } impl PartialEq for u64 { fn eq(&self, other: &Position) -> bool { *self == other.0 } } impl PartialOrd for Position { fn partial_cmp(&self, other: &Self) -> Option { self.0.partial_cmp(&other.0) } } impl fmt::Display for Position { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", self.0) } } impl Position { pub const ZERO: Self = Self(0); pub fn increment(self) -> Result { match self { Self::ZERO => Ok(Self(1)), Self(value) => value .checked_add(1) .ok_or(PositionError::MaximumPositionReached) .map(Self), } } } #[derive(Debug, Error)] pub enum PositionError { #[error("Maximum position reached for subnet")] MaximumPositionReached, #[error("Invalid expected position")] InvalidExpectedPosition, #[error("")] InvalidPosition, } ================================================ FILE: crates/topos-core/src/types.rs ================================================ use crate::uci::{Certificate, 
CertificateId};
use serde::{Deserialize, Serialize};

use crate::errors::GrpcParsingError;

use self::stream::CertificateSourceStreamPosition;
use crate::api::grpc::{
    checkpoints::SourceStreamPosition,
    tce::v1::{ProofOfDelivery as GrpcProofOfDelivery, SignedReady},
};

pub mod stream;

/// A `Ready` message payload as exchanged on the network.
pub type Ready = String;
/// Signature over a `Ready` message.
pub type Signature = String;

pub use topos_crypto::validator_id::Error as ValidatorIdConversionError;
pub use topos_crypto::validator_id::ValidatorId;

/// A certificate together with the proof that it was delivered.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub struct CertificateDelivered {
    pub certificate: Certificate,
    pub proof_of_delivery: ProofOfDelivery,
}

impl AsRef<CertificateDelivered> for CertificateDelivered {
    fn as_ref(&self) -> &Self {
        self
    }
}

/// Certificate's Proof of Delivery
///
/// This structure is used to prove that a certificate has been delivered.
/// It contains the certificate's ID, the position of the certificate in the
/// source stream, the list of Ready messages received and the threshold.
/// The threshold is the number of Ready messages required to consider the
/// certificate as delivered. For a certificate, multiple Proofs of Delivery
/// can be created on the network, each one with a different list of Ready messages.
///
/// Two different Proofs of Delivery for the same Certificate can still be valid
/// if their Ready messages are valid. Because of the threshold, a certificate
/// can be considered as delivered even with a different set of Ready messages,
/// it simply means that the node received a different set of Ready messages
/// than the other nodes.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub struct ProofOfDelivery {
    /// The certificate's ID
    pub certificate_id: CertificateId,
    /// The position of the certificate in the source stream
    pub delivery_position: CertificateSourceStreamPosition,
    /// The list of Ready messages used to prove the certificate's delivery
    pub readies: Vec<(Ready, Signature)>,
    /// The threshold of Ready messages required to consider the certificate as delivered
    pub threshold: u64,
}

impl From<SourceStreamPosition> for CertificateSourceStreamPosition {
    fn from(value: SourceStreamPosition) -> Self {
        Self {
            subnet_id: value.source_subnet_id,
            position: value.position.into(),
        }
    }
}

impl TryFrom<GrpcProofOfDelivery> for ProofOfDelivery {
    type Error = GrpcParsingError;

    fn try_from(value: GrpcProofOfDelivery) -> Result<Self, Self::Error> {
        // The gRPC type wraps the position in an Option: absence is a
        // malformed payload, not a default.
        let position: SourceStreamPosition = value
            .delivery_position
            .ok_or(GrpcParsingError::GrpcMalformedType("position"))?
            .try_into()?;

        Ok(Self {
            certificate_id: position
                .certificate_id
                .ok_or(GrpcParsingError::GrpcMalformedType(
                    "position.certificate_id",
                ))?,
            delivery_position: position.into(),
            readies: value
                .readies
                .into_iter()
                .map(|v| (v.ready, v.signature))
                .collect(),
            threshold: value.threshold,
        })
    }
}

impl From<ProofOfDelivery> for GrpcProofOfDelivery {
    fn from(value: ProofOfDelivery) -> Self {
        Self {
            delivery_position: Some(
                SourceStreamPosition {
                    source_subnet_id: value.delivery_position.subnet_id,
                    position: *value.delivery_position.position,
                    certificate_id: Some(value.certificate_id),
                }
                .into(),
            ),
            readies: value
                .readies
                .into_iter()
                .map(|v| SignedReady {
                    ready: v.0,
                    signature: v.1,
                })
                .collect(),
            threshold: value.threshold,
        }
    }
}

================================================ FILE: crates/topos-core/src/uci/certificate.rs ================================================

use serde::{Deserialize, Serialize};
use std::borrow::Borrow;
use std::fmt::Debug;

use super::{
    CertificateId, Error, Frost, ReceiptsRootHash, StarkProof, StateRoot, SubnetId, TxRootHash,
    CERTIFICATE_ID_LENGTH, DUMMY_FROST_VERIF_DELAY,
DUMMY_STARK_DELAY,
};

/// Certificate - main exchange item
#[derive(Clone, Default, Serialize, Deserialize, PartialEq, Eq, Hash)]
pub struct Certificate {
    pub id: CertificateId,
    pub prev_id: CertificateId,
    pub source_subnet_id: SubnetId,
    pub state_root: StateRoot,
    pub tx_root_hash: TxRootHash,
    pub receipts_root_hash: ReceiptsRootHash,
    pub target_subnets: Vec<SubnetId>,
    pub verifier: u32,
    pub proof: StarkProof,
    pub signature: Frost,
}

impl AsRef<Certificate> for Certificate {
    fn as_ref(&self) -> &Self {
        self
    }
}

impl Debug for Certificate {
    // Hand-rolled so the byte-array fields render as 0x-prefixed hex instead
    // of raw integer lists.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Certificate")
            .field("id", &self.id.to_string())
            .field("prev_id", &self.prev_id.to_string())
            .field("source_subnet_id", &self.source_subnet_id.to_string())
            .field(
                "state_root",
                &("0x".to_string() + &hex::encode(self.state_root)),
            )
            .field(
                "tx_root_hash",
                &("0x".to_string() + &hex::encode(self.tx_root_hash)),
            )
            .field(
                "receipts_root_hash",
                &("0x".to_string() + &hex::encode(self.receipts_root_hash)),
            )
            .field(
                "target_subnets",
                &self
                    .target_subnets
                    .iter()
                    .map(|ts| ts.to_string())
                    .collect::<Vec<_>>(),
            )
            .field("verifier", &self.verifier)
            .field("proof", &("0x".to_string() + &hex::encode(&self.proof)))
            .field(
                "signature",
                &("0x".to_string() + &hex::encode(&self.signature)),
            )
            .finish()
    }
}

impl Certificate {
    /// Creates a certificate and derives its content-addressed `id` from
    /// every field except `id` and `signature`.
    #[allow(clippy::too_many_arguments)]
    pub fn new<P: Into<CertificateId>>(
        prev_id: P,
        source_subnet_id: SubnetId,
        state_root: StateRoot,
        tx_root_hash: TxRootHash,
        receipts_root_hash: ReceiptsRootHash,
        target_subnets: &[SubnetId],
        verifier: u32,
        proof: Vec<u8>,
    ) -> Result<Certificate, Box<dyn std::error::Error>> {
        let mut cert = Certificate {
            // Placeholder id; overwritten below once the hash is known.
            id: [0; CERTIFICATE_ID_LENGTH].into(),
            prev_id: prev_id.into(),
            source_subnet_id,
            state_root,
            tx_root_hash,
            receipts_root_hash,
            target_subnets: target_subnets.into(),
            verifier,
            proof,
            signature: Default::default(),
        };

        cert.id = Self::calculate_cert_id(&cert)?.into();
        Ok(cert)
    }

    /// Same as [`Certificate::new`] but with zeroed/default payload fields.
    pub fn new_with_default_fields<P: Into<CertificateId>>(
        prev_id: P,
        source_subnet_id: SubnetId,
        target_subnets: &[SubnetId],
    ) -> Result<Certificate, Box<dyn std::error::Error>> {
        let mut cert = Certificate {
            id: [0; CERTIFICATE_ID_LENGTH].into(),
            prev_id: prev_id.into(),
            source_subnet_id,
            state_root: Default::default(),
            tx_root_hash: Default::default(),
            receipts_root_hash: Default::default(),
            target_subnets: target_subnets.into(),
            verifier: 0,
            proof: Default::default(),
            signature: Default::default(),
        };

        cert.id = Self::calculate_cert_id(&cert)?.into();
        Ok(cert)
    }

    pub fn check_signature(&self) -> Result<(), Error> {
        // Placeholder: only simulates FROST verification latency.
        std::thread::sleep(DUMMY_FROST_VERIF_DELAY);
        Ok(())
    }

    pub fn check_proof(&self) -> Result<(), Error> {
        // Placeholder: only simulates STARK verification latency.
        std::thread::sleep(DUMMY_STARK_DELAY);
        Ok(())
    }

    /// Signs the hash of the certificate payload
    pub fn update_signature(&mut self, private_key: &[u8]) -> Result<(), Error> {
        self.signature =
            topos_crypto::signatures::sign(private_key, self.get_payload().as_slice())?;
        Ok(())
    }

    /// Get byte payload of the certificate
    /// Excludes frost signature
    pub fn get_payload(&self) -> Vec<u8> {
        let mut buffer = Vec::new();
        buffer.extend(self.id.as_array().as_ref());
        buffer.extend_from_slice(self.prev_id.as_array().as_ref());
        buffer.extend_from_slice(self.source_subnet_id.as_array().as_ref());
        buffer.extend_from_slice(self.state_root.as_ref());
        buffer.extend_from_slice(self.tx_root_hash.as_ref());
        buffer.extend_from_slice(self.receipts_root_hash.as_ref());
        for target_subnet in &self.target_subnets {
            buffer.extend_from_slice(target_subnet.as_array().as_ref());
        }
        buffer.extend(self.verifier.to_be_bytes().as_ref());
        buffer.extend(self.proof.as_slice());
        buffer
    }

    // To get unique id, calculate certificate id of certificate object using keccak256,
    // excluding cert_id and signature fields
    fn calculate_cert_id(certificate: &Certificate) -> Result<[u8; CERTIFICATE_ID_LENGTH], Error> {
        let mut buffer = Vec::new();
        buffer.extend_from_slice(certificate.prev_id.as_array().as_ref());
        buffer.extend_from_slice(certificate.source_subnet_id.as_array().as_ref());
        buffer.extend_from_slice(certificate.state_root.as_ref());
        buffer.extend_from_slice(certificate.tx_root_hash.as_ref());
        buffer.extend_from_slice(certificate.receipts_root_hash.as_ref());
        for target_subnet in &certificate.target_subnets {
            buffer.extend_from_slice(target_subnet.as_array().as_ref());
        }
        buffer.extend_from_slice(certificate.verifier.to_be_bytes().as_ref());
        buffer.extend_from_slice(certificate.proof.as_ref());

        let hash = topos_crypto::hash::calculate_hash(buffer.borrow());
        Ok(hash)
    }
}

#[cfg(test)]
mod tests {
    use crate::uci::SUBNET_ID_LENGTH;

    use super::*;

    const PREV_CERTIFICATE_ID: CertificateId =
        CertificateId::from_array([1u8; CERTIFICATE_ID_LENGTH]);
    const TARGET_SUBNET_ID: SubnetId = SubnetId::from_array([3u8; SUBNET_ID_LENGTH]);
    const STATE_ROOT: StateRoot = [4u8; 32];
    const TX_ROOT_HASH: TxRootHash = [5u8; 32];
    const RECEIPTS_ROOT_HASH: ReceiptsRootHash = [6u8; 32];
    const PRIVATE_TEST_KEY: &str =
        "5fb92d6e98884f76de468fa3f6278f8807c48bebc13595d45af5bdc4da702133";

    fn generate_dummy_cert(signing_key: &[u8]) -> Certificate {
        let public_key =
            topos_crypto::keys::derive_public_key(signing_key).expect("valid public key");
        // Subnet id is the x-coordinate of the compressed public key (bytes 1..33).
        let source_subnet_id: [u8; SUBNET_ID_LENGTH] = public_key[1..33].try_into().unwrap();

        Certificate::new(
            PREV_CERTIFICATE_ID,
            source_subnet_id.into(),
            STATE_ROOT,
            TX_ROOT_HASH,
            RECEIPTS_ROOT_HASH,
            &[TARGET_SUBNET_ID],
            2,
            Default::default(),
        )
        .expect("Dummy certificate")
    }

    #[test]
    fn certificate_signatures() {
        let private_test_key = hex::decode(PRIVATE_TEST_KEY).unwrap();
        let mut dummy_cert = generate_dummy_cert(&private_test_key);
        dummy_cert
            .update_signature(private_test_key.as_slice())
            .expect("valid signature update");

        topos_crypto::signatures::verify(
            &dummy_cert.source_subnet_id.to_secp256k1_public_key(),
            dummy_cert.get_payload().as_slice(),
            dummy_cert.signature.as_slice(),
        )
        .expect("valid signature check")
    }

    #[test]
    #[should_panic]
    fn signature_verification_failed_corrupt_data() {
        let private_test_key = hex::decode(PRIVATE_TEST_KEY).unwrap();
        let mut dummy_cert = generate_dummy_cert(&private_test_key);
        dummy_cert
            .update_signature(private_test_key.as_slice())
            .expect("valid signature update");

        // Corrupt the payload after signing so verification must fail.
        dummy_cert.state_root[0] = 0xff;

        let public_key = topos_crypto::keys::derive_public_key(private_test_key.as_slice())
            .expect("valid public key");

        topos_crypto::signatures::verify(
            &public_key,
            dummy_cert.get_payload().as_slice(),
            dummy_cert.signature.as_slice(),
        )
        .expect("invalid valid signature check")
    }

    #[test]
    #[should_panic]
    fn signature_verification_failed_invalid_public_key() {
        let private_test_key = hex::decode(PRIVATE_TEST_KEY).unwrap();
        let mut dummy_cert = generate_dummy_cert(&private_test_key);
        dummy_cert
            .update_signature(private_test_key.as_slice())
            .expect("valid signature update");

        dummy_cert.state_root[0] = 0xff;

        let mut public_key = topos_crypto::keys::derive_public_key(private_test_key.as_slice())
            .expect("valid public key");
        // Corrupt the public key so it no longer matches the signer.
        public_key[3] = 0xff;

        topos_crypto::signatures::verify(
            &dummy_cert.source_subnet_id.to_secp256k1_public_key(),
            dummy_cert.get_payload().as_slice(),
            dummy_cert.signature.as_slice(),
        )
        .expect("invalid valid signature check")
    }
}

================================================ FILE: crates/topos-core/src/uci/certificate_id.rs ================================================

use serde::{Deserialize, Serialize};
use std::fmt::{Debug, Display};
use std::hash::Hash;

use super::{Error, CERTIFICATE_ID_LENGTH, HEX_CERTIFICATE_ID_LENGTH};

pub const INITIAL_CERTIFICATE_ID: CertificateId =
    CertificateId::from_array([0u8; super::CERTIFICATE_ID_LENGTH]);

#[derive(Serialize, Hash, Deserialize, Default, PartialEq, Eq, Clone, Copy)]
pub struct CertificateId {
    id: [u8; CERTIFICATE_ID_LENGTH],
}

impl Display for CertificateId {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "0x{}", hex::encode(self.id))
    }
}

impl Debug for CertificateId {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "0x{}", hex::encode(self.id))
    }
}

impl Ord for CertificateId {
    fn
cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.id.cmp(&other.id)
    }
}

impl PartialOrd for CertificateId {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        // Delegate to `Ord` so the two orderings can never diverge
        // (clippy::non_canonical_partial_ord_impl).
        Some(self.cmp(other))
    }
}

impl From<[u8; CERTIFICATE_ID_LENGTH]> for CertificateId {
    fn from(value: [u8; CERTIFICATE_ID_LENGTH]) -> Self {
        Self { id: value }
    }
}

impl From<CertificateId> for Vec<u8> {
    fn from(value: CertificateId) -> Vec<u8> {
        value.id.to_vec()
    }
}

impl TryFrom<&[u8]> for CertificateId {
    type Error = Error;

    /// Accepts either a raw 32-byte id or a 64-char hex string, both
    /// optionally prefixed with `0x`.
    fn try_from(value: &[u8]) -> Result<Self, Self::Error> {
        let value = if value.starts_with(b"0x")
            && (value.len() == (HEX_CERTIFICATE_ID_LENGTH + 2)
                || value.len() == (CERTIFICATE_ID_LENGTH + 2))
        {
            &value[2..]
        } else {
            value
        };

        let length = value.len();
        if length != CERTIFICATE_ID_LENGTH && length != HEX_CERTIFICATE_ID_LENGTH {
            return Err(Error::ValidationError(format!(
                "invalid certificate id length {length} - should be {CERTIFICATE_ID_LENGTH} bytes \
                 array or hex encoded string of size {HEX_CERTIFICATE_ID_LENGTH}"
            )));
        }

        let mut id = [0; CERTIFICATE_ID_LENGTH];
        if length == HEX_CERTIFICATE_ID_LENGTH {
            let value = hex::decode(value).map_err(|_| {
                Error::ValidationError(format!(
                    "invalid hex encoded certificate id string: {value:?}"
                ))
            })?;
            id.copy_from_slice(&value[..])
        } else {
            id.copy_from_slice(value);
        }

        Ok(Self { id })
    }
}

impl CertificateId {
    pub const fn from_array(id: [u8; CERTIFICATE_ID_LENGTH]) -> Self {
        Self { id }
    }

    pub const fn as_array(&self) -> &[u8; CERTIFICATE_ID_LENGTH] {
        &self.id
    }
}

#[cfg(test)]
mod tests {
    use super::CertificateId;

    const CERTIFICATE_ID_WITH_PREFIX: &str =
        "0x11db8713a79c41625f4bb2221bd43ac4766fff23e78f82212f48713a6768e76a";
    const CERTIFICATE_ID_WITHOUT_PREFIX: &str =
        "11db8713a79c41625f4bb2221bd43ac4766fff23e78f82212f48713a6768e76a";
    const MALFORMATTED_CERTIFICATE_ID: &str = "invalid_hex_string";

    #[test]
    fn convert_cert_id_string_with_prefix() {
        let certificate_id: CertificateId = CERTIFICATE_ID_WITH_PREFIX
            .as_bytes()
            .try_into()
            .expect("Cannot convert to CertificateID");
        let expected_bytes: &[u8] = &[
            0x11, 0xdb, 0x87, 0x13, 0xa7, 0x9c, 0x41, 0x62, 0x5f, 0x4b, 0xb2, 0x22, 0x1b, 0xd4,
            0x3a, 0xc4, 0x76, 0x6f, 0xff, 0x23, 0xe7, 0x8f, 0x82, 0x21, 0x2f, 0x48, 0x71, 0x3a,
            0x67, 0x68, 0xe7, 0x6a,
        ];
        assert_eq!(certificate_id.id.as_slice(), expected_bytes)
    }

    #[test]
    fn convert_cert_id_string_without_prefix() {
        // Typo fixed in the expect message ("CertificateI" -> "CertificateId").
        let certificate_id: &[u8] =
            &hex::decode(CERTIFICATE_ID_WITHOUT_PREFIX).expect("Cannot convert to CertificateId");
        let certificate_id: CertificateId = certificate_id
            .try_into()
            .expect("Cannot transform bytes to CertificateId");
        let expected_bytes: &[u8] = &[
            0x11, 0xdb, 0x87, 0x13, 0xa7, 0x9c, 0x41, 0x62, 0x5f, 0x4b, 0xb2, 0x22, 0x1b, 0xd4,
            0x3a, 0xc4, 0x76, 0x6f, 0xff, 0x23, 0xe7, 0x8f, 0x82, 0x21, 0x2f, 0x48, 0x71, 0x3a,
            0x67, 0x68, 0xe7, 0x6a,
        ];
        assert_eq!(certificate_id.id.as_slice(), expected_bytes)
    }

    #[test]
    fn malformatted_cert_id() {
        let certificate_id = CertificateId::try_from(MALFORMATTED_CERTIFICATE_ID.as_bytes());
        assert!(certificate_id.is_err());
    }
}

================================================ FILE: crates/topos-core/src/uci/mod.rs ================================================

//! Universal Certificate Interface
//!
Data structures to support Certificates' exchange pub use certificate::Certificate; pub use certificate_id::CertificateId; pub use subnet_id::SubnetId; use std::fmt::Debug; use std::time; use thiserror::Error; mod certificate; mod certificate_id; mod subnet_id; pub const CERTIFICATE_ID_LENGTH: usize = 32; pub const HEX_CERTIFICATE_ID_LENGTH: usize = 64; pub const SUBNET_ID_LENGTH: usize = 32; pub use certificate_id::INITIAL_CERTIFICATE_ID; pub type StarkProof = Vec; pub type Frost = Vec; pub type Address = [u8; 20]; pub type Amount = ethereum_types::U256; pub type StateRoot = [u8; 32]; pub type TxRootHash = [u8; 32]; pub type ReceiptsRootHash = [u8; 32]; /// Heavily checked on the gossip, so not abstracted const DUMMY_FROST_VERIF_DELAY: time::Duration = time::Duration::from_millis(0); /// Zero second to abstract it by considering having a great machine const DUMMY_STARK_DELAY: time::Duration = time::Duration::from_millis(0); #[derive(Debug, Error)] pub enum Error { #[error("certificate validation error: {0}")] ValidationError(String), #[error("topos crypto error: (0)")] CryptoError(#[from] topos_crypto::Error), } ================================================ FILE: crates/topos-core/src/uci/subnet_id.rs ================================================ use serde::{Deserialize, Serialize}; use std::fmt::{Debug, Display}; use std::hash::Hash; use std::str::FromStr; use super::{Error, SUBNET_ID_LENGTH}; #[derive(Serialize, Hash, Deserialize, Default, PartialEq, Eq, Clone, Copy)] pub struct SubnetId { id: [u8; SUBNET_ID_LENGTH], } impl Display for SubnetId { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "0x{}", hex::encode(self.id)) } } impl Debug for SubnetId { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "0x{}", hex::encode(self.id)) } } impl Ord for SubnetId { fn cmp(&self, other: &Self) -> std::cmp::Ordering { self.id.cmp(&other.id) } } impl PartialOrd for SubnetId { fn partial_cmp(&self, other: 
&Self) -> Option { Some(self.id.cmp(&other.id)) } } impl From<[u8; SUBNET_ID_LENGTH]> for SubnetId { fn from(value: [u8; SUBNET_ID_LENGTH]) -> Self { Self { id: value } } } impl From for [u8; SUBNET_ID_LENGTH] { fn from(value: SubnetId) -> Self { value.id } } impl From for Vec { fn from(value: SubnetId) -> Vec { value.id.to_vec() } } impl TryFrom<&[u8]> for SubnetId { type Error = Error; fn try_from(value: &[u8]) -> Result { if value.len() != SUBNET_ID_LENGTH { return Err(Error::ValidationError(format!( "invalid subnet id of length {}, expected length {SUBNET_ID_LENGTH}", value.len() ))); } let mut id = [0; SUBNET_ID_LENGTH]; id.copy_from_slice(value); Ok(Self { id }) } } impl FromStr for SubnetId { type Err = Error; fn from_str(s: &str) -> Result { let s = if s.starts_with("0x") { hex::decode(&s[2..s.len()]).map_err(|e| { Error::ValidationError(format!( "could not decode subnet id hex encoded string '{s}' error: {e}" )) })? } else { s.as_bytes().to_vec() }; s.as_slice().try_into() } } impl PartialEq<[u8]> for SubnetId { fn eq(&self, other: &[u8]) -> bool { if let Ok(current) = Self::try_from(other) { self.as_array().eq(current.as_array()) } else { false } } } impl SubnetId { pub const fn from_array(id: [u8; SUBNET_ID_LENGTH]) -> Self { Self { id } } pub const fn as_array(&self) -> &[u8; SUBNET_ID_LENGTH] { &self.id } pub fn to_secp256k1_public_key(&self) -> [u8; 33] { let mut public_key: [u8; 33] = [0; 33]; public_key[0] = 0x02; public_key[1..(self.id.len() + 1)].copy_from_slice(&self.id[..]); public_key } } ================================================ FILE: crates/topos-core/tests/tce_layer.rs ================================================ use async_stream::stream; use futures::{channel::oneshot, FutureExt}; use futures::{Stream, StreamExt}; use rstest::rstest; use std::collections::HashMap; use std::pin::Pin; use std::time::Duration; use test_log::test; use tokio::sync::mpsc; use tonic::transport::Endpoint; use tonic::{transport::Server, Request, Response, 
Status, Streaming}; use topos_core::api::grpc::shared::v1::checkpoints::TargetCheckpoint; use topos_core::api::grpc::shared::v1::positions::SourceStreamPosition; use topos_core::api::grpc::shared::v1::{CertificateId, SubnetId}; use topos_core::api::grpc::tce::v1::api_service_server::{ApiService, ApiServiceServer}; use topos_core::api::grpc::tce::v1::synchronizer_service_client::SynchronizerServiceClient; use topos_core::api::grpc::tce::v1::watch_certificates_request::{Command, OpenStream}; use topos_core::api::grpc::tce::v1::{ GetLastPendingCertificatesRequest, GetLastPendingCertificatesResponse, GetSourceHeadRequest, GetSourceHeadResponse, LastPendingCertificate, SubmitCertificateRequest, SubmitCertificateResponse, WatchCertificatesRequest, WatchCertificatesResponse, }; use topos_core::api::grpc::uci::v1::Certificate; use topos_core::api::grpc::{shared, GrpcClient}; use uuid::Uuid; use topos_test_sdk::constants::*; #[test(tokio::test)] async fn create_tce_layer() { struct TceServer; use base64ct::{Base64, Encoding}; #[tonic::async_trait] impl ApiService for TceServer { type WatchCertificatesStream = Pin> + Send + 'static>>; async fn submit_certificate( &self, _request: Request, ) -> Result, tonic::Status> { Ok(Response::new(SubmitCertificateResponse {})) } async fn get_source_head( &self, request: Request, ) -> Result, tonic::Status> { let request = request.into_inner(); let return_certificate_id: CertificateId = CERTIFICATE_ID_2.into(); let return_prev_certificate_id: CertificateId = CERTIFICATE_ID_1.into(); Ok(Response::new(GetSourceHeadResponse { position: Some(SourceStreamPosition { source_subnet_id: request.subnet_id.clone(), certificate_id: Some(return_certificate_id.clone()), position: 0, }), certificate: Some(Certificate { source_subnet_id: request.subnet_id, id: Some(return_certificate_id), prev_id: Some(return_prev_certificate_id), target_subnets: Vec::new(), ..Default::default() }), })) } async fn get_last_pending_certificates( &self, request: Request, 
) -> Result, Status> { let request = request.into_inner(); let subnet_ids = request.subnet_ids; let return_certificate_id: CertificateId = CERTIFICATE_ID_2.into(); let return_prev_certificate_id: CertificateId = CERTIFICATE_ID_1.into(); let mut map = HashMap::new(); for subnet_id in subnet_ids { map.insert( Base64::encode_string(&subnet_id.value), LastPendingCertificate { value: Some(Certificate { source_subnet_id: subnet_id.into(), id: Some(return_certificate_id.clone()), prev_id: Some(return_prev_certificate_id.clone()), target_subnets: Vec::new(), ..Default::default() }), index: 0, }, ); } Ok(Response::new(GetLastPendingCertificatesResponse { last_pending_certificate: map, })) } async fn watch_certificates( &self, request: Request>, ) -> Result, tonic::Status> { let mut stream: Streaming<_> = request.into_inner(); let (tx, mut rx) = mpsc::channel::(10); let output = stream! { loop { tokio::select! { Some(_message) = stream.next() => { let tx = tx.clone(); tokio::spawn(async move { let _ = tx.send(WatchCertificatesResponse { request_id: Some(Uuid::new_v4().into()), event: None }).await; }); } Some(event) = rx.recv() => { yield Ok(event); } } } }; Ok(Response::new( Box::pin(output) as Self::WatchCertificatesStream )) } } let (tx, rx) = oneshot::channel(); let svc = ApiServiceServer::new(TceServer); let jh = tokio::spawn(async move { Server::builder() .add_service(svc) .serve_with_shutdown("127.0.0.1:1340".parse().unwrap(), rx.map(drop)) .await .unwrap(); }); tokio::time::sleep(Duration::from_millis(100)).await; let mut client = topos_core::api::grpc::tce::v1::api_service_client::ApiServiceClient::connect( "http://127.0.0.1:1340", ) .await .unwrap(); let source_subnet_id: SubnetId = SOURCE_SUBNET_ID_1.into(); let prev_certificate_id: CertificateId = CERTIFICATE_ID_1.into(); let certificate_id: CertificateId = CERTIFICATE_ID_2.into(); let original_certificate = Certificate { source_subnet_id: Some(source_subnet_id.clone()), id: Some(certificate_id), prev_id: 
Some(prev_certificate_id), target_subnets: vec![], ..Default::default() }; // Submit one certificate let response = client .submit_certificate(SubmitCertificateRequest { certificate: Some(original_certificate.clone()), }) .await .map(|r| r.into_inner()) .unwrap(); assert_eq!(response, SubmitCertificateResponse {}); // Test get source head certificate let response = client .get_source_head(GetSourceHeadRequest { subnet_id: Some(source_subnet_id.clone()), }) .await .map(|r| r.into_inner()) .unwrap(); let expected_response = GetSourceHeadResponse { certificate: Some(original_certificate.clone()), position: Some(SourceStreamPosition { source_subnet_id: Some(source_subnet_id.clone()), certificate_id: original_certificate.id.clone(), position: 0, }), }; assert_eq!(response, expected_response); // Test last pending certificate let response = client .get_last_pending_certificates(GetLastPendingCertificatesRequest { subnet_ids: vec![source_subnet_id.clone()], }) .await .map(|r| r.into_inner()) .unwrap(); let mut expected_last_pending_certificate_ids = HashMap::new(); expected_last_pending_certificate_ids.insert( Base64::encode_string(&source_subnet_id.value), LastPendingCertificate { value: Some(original_certificate.clone()), index: 0, }, ); let expected_response = GetLastPendingCertificatesResponse { last_pending_certificate: expected_last_pending_certificate_ids, }; assert_eq!(response, expected_response); let command = Some(Command::OpenStream(OpenStream { target_checkpoint: Some(TargetCheckpoint { target_subnet_ids: vec![source_subnet_id.clone()], positions: Vec::new(), }), source_checkpoint: None, })); let request_id: shared::v1::Uuid = Uuid::new_v4().into(); let first_request = WatchCertificatesRequest { request_id: Some(request_id), command, }; let mut first_request_short: WatchCertificatesRequest = OpenStream { target_checkpoint: Some(TargetCheckpoint { target_subnet_ids: vec![source_subnet_id], positions: Vec::new(), }), source_checkpoint: None, } .into(); 
first_request_short.request_id = Some(request_id);
    assert_eq!(first_request, first_request_short);

    let outbound = stream! {
        yield first_request;
    };

    let mut stream = client
        .watch_certificates(outbound)
        .await
        .map(|r| r.into_inner())
        .unwrap();

    let message = stream.message().await.unwrap();
    assert!(matches!(message, Some(WatchCertificatesResponse { .. })));

    // Shut the server down and wait for it to exit cleanly.
    tx.send(()).unwrap();
    drop(stream);
    jh.await.unwrap();
}

#[rstest]
#[test(tokio::test)]
async fn create_grpc_client() {
    // `connect_lazy` never dials, so no server needs to be listening.
    let entrypoint = Endpoint::from_static("http://127.0.0.1:1340").connect_lazy();
    let _client = SynchronizerServiceClient::init(entrypoint);
}

================================================ FILE: crates/topos-crypto/Cargo.toml ================================================

[package]
name = "topos-crypto"
description = "Implementation of the Topos cryptography utility functions"
version = "0.1.0"
edition = "2021"

[lints]
workspace = true

[dependencies]
secp256k1.workspace = true
byteorder.workspace = true
hex.workspace = true
thiserror.workspace = true
ethers.workspace = true
serde.workspace = true
keccak-hash = "0.10.0"
eth-keystore = "0.5.0"

[dev-dependencies]
rstest.workspace = true
topos-core = { path = "../topos-core", features = ["api", "uci"] }
ethers.workspace = true

================================================ FILE: crates/topos-crypto/src/hash.rs ================================================

use keccak_hash::keccak_256;

/// Returns the 32-byte keccak-256 digest of `data`.
pub fn calculate_hash(data: &[u8]) -> [u8; 32] {
    let mut hash: [u8; 32] = [0u8; 32];
    keccak_256(data, &mut hash);
    hash
}

================================================ FILE: crates/topos-crypto/src/keys.rs ================================================

use crate::Error;
use secp256k1::{PublicKey, Secp256k1, SecretKey};

/// Derives the serialized (compressed, 33-byte) secp256k1 public key from a
/// raw private key.
pub fn derive_public_key(private_key: &[u8]) -> Result<Vec<u8>, Error> {
    let secret_key =
        SecretKey::from_slice(private_key).map_err(|e| Error::InvalidKeyError(e.to_string()))?;

    Ok(PublicKey::from_secret_key(&Secp256k1::new(), &secret_key)
        .serialize()
        .to_vec())
}

================================================ FILE: crates/topos-crypto/src/keystore.rs ================================================

use crate::Error;
/// Module for handling local topos node keystore
use std::path::Path;

pub const SUBNET_NODE_VALIDATOR_KEY_FILE_PATH: &str = "/consensus/validator.key";

/// Reads a validator private key from disk.
///
/// With a `password` the file is decrypted as an Ethereum-format keystore;
/// without one it is read as a hex-encoded plaintext key.
pub fn read_private_key_from_file(
    file_name: &std::path::PathBuf,
    password: Option<String>,
) -> Result<Vec<u8>, Error> {
    let keypath = Path::new(file_name);
    let private_key = if let Some(password) = password {
        // Encrypted keystore in ethereum wallet format
        eth_keystore::decrypt_key(keypath, password)?
    } else {
        let key = std::fs::read_to_string(keypath)?.trim().to_string();
        hex::decode(key).map_err(|e| Error::InvalidKeyError(e.to_string()))?
    };
    Ok(private_key)
}

pub fn get_keystore_path(subnet_data_dir: &str) -> std::path::PathBuf {
    std::path::PathBuf::from(&(subnet_data_dir.to_string() + SUBNET_NODE_VALIDATOR_KEY_FILE_PATH))
}

================================================ FILE: crates/topos-crypto/src/lib.rs ================================================

use thiserror::Error;

pub mod hash;
pub mod keys;
pub mod keystore;
pub mod messages;
pub mod signatures;
pub mod validator_id;

#[derive(Debug, Error)]
pub enum Error {
    #[error("Keystore error: {0}")]
    KeystoreError(#[from] eth_keystore::KeystoreError),
    #[error("Keystore file io error: {0}")]
    KeystoreFileError(#[from] std::io::Error),
    #[error("Invalid key error: {0}")]
    InvalidKeyError(String),
    #[error("Elliptic curve error: {0}")]
    Secp256k1Error(#[from] secp256k1::Error),
    #[error("Invalid signature: {0}")]
    InvalidSignature(String),
}

================================================ FILE: crates/topos-crypto/src/messages.rs ================================================

use ethers::signers::Signer;
use ethers::signers::{LocalWallet, WalletError};
use ethers::types::{RecoveryMessage, SignatureError};
use ethers::utils::hash_message;
use std::str::FromStr;
use thiserror::Error;

pub use ethers::types::{Address, Signature, H160, U256};

#[derive(Error, Debug)]
pub enum MessageSignerError {
    #[error("Unable to parse private key")]
    PrivateKeyParsing,
}

#[derive(Debug)]
pub struct MessageSigner {
    pub public_address: Address,
    wallet: LocalWallet,
}

impl FromStr for MessageSigner {
    type Err = MessageSignerError;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let decoded = hex::decode(s).map_err(|_| MessageSignerError::PrivateKeyParsing)?;
        Self::new(&decoded[..])
    }
}

impl MessageSigner {
    pub fn new(private_key: &[u8]) -> Result<Self, MessageSignerError> {
        let wallet: LocalWallet = LocalWallet::from_bytes(private_key)
            .map_err(|_| MessageSignerError::PrivateKeyParsing)?;

        Ok(Self {
            public_address: wallet.address(),
            wallet,
        })
    }

    /// Signs `payload` after applying the EIP-191 prefix (`hash_message`).
    pub fn sign_message(&self, payload: &[u8]) -> Result<Signature, WalletError> {
        let hash = hash_message(payload);

        LocalWallet::sign_hash(&self.wallet, hash)
    }

    /// Checks that `signature` over `payload` recovers `public_key`.
    pub fn verify_signature(
        &self,
        signature: Signature,
        payload: &[u8],
        public_key: Address,
    ) -> Result<(), SignatureError> {
        let message: RecoveryMessage = payload.into();

        signature.verify(message, public_key)
    }
}

================================================ FILE: crates/topos-crypto/src/signatures.rs ================================================

use crate::Error;
use secp256k1::{Message, PublicKey, Secp256k1, SecretKey};

/// Signs `keccak256(data)` with ECDSA/secp256k1 and returns the 64-byte
/// compact signature.
pub fn sign(private_key: &[u8], data: &[u8]) -> Result<Vec<u8>, crate::Error> {
    let secp = Secp256k1::new();
    let secret_key =
        SecretKey::from_slice(private_key).map_err(|e| Error::InvalidKeyError(e.to_string()))?;
    let hash = crate::hash::calculate_hash(data);
    let message = Message::from_slice(&hash).map_err(Error::Secp256k1Error)?;
    let signature = secp.sign_ecdsa(&message, &secret_key);
    Ok(signature.serialize_compact().to_vec())
}

/// Verifies a compact ECDSA signature over `keccak256(data)`.
pub fn verify(public_key: &[u8], data: &[u8], signature: &[u8]) -> Result<(), crate::Error> {
    let secp = Secp256k1::new();
    let public_key =
        PublicKey::from_slice(public_key).map_err(|e| Error::InvalidKeyError(e.to_string()))?;
    let signature = secp256k1::ecdsa::Signature::from_compact(signature)
        .map_err(|e| Error::InvalidSignature(e.to_string()))?;
    let hash = crate::hash::calculate_hash(data);
    let message = Message::from_slice(&hash).map_err(Error::Secp256k1Error)?;
    secp.verify_ecdsa(&message, &signature, &public_key)
        .map_err(|e| Error::InvalidSignature(e.to_string()))
}

================================================ FILE: crates/topos-crypto/src/validator_id.rs ================================================

use crate::messages::{Address, H160};
use serde::{Deserialize, Serialize};
use std::str::FromStr;
use thiserror::Error;

pub const VALIDATOR_ID_LENGTH: usize = 20;

#[derive(Debug, Error)]
pub enum Error {
    #[error("Failed to parse address string as H160")]
    ParseError,
    #[error("Failed to convert byte array into H160: {0}")]
    InvalidByteLength(String),
}

#[derive(Clone, Copy, Default, Debug, Serialize, Deserialize, Eq, PartialEq, Hash)]
pub struct ValidatorId(H160);

impl ValidatorId {
    pub fn as_bytes(&self) -> &[u8] {
        self.0.as_bytes()
    }

    pub fn address(&self) -> Address {
        self.0
    }
}

impl From<H160> for ValidatorId {
    fn from(address: H160) -> Self {
        ValidatorId(address)
    }
}

impl FromStr for ValidatorId {
    type Err = Error;

    fn from_str(address: &str) -> Result<Self, Self::Err> {
        H160::from_str(address)
            .map_err(|_| Error::ParseError)
            .map(ValidatorId)
    }
}

impl std::fmt::Display for ValidatorId {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "0x{}", hex::encode(self.0))
    }
}

================================================ FILE: crates/topos-crypto/tests/messages.rs ================================================

use std::str::FromStr;

use rstest::*;
use topos_core::types::ValidatorId;
use topos_core::uci::CertificateId;
use topos_crypto::messages::MessageSigner;

#[rstest]
pub fn test_signing_messages() {
    let message_signer_sender =
        MessageSigner::from_str("122f3ae6ade1fd136b292cea4f6243c7811160352c8821528547a1fe7c459daf")
            .unwrap();
    let validator_id_sender = ValidatorId::from(message_signer_sender.public_address);
    let certificate_id =
CertificateId::from_array([0u8; 32]);

    let mut payload = Vec::new();
    payload.extend_from_slice(certificate_id.as_array());
    payload.extend_from_slice(validator_id_sender.as_bytes());

    let signature = message_signer_sender
        .sign_message(&payload)
        .expect("Cannot create Signature");

    let message_signer_receiver =
        MessageSigner::from_str("a2e33a9bad88f7b7568228f51d5274c471a9217162d46f1533b6a290f0be1baf")
            .unwrap();

    // Verifying against the sender's address must succeed: the signature
    // recovers the address of the key that produced it.
    let verify = message_signer_receiver.verify_signature(
        signature,
        &payload,
        validator_id_sender.address(),
    );

    assert!(verify.is_ok());
}

#[rstest]
pub fn fails_to_verify_with_own_public_address() {
    let message_signer_sender =
        MessageSigner::from_str("122f3ae6ade1fd136b292cea4f6243c7811160352c8821528547a1fe7c459daf")
            .unwrap();
    let validator_id_sender = ValidatorId::from(message_signer_sender.public_address);
    let certificate_id = CertificateId::from_array([0u8; 32]);

    let mut payload = Vec::new();
    payload.extend_from_slice(certificate_id.as_array());
    payload.extend_from_slice(validator_id_sender.as_bytes());

    let signature = message_signer_sender
        .sign_message(&payload)
        .expect("Cannot create Signature");

    let message_signer_receiver =
        MessageSigner::from_str("a2e33a9bad88f7b7568228f51d5274c471a9217162d46f1533b6a290f0be1baf")
            .unwrap();
    let validator_id_receiver = ValidatorId::from(message_signer_receiver.public_address);

    // Verifying against the receiver's own address must fail: the signature
    // was produced by the sender's key.
    let verify = message_signer_receiver.verify_signature(
        signature,
        &payload,
        validator_id_receiver.address(),
    );

    assert!(verify.is_err());
}

================================================ FILE: crates/topos-metrics/Cargo.toml ================================================

[package]
name = "topos-metrics"
version = "0.1.0"
edition = "2021"

[lints]
workspace = true

[dependencies]
lazy_static.workspace = true
prometheus.workspace = true

================================================ FILE: crates/topos-metrics/src/api.rs ================================================

use prometheus::{register_int_counter_with_registry, IntCounter};

use lazy_static::lazy_static;

use crate::TOPOS_METRIC_REGISTRY;

lazy_static! {
    // Counter of certificates submitted through the gRPC API.
    pub static ref API_GRPC_CERTIFICATE_RECEIVED_TOTAL: IntCounter =
        register_int_counter_with_registry!(
            "api_grpc_certificate_received_total",
            "Number of Certificates received from the gRPC API.",
            TOPOS_METRIC_REGISTRY
        )
        .unwrap();
}

================================================ FILE: crates/topos-metrics/src/double_echo.rs ================================================

use prometheus::{
    register_int_counter_with_registry, register_int_gauge_with_registry, IntCounter, IntGauge,
};

use lazy_static::lazy_static;

use crate::TOPOS_METRIC_REGISTRY;

// Gauges/counters for the double-echo broadcast; all registered against the
// shared TOPOS_METRIC_REGISTRY.
lazy_static! {
    pub static ref DOUBLE_ECHO_ACTIVE_TASKS_COUNT: IntGauge = register_int_gauge_with_registry!(
        "double_echo_active_tasks_count",
        "Number of active tasks in the double echo.",
        TOPOS_METRIC_REGISTRY
    )
    .unwrap();
    pub static ref DOUBLE_ECHO_COMMAND_CHANNEL_CAPACITY_TOTAL: IntCounter =
        register_int_counter_with_registry!(
            "double_echo_command_channel_capacity_total",
            "Number of time the double echo command channel was at capacity.",
            TOPOS_METRIC_REGISTRY
        )
        .unwrap();
    pub static ref DOUBLE_ECHO_BUFFER_CAPACITY_TOTAL: IntCounter =
        register_int_counter_with_registry!(
            "double_echo_buffer_capacity_total",
            "Number of time the double echo buffer was at capacity.",
            TOPOS_METRIC_REGISTRY
        )
        .unwrap();
    pub static ref DOUBLE_ECHO_CURRENT_BUFFER_SIZE: IntGauge = register_int_gauge_with_registry!(
        "double_echo_current_buffer_size",
        "Current size of the double echo buffer.",
        TOPOS_METRIC_REGISTRY
    )
    .unwrap();
    pub static ref DOUBLE_ECHO_BUFFERED_MESSAGE_COUNT: IntGauge =
        register_int_gauge_with_registry!(
            "double_echo_buffered_message_count",
            "Number of message buffered in the double echo buffer.",
            TOPOS_METRIC_REGISTRY
        )
        .unwrap();
    pub static ref DOUBLE_ECHO_BROADCAST_CREATED_TOTAL: IntCounter =
        register_int_counter_with_registry!(
            "double_echo_broadcast_created_total",
            "Number of broadcast created.",
            TOPOS_METRIC_REGISTRY
        )
        .unwrap();
    pub static ref
DOUBLE_ECHO_BROADCAST_FINISHED_TOTAL: IntCounter = register_int_counter_with_registry!( "double_echo_broadcast_finished_total", "Number of broadcast finished.", TOPOS_METRIC_REGISTRY ) .unwrap(); } ================================================ FILE: crates/topos-metrics/src/lib.rs ================================================ use prometheus::{ register_histogram_with_registry, register_int_counter_with_registry, Encoder, Histogram, IntCounter, Registry, TextEncoder, }; use lazy_static::lazy_static; use std::collections::hash_map::HashMap; mod api; mod double_echo; mod p2p; mod storage; #[cfg(test)] mod tests; pub use api::*; pub use double_echo::*; pub use p2p::*; pub use storage::*; lazy_static! { pub static ref TOPOS_METRIC_REGISTRY: Registry = Registry::new_custom( Some("topos".to_string()), Some(HashMap::from([ ( "run_id".to_string(), std::env::var("TOPOS_RUN_ID") .ok() .unwrap_or("default".to_string()) ), ( "run_number".to_string(), std::env::var("TOPOS_RUN_NUMBER") .ok() .unwrap_or("default".to_string()) ) ])) ) .unwrap(); pub static ref CERTIFICATE_PROCESSING_TOTAL: IntCounter = register_int_counter_with_registry!( "certificate_processing_total", "Number of certificate received.", TOPOS_METRIC_REGISTRY ) .unwrap(); pub static ref CERTIFICATE_PROCESSING_FROM_GOSSIP_TOTAL: IntCounter = register_int_counter_with_registry!( "certificate_processing_from_gossip_total", "Number of certificate received from gossip.", TOPOS_METRIC_REGISTRY ) .unwrap(); pub static ref CERTIFICATE_PROCESSING_FROM_API_TOTAL: IntCounter = register_int_counter_with_registry!( "certificate_processing_from_api_total", "Number of certificate received from api.", TOPOS_METRIC_REGISTRY ) .unwrap(); pub static ref CERTIFICATE_DELIVERED_TOTAL: IntCounter = register_int_counter_with_registry!( "certificate_delivered_total", "Number of certificate delivered.", TOPOS_METRIC_REGISTRY ) .unwrap(); pub static ref CERTIFICATE_DELIVERY_LATENCY: Histogram = register_histogram_with_registry!( 
"double_echo_delivery_latency", "Latency to delivery.", prometheus::linear_buckets(0.1, 0.01, 500).unwrap(), TOPOS_METRIC_REGISTRY ) .unwrap(); } pub fn gather_metrics() -> String { let mut buffer = Vec::new(); let encoder = TextEncoder::new(); // Gather the metrics. let metric_families = prometheus::gather(); // Encode them to send. encoder.encode(&metric_families, &mut buffer).unwrap(); let topos_metrics = TOPOS_METRIC_REGISTRY.gather(); encoder.encode(&topos_metrics, &mut buffer).unwrap(); String::from_utf8(buffer.clone()).unwrap() } pub fn init_metrics() { API_GRPC_CERTIFICATE_RECEIVED_TOTAL.reset(); P2P_EVENT_STREAM_CAPACITY_TOTAL.reset(); P2P_MESSAGE_RECEIVED_ON_GOSSIP_TOTAL.reset(); P2P_MESSAGE_RECEIVED_ON_ECHO_TOTAL.reset(); P2P_MESSAGE_RECEIVED_ON_READY_TOTAL.reset(); P2P_MESSAGE_SENT_ON_GOSSIPSUB_TOTAL.reset(); DOUBLE_ECHO_ACTIVE_TASKS_COUNT.set(0); DOUBLE_ECHO_COMMAND_CHANNEL_CAPACITY_TOTAL.reset(); DOUBLE_ECHO_BUFFER_CAPACITY_TOTAL.reset(); DOUBLE_ECHO_CURRENT_BUFFER_SIZE.set(0); DOUBLE_ECHO_BUFFERED_MESSAGE_COUNT.set(0); DOUBLE_ECHO_BROADCAST_CREATED_TOTAL.reset(); DOUBLE_ECHO_BROADCAST_FINISHED_TOTAL.reset(); CERTIFICATE_PROCESSING_TOTAL.reset(); CERTIFICATE_PROCESSING_FROM_GOSSIP_TOTAL.reset(); CERTIFICATE_PROCESSING_FROM_API_TOTAL.reset(); CERTIFICATE_DELIVERED_TOTAL.reset(); STORAGE_COMMAND_CHANNEL_CAPACITY_TOTAL.reset(); } ================================================ FILE: crates/topos-metrics/src/p2p.rs ================================================ use prometheus::{ register_histogram_with_registry, register_int_counter_vec_with_registry, register_int_counter_with_registry, Histogram, IntCounter, IntCounterVec, }; use lazy_static::lazy_static; use crate::TOPOS_METRIC_REGISTRY; lazy_static! 
{ pub static ref P2P_EVENT_STREAM_CAPACITY_TOTAL: IntCounter = register_int_counter_with_registry!( "p2p_event_stream_capacity_total", "Number of time the p2p event stream was almost at capacity.", TOPOS_METRIC_REGISTRY ) .unwrap(); pub static ref P2P_DUPLICATE_MESSAGE_ID_RECEIVED_TOTAL: IntCounter = register_int_counter_with_registry!( "p2p_duplicate_message_id_received_total", "Number of time a duplicate message id was received.", TOPOS_METRIC_REGISTRY ) .unwrap(); pub static ref P2P_MESSAGE_RECEIVED_ON_GOSSIP_TOTAL: IntCounter = register_int_counter_with_registry!( "p2p_gossip_message_total", "Number of gossip message received.", TOPOS_METRIC_REGISTRY ) .unwrap(); pub static ref P2P_MESSAGE_RECEIVED_ON_ECHO_TOTAL: IntCounter = register_int_counter_with_registry!( "p2p_echo_message_total", "Number of echo message received.", TOPOS_METRIC_REGISTRY ) .unwrap(); pub static ref P2P_MESSAGE_RECEIVED_ON_READY_TOTAL: IntCounter = register_int_counter_with_registry!( "p2p_ready_message_total", "Number of ready message received.", TOPOS_METRIC_REGISTRY ) .unwrap(); pub static ref P2P_MESSAGE_SENT_ON_GOSSIPSUB_TOTAL: IntCounter = register_int_counter_with_registry!( "p2p_gossipsub_message_sent_total", "Number of gossipsub message sent.", TOPOS_METRIC_REGISTRY ) .unwrap(); pub static ref P2P_GOSSIP_BATCH_SIZE: Histogram = register_histogram_with_registry!( "p2p_gossip_batch_size", "Number of message sent in a gossip batch.", vec![1.0, 5.0, 10.0, 50.0, 100.0, 200.0, 500.0, 1000.0, 2000.0, 5000.0], TOPOS_METRIC_REGISTRY ) .unwrap(); pub static ref P2P_MESSAGE_DESERIALIZE_FAILURE_TOTAL: IntCounterVec = register_int_counter_vec_with_registry!( "p2p_message_deserialize_failure_total", "Number of message deserialization failure.", &["topic"], TOPOS_METRIC_REGISTRY ) .unwrap(); pub static ref P2P_MESSAGE_SERIALIZE_FAILURE_TOTAL: IntCounterVec = register_int_counter_vec_with_registry!( "p2p_message_serialize_failure_total", "Number of message serialization failure.", &["topic"], 
TOPOS_METRIC_REGISTRY
        )
        .unwrap();
}
================================================ FILE: crates/topos-metrics/src/storage.rs ================================================
use prometheus::{
    register_histogram_with_registry, register_int_counter_with_registry,
    register_int_gauge_with_registry, Histogram, IntCounter, IntGauge,
};

use lazy_static::lazy_static;

use crate::TOPOS_METRIC_REGISTRY;

// Storage-related metrics, all registered on the shared topos registry.
lazy_static! {
    pub static ref STORAGE_COMMAND_CHANNEL_CAPACITY_TOTAL: IntCounter =
        register_int_counter_with_registry!(
            "storage_command_channel_capacity_total",
            "Number of time the storage command channel was at capacity.",
            TOPOS_METRIC_REGISTRY
        )
        .unwrap();
    pub static ref STORAGE_PENDING_CERTIFICATE_EXISTENCE_LATENCY: Histogram =
        register_histogram_with_registry!(
            "storage_pending_certificate_existence_latency",
            // Fixed typo in the help text: "existance" -> "existence".
            "Latency of the pending certificate existence check.",
            vec![0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1.0, 2.0, 5.0],
            TOPOS_METRIC_REGISTRY
        )
        .unwrap();
    pub static ref STORAGE_ADDING_PENDING_CERTIFICATE_LATENCY: Histogram =
        register_histogram_with_registry!(
            "storage_adding_pending_certificate_latency",
            "Latency of adding a pending certificate.",
            vec![0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1.0, 2.0, 5.0],
            TOPOS_METRIC_REGISTRY
        )
        .unwrap();
    pub static ref STORAGE_PENDING_POOL_COUNT: IntGauge = register_int_gauge_with_registry!(
        "storage_pending_pool_count",
        "Number of certificates in the pending pool.",
        TOPOS_METRIC_REGISTRY
    )
    .unwrap();
    pub static ref STORAGE_PRECEDENCE_POOL_COUNT: IntGauge = register_int_gauge_with_registry!(
        "storage_precedence_pool_count",
        "Number of certificates in the precedence pool.",
        TOPOS_METRIC_REGISTRY
    )
    .unwrap();
}
================================================ FILE: crates/topos-metrics/src/tests.rs ================================================
use crate::p2p;

/// Incrementing one label of the serialize-failure counter vec must not
/// affect the other labels.
#[test]
fn increment_echo_failure_ser() {
    let m = &p2p::P2P_MESSAGE_SERIALIZE_FAILURE_TOTAL;
    m.with_label_values(&["echo"]).inc();
    assert_eq!(m.get_metric_with_label_values(&["echo"]).unwrap().get(), 1);
    assert_eq!(m.get_metric_with_label_values(&["ready"]).unwrap().get(), 0);
}

/// Same independence property for the deserialize-failure counter vec.
#[test]
fn increment_echo_failure_des() {
    let m = &p2p::P2P_MESSAGE_DESERIALIZE_FAILURE_TOTAL;
    m.with_label_values(&["echo"]).inc();
    assert_eq!(m.get_metric_with_label_values(&["echo"]).unwrap().get(), 1);
    assert_eq!(m.get_metric_with_label_values(&["ready"]).unwrap().get(), 0);
}
================================================ FILE: crates/topos-node/Cargo.toml ================================================
[package]
name = "topos-node"
version = "0.1.0"
edition = "2021"
description = "Runtime crate of a topos-node"

[lints]
workspace = true

[dependencies]
topos-config = { path = "../topos-config/" }
topos-tce = { path = "../topos-tce/" }
topos-p2p = { path = "../topos-p2p" }
topos-sequencer = { path = "../topos-sequencer" }
topos-core = { workspace = true, features = ["api"] }
topos-certificate-spammer = { path = "../topos-certificate-spammer" }
topos-tce-broadcast = { path = "../topos-tce-broadcast", optional = true }
topos-wallet = { path = "../topos-wallet" }
topos-telemetry = { path = "../topos-telemetry/", features = ["tracing"] }
async-stream.workspace = true
async-trait.workspace = true
clap.workspace = true
hex.workspace = true
futures.workspace = true
opentelemetry.workspace = true
serde.workspace = true
serde_json.workspace = true
tokio = { workspace = true, features = ["full"] }
tokio-util.workspace = true
tonic.workspace = true
tower.workspace = true
tracing = { workspace = true, features = ["log"] }
tracing-opentelemetry.workspace = true
tracing-subscriber = { workspace = true, features = ["env-filter", "json", "ansi", "fmt"] }
uuid.workspace = true
rand.workspace = true
reqwest.workspace = true
thiserror.workspace = true
opentelemetry-otlp = { workspace = true, features = ["grpc-tonic", "metrics", "tls-roots"] }
dirs = "5.0"
tracing-log = { version = "0.1.3", features = ["env_logger"] }
tar = "0.4.38"
flate2 ="1.0.26" url = "2.3.1" once_cell = "1.17.1" regex = "1" rlp = "0.5.1" openssl = { version = "0.10.61", features = ["vendored"] } [dev-dependencies] toml = "0.7.4" topos-tce-broadcast = { path = "../topos-tce-broadcast" } topos-tce-synchronizer = { path = "../topos-tce-synchronizer" } topos-tce-gatekeeper = { path = "../topos-tce-gatekeeper" } topos-tce-api = { path = "../topos-tce-api" } topos-tce-storage = { path = "../topos-tce-storage" } topos-test-sdk = { path = "../topos-test-sdk" } serde.workspace = true serde_json.workspace = true test-log.workspace = true env_logger.workspace = true rand.workspace = true futures.workspace = true libp2p = { workspace = true, features = ["identify"] } assert_cmd = "2.0.6" insta = { version = "1.21", features = ["json", "redactions"] } rstest = { workspace = true, features = ["async-timeout"] } tempfile = "3.8.0" predicates = "3.0.3" sysinfo = "0.29.11" serial_test = {version = "0.9.0"} [features] default = [] ================================================ FILE: crates/topos-node/build.rs ================================================ use std::process::Command; const DEFAULT_VERSION: &str = "detached"; fn main() { // Set TOPOS_VERSION to HEAD short commit hash unless it's already set if std::option_env!("TOPOS_VERSION").is_none() { let output = Command::new("git") .args(["rev-parse", "--short", "HEAD"]) .output() .expect("failed to access the HEAD commit hash"); let git_hash = String::from_utf8(output.stdout).unwrap(); let topos_version = if git_hash.is_empty() { DEFAULT_VERSION } else { git_hash.as_str() }; println!("cargo:rustc-env=TOPOS_VERSION={topos_version}"); } } ================================================ FILE: crates/topos-node/src/lib.rs ================================================ //! 
Temporary lib exposition for backward topos CLI compatibility use std::process::ExitStatus; use futures::stream::FuturesUnordered; use futures::StreamExt; use opentelemetry::global; use process::Errors; use tokio::{ signal::{self, unix::SignalKind}, sync::mpsc, task::JoinHandle, }; use tokio_util::sync::CancellationToken; use topos_config::{ genesis::Genesis, node::{NodeConfig, NodeRole}, }; use topos_telemetry::tracing::setup_tracing; use topos_wallet::SecretManager; use tracing::{debug, error, info}; use tracing_subscriber::util::TryInitError; mod process; #[derive(Debug, thiserror::Error)] pub enum Error { #[error(transparent)] GenesisFile(#[from] topos_config::genesis::Error), #[error("Unable to setup tracing logger: {0}")] Tracing(#[from] TryInitError), #[error(transparent)] IO(#[from] std::io::Error), #[error( "The role in the config file expect to have a sequencer config defined, none was found" )] MissingSequencerConfig, #[error("An Edge config was expected to be found in the config file")] MissingEdgeConfig, #[error("A TCE config was expected to be found in the config file")] MissingTCEConfig, } pub async fn start( verbose: u8, no_color: bool, otlp_agent: Option, otlp_service_name: Option, no_edge_process: bool, config: NodeConfig, ) -> Result<(), Error> { // Setup instrumentation if both otlp agent and otlp service name // are provided as arguments setup_tracing( verbose, no_color, otlp_agent, otlp_service_name, env!("TOPOS_VERSION"), )?; info!( "⚙️ Read the configuration from {}/config.toml", config.node_path.display() ); debug!("TceConfig: {:?}", config); let config_ref = &config; let genesis: Genesis = config_ref.try_into().map_err(|error| { info!( "Could not load genesis.json file on path {} \n Please make sure to have a valid \ genesis.json file for your subnet in the {}/subnet/{} folder.", config.genesis_path.display(), config.home_path.display(), &config.base.subnet ); error })?; // Get secrets let keys: SecretManager = config_ref.into(); info!( "🧢 
New joiner: {} for the \"{}\" subnet as {:?}", config.base.name, config.base.subnet, config.base.role ); let shutdown_token = CancellationToken::new(); let shutdown_trigger = shutdown_token.clone(); let (shutdown_sender, shutdown_receiver) = mpsc::channel(1); let mut processes = spawn_processes( no_edge_process, config, genesis, shutdown_sender, keys, shutdown_token, )?; let mut sigterm_stream = signal::unix::signal(SignalKind::terminate())?; tokio::select! { _ = sigterm_stream.recv() => { info!("Received SIGTERM, shutting down application..."); shutdown(shutdown_trigger, shutdown_receiver).await; } _ = signal::ctrl_c() => { info!("Received ctrl_c, shutting down application..."); shutdown( shutdown_trigger, shutdown_receiver).await; } Some(result) = processes.next() => { shutdown(shutdown_trigger, shutdown_receiver).await; processes.clear(); match result { Ok(Ok(status)) => { if let Some(0) = status.code() { info!("Terminating with success error code"); } else { info!("Terminating with error status: {:?}", status); std::process::exit(1); } } Ok(Err(e)) => { error!("Terminating with error: {e}"); std::process::exit(1); } Err(e) => { error!("Terminating with error: {e}"); std::process::exit(1); } } } }; Ok(()) } fn spawn_processes( no_edge_process: bool, mut config: NodeConfig, genesis: Genesis, shutdown_sender: mpsc::Sender<()>, keys: SecretManager, shutdown_token: CancellationToken, ) -> Result>>, Error> { let processes = FuturesUnordered::new(); // Edge node if no_edge_process { info!("Using external edge node, skip running of local edge instance...") } else { let edge_config = config.edge.take().ok_or(Error::MissingEdgeConfig)?; let edge_bin_config = config.edge_bin.take().ok_or(Error::MissingEdgeConfig)?; let data_dir = config.node_path.clone(); info!( "Spawning edge process with genesis file: {}, data directory: {}, additional edge \ arguments: {:?}", config.genesis_path.display(), data_dir.display(), edge_config.args ); 
processes.push(process::spawn_edge_process( edge_bin_config.binary_path(), data_dir, config.genesis_path.clone(), edge_config.args, )); } // Sequencer if matches!(config.base.role, NodeRole::Sequencer) { let sequencer_config = config .sequencer .take() .ok_or(Error::MissingSequencerConfig)?; info!( "Running sequencer with configuration {:?}", sequencer_config ); processes.push(process::spawn_sequencer_process( sequencer_config, &keys, (shutdown_token.clone(), shutdown_sender.clone()), )); } // TCE if config.base.subnet == "topos" { let tce_config = config.tce.ok_or(Error::MissingTCEConfig)?; info!("Running topos TCE service...",); processes.push(process::spawn_tce_process( tce_config, keys, genesis, (shutdown_token.clone(), shutdown_sender.clone()), )); } drop(shutdown_sender); Ok(processes) } async fn shutdown(trigger: CancellationToken, mut termination: mpsc::Receiver<()>) { trigger.cancel(); // Wait that all sender get dropped info!("Waiting that all components dropped"); let _ = termination.recv().await; info!("Shutdown procedure finished, exiting..."); // Shutdown tracing global::shutdown_tracer_provider(); } ================================================ FILE: crates/topos-node/src/main.rs ================================================ #[tokio::main] async fn main() -> Result<(), Box> { Ok(()) } ================================================ FILE: crates/topos-node/src/process.rs ================================================ use std::collections::HashMap; use std::path::PathBuf; use std::process::ExitStatus; use thiserror::Error; use tokio::{spawn, sync::mpsc, task::JoinHandle}; use tokio_util::sync::CancellationToken; use topos_config::edge::command::CommandConfig; use topos_config::sequencer::SequencerConfig; use topos_config::tce::broadcast::ReliableBroadcastParams; use topos_config::tce::{AuthKey, StorageConfiguration, TceConfig}; use topos_p2p::Multiaddr; use topos_sequencer::SequencerConfiguration; use topos_wallet::SecretManager; use 
tracing::{debug, error, warn}; use topos_config::genesis::Genesis; #[derive(Error, Debug)] pub enum Errors { #[error("TCE error")] TceFailure, #[error("Sequencer error")] SequencerFailure, #[error("Edge error: {0}")] EdgeTerminated(#[from] std::io::Error), } pub(crate) fn spawn_sequencer_process( config: SequencerConfig, keys: &SecretManager, shutdown: (CancellationToken, mpsc::Sender<()>), ) -> JoinHandle> { let config = SequencerConfiguration { subnet_id: config.subnet_id, public_key: keys.validator_pubkey(), subnet_jsonrpc_http: config.subnet_jsonrpc_http, subnet_jsonrpc_ws: config.subnet_jsonrpc_ws, subnet_contract_address: config.subnet_contract_address, tce_grpc_endpoint: config.tce_grpc_endpoint, signing_key: keys.validator.clone().unwrap(), verifier: 0, start_block: config.start_block, }; debug!("Sequencer args: {config:?}"); spawn(async move { topos_sequencer::run(config, shutdown).await.map_err(|e| { error!("Sequencer failure: {e:?}"); Errors::SequencerFailure }) }) } pub(crate) fn spawn_tce_process( mut config: TceConfig, keys: SecretManager, genesis: Genesis, shutdown: (CancellationToken, mpsc::Sender<()>), ) -> JoinHandle> { config.boot_peers = genesis .boot_peers(Some(topos_p2p::constants::TCE_BOOTNODE_PORT)) .into_iter() .chain(config.parse_boot_peers()) .collect::>(); config.auth_key = keys.network.map(AuthKey::PrivateKey); config.signing_key = keys.validator.map(AuthKey::PrivateKey); config.p2p.is_bootnode = if let Some(AuthKey::PrivateKey(ref k)) = config.auth_key { let peer_id = topos_p2p::utils::keypair_from_protobuf_encoding(&k[..]) .public() .to_peer_id(); config.boot_peers.iter().any(|(p, _)| p == &peer_id) } else { false }; config.validators = genesis.validators().expect("Cannot parse validators"); config.tce_params = ReliableBroadcastParams::new(config.validators.len()); if let Some(socket) = config.libp2p_api_addr { warn!( "`libp2p_api_addr` is deprecated in favor of `listen_addresses` and \ `public_addresses` and will be removed in the 
next version. In order to keep your \ node running, `libp2p_api_addr` will be used." ); let addr: Multiaddr = format!("/ip4/{}/tcp/{}", socket.ip(), socket.port()) .parse() .expect("Unable to generate Multiaddr from `libp2p_api_addr`"); config.p2p.listen_addresses = vec![addr.clone()]; config.p2p.public_addresses = vec![addr]; } config.version = env!("TOPOS_VERSION"); config.storage = StorageConfiguration::RocksDB(Some(config.db_path.clone())); debug!("TCE args: {config:?}"); spawn(async move { topos_tce::launch(&config, shutdown).await.map_err(|e| { error!("TCE process terminated: {e:?}"); Errors::TceFailure }) }) } pub fn spawn_edge_process( edge_path: PathBuf, data_dir: PathBuf, genesis_path: PathBuf, edge_args: HashMap, ) -> JoinHandle> { spawn(async move { CommandConfig::new(edge_path) .server(&data_dir, &genesis_path, edge_args) .spawn() .await .map_err(Errors::EdgeTerminated) }) } ================================================ FILE: crates/topos-p2p/Cargo.toml ================================================ [package] name = "topos-p2p" version = "0.1.0" edition = "2021" [lints] workspace = true [dependencies] async-trait.workspace = true bincode.workspace = true bytes.workspace = true futures.workspace = true hex.workspace = true http-body = "0.4.5" http-body-util = "0.1.0-rc.3" http.workspace = true lazy_static.workspace = true libp2p = { workspace = true, features = ["macros", "gossipsub", "tcp", "dns", "tokio", "request-response", "identify", "kad", "serde", "yamux", "secp256k1"] } pin-project = "1.1.3" prometheus-client.workspace = true rand.workspace = true serde = { workspace = true, features = ["derive"] } smallvec = "1.11.1" thiserror.workspace = true tokio = { workspace = true, features = ["full"] } tokio-stream.workspace = true tokio-util.workspace = true tonic = {workspace = true, features = ["tls", "tls-roots"]} topos-metrics = { path = "../topos-metrics/" } tower.workspace = true tracing = { workspace = true, features = ["attributes"] } 
uuid.workspace = true void = "1" hyper.workspace = true prost.workspace = true topos-core = { path = "../topos-core/" } ip_network = "0.4.1" [dev-dependencies] libp2p-swarm-test = "0.3.0" test-log.workspace = true env_logger.workspace = true rstest = { workspace = true, features = ["async-timeout"] } tracing-subscriber.workspace = true topos-test-sdk = { path = "../topos-test-sdk/" } rand.workspace = true ================================================ FILE: crates/topos-p2p/src/behaviour/discovery.rs ================================================ use std::borrow::Cow; use std::pin::Pin; use std::task::Poll; use std::time::Duration; use crate::error::P2PError; use crate::{config::DiscoveryConfig, error::CommandExecutionError}; use libp2p::kad::{ BootstrapOk, BootstrapResult, Event as KademliaEvent, ProgressStep, QueryId, QueryResult, }; use libp2p::swarm::ToSwarm; use libp2p::{ identity::Keypair, kad::{store::MemoryStore, Behaviour, BucketInserts, Config}, swarm::NetworkBehaviour, Multiaddr, PeerId, }; use tokio::sync::oneshot; use tracing::{debug, error, info}; use super::HealthStatus; pub type PendingRecordRequest = oneshot::Sender, CommandExecutionError>>; /// DiscoveryBehaviour is responsible to discover and manage connections with peers pub(crate) struct DiscoveryBehaviour { /// The inner kademlia behaviour pub(crate) inner: Behaviour, /// The current bootstrap query id used to track the progress of the bootstrap /// and to avoid to start a new bootstrap query if the previous one is still in progress pub(crate) current_bootstrap_query_id: Option, /// The next bootstrap query interval used to schedule the next bootstrap query pub(crate) next_bootstrap_query: Option>>, /// The health status of the discovery behaviour pub(crate) health_status: HealthStatus, } impl DiscoveryBehaviour { pub fn create( config: &DiscoveryConfig, peer_key: Keypair, discovery_protocol: Cow<'static, [u8]>, known_peers: &[(PeerId, Multiaddr)], _with_mdns: bool, ) -> Self { let 
local_peer_id = peer_key.public().to_peer_id(); let kademlia_config = Config::default() .set_replication_factor(config.replication_factor) .set_kbucket_inserts(BucketInserts::Manual) .set_replication_interval(config.replication_interval) .set_publication_interval(config.publication_interval) .set_provider_publication_interval(config.provider_publication_interval) .to_owned(); let mut kademlia = Behaviour::with_config( local_peer_id, MemoryStore::new(local_peer_id), kademlia_config, ); for known_peer in known_peers { info!( "Adding the known peer:{} reachable at {}", &known_peer.0, &known_peer.1 ); let x = kademlia.add_address(&known_peer.0, known_peer.1.clone()); info!( "Adding the known peer:{} reachable at {} - {:?}", &known_peer.0, &known_peer.1, x ); } Self { inner: kademlia, current_bootstrap_query_id: None, // If the `discovery` behaviour is created without known_peers // The bootstrap query interval is disabled only when the local // node is a lonely bootnode, other nodes will join it. next_bootstrap_query: if known_peers.is_empty() { None } else { let mut interval = tokio::time::interval(config.bootstrap_interval); interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay); Some(Box::pin(interval)) }, health_status: if known_peers.is_empty() { HealthStatus::Healthy } else { HealthStatus::Initializing }, } } /// Start the kademlia bootstrap process if it is not already in progress. /// The bootstrap process is used to discover new peers in the network. /// The bootstrap process starts by sending a `FIND_NODE` query of the local PeerId in the DHT. /// Then multiple random PeerId are created in order to randomly walk the network. 
pub fn bootstrap(&mut self) -> Result<(), P2PError> { if self.current_bootstrap_query_id.is_none() { let query_id = self.inner.bootstrap()?; debug!("Started kademlia bootstrap query with query_id: {query_id:?}"); self.current_bootstrap_query_id = Some(query_id); } Ok(()) } /// Change the interval of the next bootstrap queries pub async fn change_interval(&mut self, duration: Duration) -> Result<(), P2PError> { if let Some(interval) = self.next_bootstrap_query.as_mut() { let mut new_interval = tokio::time::interval(duration); // Delay the next tick new_interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay); // ignore first tick _ = new_interval.tick().await; interval.set(new_interval); } Ok(()) } } impl NetworkBehaviour for DiscoveryBehaviour { type ConnectionHandler = as NetworkBehaviour>::ConnectionHandler; type ToSwarm = KademliaEvent; fn handle_established_inbound_connection( &mut self, connection_id: libp2p::swarm::ConnectionId, peer: PeerId, local_addr: &Multiaddr, remote_addr: &Multiaddr, ) -> Result, libp2p::swarm::ConnectionDenied> { self.inner.handle_established_inbound_connection( connection_id, peer, local_addr, remote_addr, ) } fn handle_established_outbound_connection( &mut self, connection_id: libp2p::swarm::ConnectionId, peer: PeerId, addr: &Multiaddr, role_override: libp2p::core::Endpoint, ) -> Result, libp2p::swarm::ConnectionDenied> { self.inner .handle_established_outbound_connection(connection_id, peer, addr, role_override) } fn on_swarm_event(&mut self, event: libp2p::swarm::FromSwarm) { self.inner.on_swarm_event(event) } fn on_connection_handler_event( &mut self, peer_id: PeerId, connection_id: libp2p::swarm::ConnectionId, event: libp2p::swarm::THandlerOutEvent, ) { self.inner .on_connection_handler_event(peer_id, connection_id, event) } fn poll( &mut self, cx: &mut std::task::Context<'_>, ) -> Poll>> { // Poll the kademlia bootstrap interval future in order to define if we need to call the // `bootstrap` if let 
Some(next_bootstrap_query) = self.next_bootstrap_query.as_mut() { if next_bootstrap_query.poll_tick(cx).is_ready() { if let Err(error) = self.bootstrap() { error!("Error while create bootstrap query: {error:?}"); } } } if let Poll::Ready(event) = self.inner.poll(cx) { match event { // When a Bootstrap query ends, we reset the `query_id` ToSwarm::GenerateEvent(KademliaEvent::OutboundQueryProgressed { id, result: result @ QueryResult::Bootstrap(BootstrapResult::Ok(BootstrapOk { num_remaining: 0, .. })), step: step @ ProgressStep { last: true, .. }, stats, }) if Some(&id) == self.current_bootstrap_query_id.as_ref() => { if let Some(interval) = self.next_bootstrap_query.as_mut() { interval.reset(); }; self.current_bootstrap_query_id = None; debug!("Kademlia bootstrap completed with query_id: {id:?}"); return Poll::Ready(ToSwarm::GenerateEvent( KademliaEvent::OutboundQueryProgressed { id, result, stats, step, }, )); } event => { return Poll::Ready(event); } } } Poll::Pending } fn handle_pending_inbound_connection( &mut self, connection_id: libp2p::swarm::ConnectionId, local_addr: &Multiaddr, remote_addr: &Multiaddr, ) -> Result<(), libp2p::swarm::ConnectionDenied> { self.inner .handle_pending_inbound_connection(connection_id, local_addr, remote_addr) } fn handle_pending_outbound_connection( &mut self, connection_id: libp2p::swarm::ConnectionId, maybe_peer: Option, addresses: &[Multiaddr], effective_role: libp2p::core::Endpoint, ) -> Result, libp2p::swarm::ConnectionDenied> { self.inner.handle_pending_outbound_connection( connection_id, maybe_peer, addresses, effective_role, ) } } ================================================ FILE: crates/topos-p2p/src/behaviour/gossip.rs ================================================ use std::collections::hash_map::DefaultHasher; use std::collections::HashSet; use std::hash::{Hash, Hasher}; use std::{ collections::{HashMap, VecDeque}, env, task::Poll, time::Duration, }; use libp2p::swarm::{ConnectionClosed, FromSwarm}; use 
libp2p::PeerId; use libp2p::{ gossipsub::{self, IdentTopic, Message, MessageAuthenticity}, identity::Keypair, swarm::{NetworkBehaviour, THandlerInEvent, ToSwarm}, }; use prost::Message as ProstMessage; use topos_core::api::grpc::tce::v1::Batch; use topos_metrics::P2P_GOSSIP_BATCH_SIZE; use tracing::{debug, error, warn}; use crate::error::P2PError; use crate::{constants, event::ComposedEvent, TOPOS_ECHO, TOPOS_GOSSIP, TOPOS_READY}; use super::HealthStatus; const MAX_BATCH_SIZE: usize = 10; pub struct Behaviour { batch_size: usize, gossipsub: gossipsub::Behaviour, pending: HashMap<&'static str, VecDeque>>, tick: tokio::time::Interval, /// List of connected peers per topic. connected_peer: HashMap<&'static str, HashSet>, /// The health status of the gossip behaviour pub(crate) health_status: HealthStatus, } impl Behaviour { pub fn publish( &mut self, topic: &'static str, message: Vec, ) -> Result { match topic { TOPOS_GOSSIP => { if let Ok(msg_id) = self.gossipsub.publish(IdentTopic::new(topic), message) { debug!("Published on topos_gossip: {:?}", msg_id); } } TOPOS_ECHO | TOPOS_READY => self.pending.entry(topic).or_default().push_back(message), _ => return Err("Invalid topic"), } Ok(0) } pub fn subscribe(&mut self) -> Result<(), P2PError> { self.gossipsub .subscribe(&gossipsub::IdentTopic::new(TOPOS_GOSSIP))?; self.gossipsub .subscribe(&gossipsub::IdentTopic::new(TOPOS_ECHO))?; self.gossipsub .subscribe(&gossipsub::IdentTopic::new(TOPOS_READY))?; Ok(()) } pub async fn new(peer_key: Keypair) -> Self { let batch_size = env::var("TOPOS_GOSSIP_BATCH_SIZE") .map(|v| v.parse::()) .unwrap_or(Ok(MAX_BATCH_SIZE)) .unwrap(); let gossipsub = gossipsub::ConfigBuilder::default() .max_transmit_size(2 * 1024 * 1024) .validation_mode(gossipsub::ValidationMode::Strict) .message_id_fn(|msg_id| { // Content based id let mut s = DefaultHasher::new(); msg_id.data.hash(&mut s); gossipsub::MessageId::from(s.finish().to_be_bytes()) }) .build() .unwrap(); let gossipsub = 
gossipsub::Behaviour::new_with_metrics( MessageAuthenticity::Signed(peer_key), gossipsub, constants::METRIC_REGISTRY .lock() .await .sub_registry_with_prefix("libp2p_gossipsub"), Default::default(), ) .unwrap(); Self { batch_size, gossipsub, pending: [ (TOPOS_ECHO, VecDeque::new()), (TOPOS_READY, VecDeque::new()), ] .into_iter() .collect(), tick: tokio::time::interval(Duration::from_millis( env::var("TOPOS_GOSSIP_INTERVAL") .map(|v| v.parse::()) .unwrap_or(Ok(100)) .unwrap(), )), connected_peer: Default::default(), health_status: Default::default(), } } } impl NetworkBehaviour for Behaviour { type ConnectionHandler = ::ConnectionHandler; type ToSwarm = ComposedEvent; fn handle_established_inbound_connection( &mut self, connection_id: libp2p::swarm::ConnectionId, peer: libp2p::PeerId, local_addr: &libp2p::Multiaddr, remote_addr: &libp2p::Multiaddr, ) -> Result, libp2p::swarm::ConnectionDenied> { self.gossipsub.handle_established_inbound_connection( connection_id, peer, local_addr, remote_addr, ) } fn handle_established_outbound_connection( &mut self, connection_id: libp2p::swarm::ConnectionId, peer: libp2p::PeerId, addr: &libp2p::Multiaddr, role_override: libp2p::core::Endpoint, ) -> Result, libp2p::swarm::ConnectionDenied> { self.gossipsub.handle_established_outbound_connection( connection_id, peer, addr, role_override, ) } fn on_swarm_event(&mut self, event: libp2p::swarm::FromSwarm) { if let FromSwarm::ConnectionClosed(ConnectionClosed { peer_id, connection_id, endpoint, remaining_established, .. 
}) = &event { debug!( "Connection closed: {:?} {:?} {:?} {:?}", peer_id, connection_id, endpoint, remaining_established ); for (_, topic) in self.connected_peer.iter_mut() { topic.remove(peer_id); } } self.gossipsub.on_swarm_event(event) } fn on_connection_handler_event( &mut self, peer_id: libp2p::PeerId, connection_id: libp2p::swarm::ConnectionId, event: libp2p::swarm::THandlerOutEvent, ) { self.gossipsub .on_connection_handler_event(peer_id, connection_id, event) } fn poll( &mut self, cx: &mut std::task::Context<'_>, ) -> Poll>> { if self.tick.poll_tick(cx).is_ready() { // Publish batch for (topic, queue) in self.pending.iter_mut() { if !queue.is_empty() { let num_of_message = queue.len().min(self.batch_size); let batch = Batch { messages: queue.drain(0..num_of_message).collect(), }; debug!("Publishing {} {}", batch.messages.len(), topic); let msg = batch.encode_to_vec(); P2P_GOSSIP_BATCH_SIZE.observe(batch.messages.len() as f64); match self.gossipsub.publish(IdentTopic::new(*topic), msg) { Ok(message_id) => debug!("Published {} {}", topic, message_id), Err(error) => error!("Failed to publish {}: {}", topic, error), } } } } match self.gossipsub.poll(cx) { Poll::Pending => return Poll::Pending, Poll::Ready(ToSwarm::GenerateEvent(event)) => match event { gossipsub::Event::Message { propagation_source, message_id, message: Message { source, data, topic, .. 
}, } => match topic.as_str() { TOPOS_GOSSIP => { return Poll::Ready(ToSwarm::GenerateEvent(ComposedEvent::Gossipsub( crate::event::GossipEvent::Message { topic: TOPOS_GOSSIP, message: data, source, }, ))) } TOPOS_ECHO => { return Poll::Ready(ToSwarm::GenerateEvent(ComposedEvent::Gossipsub( crate::event::GossipEvent::Message { topic: TOPOS_ECHO, message: data, source, }, ))) } TOPOS_READY => { return Poll::Ready(ToSwarm::GenerateEvent(ComposedEvent::Gossipsub( crate::event::GossipEvent::Message { topic: TOPOS_READY, message: data, source, }, ))) } _ => {} }, gossipsub::Event::Subscribed { peer_id, topic } => { debug!("{peer_id} subscribed to {:?}", topic); // If the behaviour isn't already healthy we check if this event // triggers a switch to healthy if self.health_status != HealthStatus::Healthy && self.gossipsub.topics().all(|topic| { self.gossipsub.mesh_peers(topic).peekable().peek().is_some() }) { self.health_status = HealthStatus::Healthy; } } gossipsub::Event::Unsubscribed { peer_id, topic } => { debug!("{peer_id} unsubscribed from {:?}", topic); } gossipsub::Event::GossipsubNotSupported { peer_id } => { debug!("Gossipsub not supported by {:?}", peer_id); } }, Poll::Ready(ToSwarm::ListenOn { opts }) => { return Poll::Ready(ToSwarm::ListenOn { opts }) } Poll::Ready(ToSwarm::RemoveListener { id }) => { return Poll::Ready(ToSwarm::RemoveListener { id }) } Poll::Ready(ToSwarm::Dial { opts }) => return Poll::Ready(ToSwarm::Dial { opts }), Poll::Ready(ToSwarm::NotifyHandler { peer_id, handler, event, }) => { return Poll::Ready(ToSwarm::NotifyHandler { peer_id, handler, event, }) } Poll::Ready(ToSwarm::CloseConnection { peer_id, connection, }) => { return Poll::Ready(ToSwarm::CloseConnection { peer_id, connection, }) } Poll::Ready(ToSwarm::ExternalAddrExpired(addr)) => { return Poll::Ready(ToSwarm::ExternalAddrExpired(addr)) } Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)) => { return Poll::Ready(ToSwarm::ExternalAddrConfirmed(addr)) } 
Poll::Ready(ToSwarm::NewExternalAddrCandidate(addr)) => { return Poll::Ready(ToSwarm::NewExternalAddrCandidate(addr)) } Poll::Ready(event) => { warn!("Unhandled event in gossip behaviour: {:?}", event); } } Poll::Pending } } ================================================ FILE: crates/topos-p2p/src/behaviour/grpc/connection.rs ================================================ use std::future::IntoFuture; use futures::{future::BoxFuture, FutureExt}; use libp2p::{swarm::ConnectionId, Multiaddr}; use tokio::sync::oneshot; use tonic::transport::Channel; use super::{ error::{OutboundConnectionError, OutboundError}, RequestId, }; /// Connection struct which represent a connection between two nodes /// It contains the connection id, the address of the node, the request id /// and the gRPC channel which is used to communicate with the node. #[derive(Debug)] pub(crate) struct Connection { /// The connection id pub(crate) id: ConnectionId, /// The address of the node pub(crate) address: Option, /// The request id that is served by this connection pub(crate) request_id: Option, /// The gRPC channel used to communicate with the node pub(crate) channel: Option, } /// Connection request struct which is used to open a connection to a node pub(crate) struct OutboundConnectionRequest { pub(crate) request_id: RequestId, pub(crate) notifier: oneshot::Sender>, pub(crate) protocol: String, } /// Struct which is used to represent a connected channel connection #[derive(Debug)] pub struct OutboundConnectedConnection { #[allow(dead_code)] pub(crate) request_id: RequestId, // TODO: Remove unused when gRPC behaviour is activated #[allow(unused)] pub(crate) channel: tonic::transport::Channel, } /// Enum that represents the different states of an outbound connection #[derive(Debug)] pub enum OutboundConnection { Connected(OutboundConnectedConnection), Pending { request_id: RequestId, }, Opening { request_id: RequestId, receiver: oneshot::Receiver>, }, } impl IntoFuture for OutboundConnection 
{ type Output = Result; type IntoFuture = BoxFuture<'static, Self::Output>; fn into_future(self) -> Self::IntoFuture { async move { match self { // The outbound connection is already opened OutboundConnection::Connected(connected) => Ok(connected), // The outbound connection is in pending OutboundConnection::Pending { request_id } => { Err(OutboundConnectionError::AlreadyNegotiating) } // The connection is in opening state so we need to proceed to connect OutboundConnection::Opening { request_id, receiver, } => { let channel = receiver.await??; Ok(OutboundConnectedConnection { request_id, channel, }) } } } .boxed() } } ================================================ FILE: crates/topos-p2p/src/behaviour/grpc/error.rs ================================================ use std::sync::Arc; use tokio::sync::oneshot; #[derive(Debug, thiserror::Error)] pub enum OutboundError { #[error("Unable to Dial")] DialFailure, #[error("Peer doesn't support the protocol: {0}")] UnsupportedProtocol(String), #[error(transparent)] GrpcChannel(#[from] Arc), #[error("Outbound connection timeout")] Timeout, } #[derive(thiserror::Error, Debug)] pub enum OutboundConnectionError { #[error(transparent)] Outbound(#[from] OutboundError), #[error(transparent)] ConnectionCanceled(#[from] oneshot::error::RecvError), #[error("This connection is already negotiating with another client")] AlreadyNegotiating, } ================================================ FILE: crates/topos-p2p/src/behaviour/grpc/event.rs ================================================ use libp2p::{swarm::ConnectionId, PeerId}; use tonic::transport::Channel; use super::{OutboundError, RequestId}; #[derive(Debug)] pub enum Event { OutboundFailure { peer_id: PeerId, request_id: RequestId, error: OutboundError, }, OutboundSuccess { peer_id: PeerId, request_id: RequestId, #[allow(unused)] channel: Channel, }, InboundNegotiatedConnection { request_id: RequestId, connection_id: ConnectionId, }, OutboundNegotiatedConnection { peer_id: 
PeerId, request_id: RequestId, }, } ================================================ FILE: crates/topos-p2p/src/behaviour/grpc/handler/event.rs ================================================ use crate::behaviour::grpc::RequestId; use super::ProtocolRequest; #[derive(Debug)] pub enum Event { InboundNegotiatedStream { request_id: RequestId, stream: libp2p::Stream, }, OutboundNegotiatedStream { request_id: RequestId, stream: libp2p::Stream, }, UnsupportedProtocol(RequestId, String), OutboundTimeout(ProtocolRequest), } ================================================ FILE: crates/topos-p2p/src/behaviour/grpc/handler/protocol.rs ================================================ use std::collections::HashSet; use libp2p::{core::UpgradeInfo, InboundUpgrade, OutboundUpgrade, Stream}; /// UpgradeProtocol for gRPC Connection /// /// This protocol is used to upgrade the connection to a gRPC connection. /// It is used by the `Handler` to upgrade the connection to a gRPC connection. /// The gRPC protocol is defined as constant but can be updated to manage different /// version of the protocol. /// /// The `UpgradeInfo` trait is implemented to provide the protocol information. /// The `OutboundUpgrade` and `InboundUpgrade` traits are implemented to provide /// the upgrade of the connection. The upgrade is done by returning the socket /// wrapped in a `Future`. 
/// Upgrade protocol used to negotiate a gRPC substream.
///
/// The upgrade itself is a pass-through: it only advertises the configured
/// protocol names and hands the raw negotiated stream back unchanged.
// NOTE(review): generic type parameters in this extract were stripped by the
// extraction tool (e.g. `HashSet` is presumably `HashSet<String>`); the bare
// names below are kept verbatim — confirm against the original repository.
#[derive(Debug)]
pub struct GrpcUpgradeProtocol {
    /// Protocol names offered during substream negotiation.
    pub(crate) protocols: HashSet,
}

impl UpgradeInfo for GrpcUpgradeProtocol {
    type Info = String;
    // NOTE(review): `IntoIter`'s type argument appears stripped by extraction.
    type InfoIter = std::collections::hash_set::IntoIter;

    /// Advertise every configured protocol name to the negotiation machinery.
    fn protocol_info(&self) -> Self::InfoIter {
        // Cloned because `InfoIter` is an owning iterator over the set.
        self.protocols.clone().into_iter()
    }
}

impl OutboundUpgrade for GrpcUpgradeProtocol {
    type Output = Stream;
    type Error = std::io::Error;
    type Future = futures::future::Ready>;

    /// No-op upgrade: return the negotiated socket as-is in a resolved future.
    fn upgrade_outbound(self, socket: Stream, _info: Self::Info) -> Self::Future {
        futures::future::ready(Ok(socket))
    }
}

impl InboundUpgrade for GrpcUpgradeProtocol {
    type Output = Stream;
    type Error = std::io::Error;
    type Future = futures::future::Ready>;

    /// Inbound counterpart of `upgrade_outbound`: hands the socket back untouched.
    // NOTE(review): `info` is unused here (the outbound side names it `_info`).
    fn upgrade_inbound(self, socket: Stream, info: Self::Info) -> Self::Future {
        futures::future::ready(Ok(socket))
    }
}



================================================
FILE: crates/topos-p2p/src/behaviour/grpc/handler.rs
================================================
use std::{
    collections::{HashSet, VecDeque},
    sync::{
        atomic::{AtomicU64, Ordering},
        Arc,
    },
    task::Poll,
};

use libp2p::swarm::{
    handler::{ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound},
    ConnectionHandler, ConnectionHandlerEvent, SubstreamProtocol,
};
use tracing::{debug, warn};

use self::protocol::GrpcUpgradeProtocol;

use super::RequestId;

pub(crate) mod event;
use event::Event;
pub(crate) mod protocol;

/// Request, forwarded from the behaviour, to open an outbound substream
/// negotiating the given protocol for the given request id.
#[derive(Debug)]
pub struct ProtocolRequest {
    pub(crate) request_id: RequestId,
    pub(crate) protocol: String,
}

/// Handler for gRPC connections
pub struct Handler {
    /// Next inbound request id
    // Shared atomic counter so every new inbound substream gets a fresh id.
    inbound_request_id: Arc,
    /// Pending events to send
    pending_events: VecDeque,
    /// Optional outbound request id
    // At most one outbound request is held; see `on_behaviour_event`.
    outbound_request_id: Option,
    // Protocols supported by this handler, copied into every upgrade offer.
    protocols: HashSet,
    // Cleared on upgrade errors so the swarm is allowed to close the connection.
    keep_alive: bool,
}

impl Handler {
    /// Build a handler sharing the behaviour-wide inbound id counter and
    /// the set of supported protocols.
    pub(crate) fn new(inbound_request_id: Arc, protocols: HashSet) -> Self {
        Self {
            inbound_request_id,
            pending_events: VecDeque::new(),
            outbound_request_id: None,
            protocols,
            keep_alive: true,
        }
    }
}

impl ConnectionHandler for Handler {
    type FromBehaviour = ProtocolRequest;
    type ToBehaviour = event::Event;
    type InboundProtocol = GrpcUpgradeProtocol;
    type OutboundProtocol = GrpcUpgradeProtocol;
    type InboundOpenInfo = RequestId;
    type OutboundOpenInfo = ProtocolRequest;

    /// Offer the inbound upgrade, tagging it with a freshly allocated request id.
    fn listen_protocol(&self) -> SubstreamProtocol {
        let id = self.inbound_request_id.fetch_add(1, Ordering::Relaxed);

        SubstreamProtocol::new(
            GrpcUpgradeProtocol {
                protocols: self.protocols.clone(),
            },
            RequestId(id),
        )
    }

    fn connection_keep_alive(&self) -> bool {
        self.keep_alive
    }

    /// Store the behaviour's outbound request; a still-pending previous request
    /// is replaced and only logged, not failed.
    fn on_behaviour_event(&mut self, request: Self::FromBehaviour) {
        let request_id = request.request_id;
        if let Some(prev) = self.outbound_request_id.replace(request) {
            warn!(
                "Received new outbound request id {:?} while previous request id {:?} is still \
                 pending",
                request_id, prev.request_id
            );
        }
    }

    /// Translate substream lifecycle events into `Event`s queued for the behaviour.
    fn on_connection_event(
        &mut self,
        event: ConnectionEvent<
            Self::InboundProtocol,
            Self::OutboundProtocol,
            Self::InboundOpenInfo,
            Self::OutboundOpenInfo,
        >,
    ) {
        match event {
            // New Inbound stream
            ConnectionEvent::FullyNegotiatedInbound(FullyNegotiatedInbound { protocol, info }) => {
                self.pending_events
                    .push_back(Event::InboundNegotiatedStream {
                        request_id: info,
                        stream: protocol,
                    })
            }
            // New Outbound stream
            ConnectionEvent::FullyNegotiatedOutbound(FullyNegotiatedOutbound {
                protocol,
                info,
            }) => self
                .pending_events
                .push_back(Event::OutboundNegotiatedStream {
                    request_id: info.request_id,
                    stream: protocol,
                }),
            ConnectionEvent::DialUpgradeError(DialUpgradeError {
                info,
                error: libp2p::swarm::StreamUpgradeError::Timeout,
            }) => {
                self.pending_events.push_back(Event::OutboundTimeout(info));

                // Closing the connection handler
                self.keep_alive = false;
            }
            ConnectionEvent::DialUpgradeError(DialUpgradeError {
                info,
                error: libp2p::swarm::StreamUpgradeError::NegotiationFailed,
            }) => {
                self.pending_events
                    .push_back(Event::UnsupportedProtocol(info.request_id, info.protocol));

                // Closing the connection handler
                self.keep_alive = false;
            }
            // Remaining upgrade errors and protocol-change notifications are ignored.
            ConnectionEvent::DialUpgradeError(_)
            | ConnectionEvent::AddressChange(_)
            | ConnectionEvent::ListenUpgradeError(_)
            | ConnectionEvent::LocalProtocolsChange(_)
            | ConnectionEvent::RemoteProtocolsChange(_) => (),
            // NOTE(review): catch-all for variants added in newer libp2p versions.
            event => warn!("Unhandled connection event: {:?}", event),
        }
    }

    #[allow(deprecated)]
    fn poll(
        &mut self,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<
        libp2p::swarm::ConnectionHandlerEvent<
            Self::OutboundProtocol,
            Self::OutboundOpenInfo,
            Self::ToBehaviour,
        >,
    > {
        // Queued events have priority over starting a new outbound request.
        if let Some(event) = self.pending_events.pop_front() {
            return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(event));
        }

        // Turn a pending behaviour request into an outbound substream request,
        // offering the configured protocols plus the requested one.
        if let Some(request) = self.outbound_request_id.take() {
            debug!(
                "Starting outbound request SubstreamProtocol for {}",
                request.request_id
            );
            let mut protocols = self.protocols.clone();
            protocols.insert(request.protocol.clone());

            return Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest {
                protocol: SubstreamProtocol::new(GrpcUpgradeProtocol { protocols }, request),
            });
        }

        Poll::Pending
    }
}


================================================
FILE: crates/topos-p2p/src/behaviour/grpc/proxy.rs
================================================
use std::{
    io,
    pin::Pin,
    task::{Context, Poll},
};

use futures::Stream;
use pin_project::pin_project;
use tokio::sync::mpsc;

use super::stream::GrpcStream;

/// Proxy for gRPC connection with the local service.
// Adapts a tokio unbounded receiver of accepted gRPC streams into a
// `futures::Stream`, the shape tonic's `serve_with_incoming` expects.
// NOTE(review): the receiver's type argument was stripped by extraction
// (presumably `io::Result<GrpcStream>`); bare names are kept verbatim.
#[pin_project]
pub(crate) struct GrpcProxy {
    #[pin]
    rx: mpsc::UnboundedReceiver>,
}

impl GrpcProxy {
    /// Wrap the receiving end of the channel the behaviour pushes streams into.
    pub(crate) fn new(rx: mpsc::UnboundedReceiver>) -> Self {
        Self { rx }
    }
}

impl Stream for GrpcProxy {
    type Item = io::Result;

    /// Forward readiness and items straight from the underlying tokio receiver.
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> {
        self.project().rx.as_mut().poll_recv(cx)
    }
}


================================================
FILE: crates/topos-p2p/src/behaviour/grpc/stream.rs
================================================
use std::{
    io,
    pin::Pin,
    sync::{Arc, Mutex},
    task::{Context, Poll},
};

use futures::{AsyncRead as FuturesAsyncRead, AsyncWrite as FuturesAsyncWrite, Future};
use http::Uri;
use libp2p::{swarm::ConnectionId, PeerId};
use pin_project::pin_project;
use tokio::{
    io::{AsyncRead, AsyncWrite, ReadBuf},
    sync::mpsc,
};
use tonic::transport::{server::Connected, Channel, Endpoint};
use tower::{BoxError, Service};

/// Manage a gRPC Stream linked to an open [`libp2p::Stream`]
#[pin_project]
pub(crate) struct GrpcStream {
    // The raw libp2p substream all reads/writes are delegated to.
    #[pin]
    stream: libp2p::Stream,
    peer_id: PeerId,
    connection_id: libp2p::swarm::ConnectionId,
}

/// Outbound GrpcStream initialization struct
// Holds the shared receiver that will eventually yield the negotiated stream.
// NOTE(review): the stripped generics are presumably
// `Arc<Mutex<mpsc::Receiver<Result<GrpcStream, BoxError>>>>` — confirm upstream.
#[pin_project]
struct InitializedGrpcOutboundStream {
    #[pin]
    stream_rx: Arc>>>,
}

/// Fully negotiated Outbound GrpcStream
#[pin_project]
struct NegotiatedGrpcOutboundStream {
    #[pin]
    stream_rx: Arc>>>,
}

impl Future for NegotiatedGrpcOutboundStream {
    type Output = Result;

    /// Resolve once the shared receiver yields the negotiated stream.
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll {
        let stream = self.project();
        // Synchronous `std::sync::Mutex` lock inside poll: held only for the
        // duration of one `poll_recv` call.
        let mut fut = stream
            .stream_rx
            .lock()
            .expect("Failed to lock gRPC Outbound Stream Receiver");

        // NOTE(review): `option.unwrap()` panics if the sender is dropped
        // before sending — relies on the caller in `into_channel` always sending.
        fut.poll_recv(cx).map(|option| option.unwrap())
    }
}

// tower connector used by `Endpoint::connect_with_connector`: every `call`
// produces a future resolving to the single stream pushed into `stream_rx`.
impl Service for InitializedGrpcOutboundStream {
    type Response = GrpcStream;
    type Error = BoxError;
    type Future = NegotiatedGrpcOutboundStream;

    /// Always ready: readiness is decided by the receiver inside the future.
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> {
        Poll::Ready(Ok(()))
    }

    /// The target `Uri` is ignored; the connection is pre-established.
    fn call(&mut self, req: Uri) -> Self::Future {
        NegotiatedGrpcOutboundStream {
            stream_rx: self.stream_rx.clone(),
        }
    }
}
impl GrpcStream {
    /// Wrap an already-negotiated libp2p substream with its peer/connection identity.
    pub fn new(stream: libp2p::Stream, peer_id: PeerId, connection_id: ConnectionId) -> Self {
        Self {
            stream,
            peer_id,
            connection_id,
        }
    }

    /// Transform the GrpcStream into a [`tonic::transport::Channel`]
    // NOTE(review): the stripped return generics are presumably
    // `Result<Channel, tonic::transport::Error>` — confirm upstream.
    pub async fn into_channel(self) -> Result {
        let (sender, receiver) = mpsc::channel(1);
        let connection = InitializedGrpcOutboundStream {
            stream_rx: Arc::new(Mutex::new(receiver)),
        };

        // The URI is a placeholder required by `Endpoint`; actual I/O goes
        // through `connect_with_connector`, which resolves to `self` via the channel.
        let fut = async move {
            Endpoint::try_from("http://[::]:50051")
                .unwrap()
                .connect_with_connector(connection)
                .await
        };

        // Run both sides concurrently: the connector future awaits the stream
        // that `sender.send` provides.
        // NOTE(review): `send_result` is discarded — a failed send would only
        // surface indirectly through the connect future; consider handling it.
        let (channel, send_result) = tokio::join!(fut, sender.send(Ok(self)));

        channel
    }
}

impl Connected for GrpcStream {
    // No per-connection metadata is exposed to the tonic server.
    type ConnectInfo = ();

    fn connect_info(&self) -> Self::ConnectInfo {}
}

// Bridge tokio's `AsyncRead` to the inner stream's `futures`-style read.
impl AsyncRead for GrpcStream {
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll> {
        // Read into the unfilled region, then advance the ReadBuf cursor
        // by however many bytes the inner stream produced.
        let unfilled = buf.initialize_unfilled();
        self.project()
            .stream
            .poll_read(cx, unfilled)
            .map_ok(|len| buf.advance(len))
    }
}

// Bridge tokio's `AsyncWrite` to the inner stream's `futures`-style write.
impl AsyncWrite for GrpcStream {
    fn poll_write(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll> {
        self.project().stream.poll_write(cx, buf)
    }

    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> {
        self.project().stream.poll_flush(cx)
    }

    /// tokio `shutdown` maps onto the futures-io `close` operation.
    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> {
        self.project().stream.poll_close(cx)
    }
}


================================================
FILE: crates/topos-p2p/src/behaviour/grpc.rs
================================================
use std::{
    collections::{HashMap, HashSet, VecDeque},
    io,
    sync::{atomic::AtomicU64, Arc},
    task::{Context, Poll},
};

use futures::{future::BoxFuture, stream::FuturesUnordered, FutureExt, StreamExt};
use handler::Handler;
use libp2p::{
    core::ConnectedPoint,
    swarm::{
        derive_prelude::{ConnectionEstablished, ListenerId, NewListener},
        dial_opts::DialOpts,
        ConnectionClosed, DialError, DialFailure, FromSwarm, NetworkBehaviour, ToSwarm,
    },
    Multiaddr, PeerId,
};
use smallvec::SmallVec;
use std::fmt::Display;
use tokio::sync::{mpsc, oneshot};
use tonic::transport::{server::Router, Channel};
use tracing::{debug, info, warn};

use crate::GrpcRouter;

use self::{
    connection::{
        Connection, OutboundConnectedConnection, OutboundConnection, OutboundConnectionRequest,
    },
    error::OutboundError,
    handler::ProtocolRequest,
    stream::GrpcStream,
};

pub(crate) use event::Event;

pub(crate) mod connection;
pub mod error;
pub mod event;
pub(crate) mod handler;
mod proxy;
mod stream;

/// Configuration for the gRPC behaviour: an optional server router plus the
/// set of protocols this node may dial as a client.
// NOTE(review): stripped generics — `server` is presumably `Option<GrpcRouter>`
// and `client` a `HashSet<String>`; confirm upstream.
#[derive(Default)]
pub struct GrpcContext {
    server: Option,
    client: HashSet,
}

impl GrpcContext {
    /// Split the context into the optional tonic server and the
    /// (inbound, outbound) protocol sets consumed by `Behaviour::new`.
    pub(crate) fn into_parts(mut self) -> (Option, (HashSet, HashSet)) {
        let (server, inbound_protocols) = self
            .server
            .map(|server| (Some(server.server), server.protocols))
            .unwrap_or((None, HashSet::new()));

        // When no client protocols were configured, default to the served ones.
        if self.client.is_empty() {
            self.client = inbound_protocols.clone();
        }

        (server, (inbound_protocols, self.client))
    }

    /// Attach a gRPC router to expose as a server (builder style).
    pub fn with_router(mut self, router: GrpcRouter) -> Self {
        self.server = Some(router);

        self
    }

    /// Register a single client protocol (builder style).
    // NOTE(review): the generic parameter was stripped by extraction
    // (presumably `S: ToString`, given the `to_string` call below).
    pub fn add_client_protocol(mut self, protocol: S) -> Self {
        self.client.insert(protocol.to_string());

        self
    }

    /// Replace the whole set of client protocols (builder style).
    pub fn with_client_protocols(mut self, protocols: HashSet) -> Self {
        self.client = protocols;

        self
    }
}

/// The request id used to identify a gRPC request
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct RequestId(pub(crate) u64);

impl Display for RequestId {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.0)
    }
}

// Future resolving to the outcome of one channel negotiation, tagged with
// the request id and peer it belongs to.
type ChannelNegotiationFuture = BoxFuture<'static, (Result, RequestId, PeerId)>;

/// gRPC behaviour for libp2p
///
/// Allows opening gRPC connections to peers and accepting incoming gRPC connections.
/// It also handles the negotiation of the gRPC channel. Once the channel is established,
/// the behaviour will return a [`GrpcStream`] that can be used to send and receive gRPC messages.
/// The gRPC Router is optional because a client or light client needs to be able to open a
/// connection to a peer without having a gRPC service of its own to expose.
pub(crate) struct Behaviour { /// The optional gRPC service to expose service: Option, /// The next request id to use next_request_id: RequestId, /// The next inbound request id to use next_inbound_request_id: Arc, /// The list of connected peers with the associated gRPC channel connected: HashMap>, /// The list of known addresses for each peer managed by `add_address` and `remove_address` addresses: HashMap>, /// The optional inbound stream to receive gRPC connections inbound_stream: Option>>, /// The list of pending outbound connections pending_outbound_connections: HashMap, /// The list of pending events to send to the swarm pending_events: VecDeque>, /// The list of pending channel negotiation futures pending_negotiated_channels: FuturesUnordered, inbound_protocols: HashSet, outbound_protocols: HashSet, } impl Behaviour { // TODO: Remove unused when gRPC behaviour is activated pub fn new(service: GrpcContext) -> Self { let (service, (inbound_protocols, outbound_protocols)) = service.into_parts(); Self { service, inbound_protocols, outbound_protocols, connected: HashMap::new(), addresses: HashMap::new(), inbound_stream: None, next_request_id: RequestId(1), next_inbound_request_id: Arc::new(AtomicU64::new(0)), pending_outbound_connections: HashMap::new(), pending_events: VecDeque::new(), pending_negotiated_channels: FuturesUnordered::new(), } } /// Adds a known address for a peer that can be used for /// dialing attempts by the `Swarm` /// /// Addresses added in this way are only removed by `remove_address`. #[cfg(test)] pub fn add_address(&mut self, peer: &PeerId, address: Multiaddr) { self.addresses.entry(*peer).or_default().push(address); } /// Removes an address of a peer previously added via `add_address`. 
#[cfg(test)] #[allow(unused)] pub fn remove_address(&mut self, peer: &PeerId, address: &Multiaddr) { let mut last = false; if let Some(addresses) = self.addresses.get_mut(peer) { addresses.retain(|a| a != address); last = addresses.is_empty(); } if last { self.addresses.remove(peer); } } /// Ask the behaviour to create a new outbound connection for the given peer. /// /// The return value is an [`OutboundConnection`] that can be used to check the status of the /// connection. If the connection is pending, the request id is returned. If the connection /// is established, the gRPC channel is returned. // TODO: Remove unused when gRPC behaviour is activated #[allow(unused)] pub fn open_outbound_connection( &mut self, peer_id: &PeerId, protocol: String, ) -> OutboundConnection { // If there is a pending outbound connection for this peer // return the request id if let Some(request) = self.pending_outbound_connections.get(peer_id) { return OutboundConnection::Pending { request_id: request.request_id, }; } if let Some(connections) = self.connected.get_mut(peer_id) { match connections.first() { Some(Connection { id, address, request_id: Some(request_id), channel: Some(channel), }) => OutboundConnection::Connected(OutboundConnectedConnection { request_id: *request_id, channel: channel.clone(), }), Some(Connection { id, address, request_id: Some(request_id), channel, }) => { debug!( "Peer already connected but no channel yet, waiting for channel \ negotiation" ); OutboundConnection::Pending { request_id: *request_id, } } Some(_) => self.open_connection(peer_id, protocol), _ => { debug!("No connection for this peer {}", peer_id); self.open_connection(peer_id, protocol) } } } else { debug!("Buffering sender as no available connection to peer {peer_id} yet"); self.open_connection(peer_id, protocol) } } /// Return the next outbound request id fn next_request_id(&mut self) -> RequestId { let request_id = self.next_request_id; self.next_request_id.0 += 1; request_id } /// Try to 
open a connection with the given peer. fn open_connection(&mut self, peer_id: &PeerId, protocol: String) -> OutboundConnection { info!("Opening gRPC outbound connection to peer {peer_id}"); let (notifier, receiver) = oneshot::channel(); let request_id = self.next_request_id(); self.pending_outbound_connections .entry(*peer_id) .or_insert_with(|| OutboundConnectionRequest { request_id, notifier, protocol, }); self.pending_events.push_back(ToSwarm::Dial { opts: DialOpts::peer_id(*peer_id).build(), }); OutboundConnection::Opening { request_id, receiver, } } /// Handle the [`ConnectionEstablished`] event coming from the [`Swarm`] /// and try to open a gRPC channel using a [`ConnectionHandler`]. fn on_connection_established( &mut self, ConnectionEstablished { peer_id, connection_id, endpoint, failed_addresses, other_established, }: ConnectionEstablished, ) { let address = match endpoint { ConnectedPoint::Dialer { address, .. } => Some(address.clone()), ConnectedPoint::Listener { .. } => None, }; let connection = Connection { id: connection_id, address, request_id: None, channel: None, }; self.connected.entry(peer_id).or_default().push(connection); // If there is no current established connection it means that it's the // first connection with that peer if other_established == 0 { self.try_connect(&peer_id); } } /// Starts the gRPC service if not already started fn on_new_listener(&mut self, listener_id: ListenerId) { if let Some(service) = self.service.take() { let (tx, rx) = mpsc::unbounded_channel(); self.inbound_stream = Some(tx); // TODO: TP-758: Switch to serve_with_incoming_shutdown at some point tokio::spawn(service.serve_with_incoming(proxy::GrpcProxy::new(rx))); info!("New gRPC proxy started and listening on {listener_id:?}"); } else { warn!( "Tried to instantiate a gRPC proxy on {listener_id:?} but the service is missing \ (already spawn or unprovided)" ); } } /// On [`ConnectionClosed`] we cleanup the `connected` state of the behaviour. 
fn on_connection_closed( &mut self, ConnectionClosed { peer_id, connection_id, endpoint, remaining_established, }: ConnectionClosed, ) { debug!("Connection {connection_id} closed with peer {peer_id}"); if let Some(connections) = self.connected.get_mut(&peer_id) { connections.retain(|conn| conn.id != connection_id); if connections.is_empty() { self.connected.remove(&peer_id); } } } /// Handle the [`DialFailure`] event comming from the [`Swarm`] fn on_dial_failure( &mut self, DialFailure { peer_id, error, connection_id, }: DialFailure, ) { if let Some(peer_id) = peer_id { match error { DialError::DialPeerConditionFalse(_) => { self.try_connect(&peer_id); } _ => { if let Some(OutboundConnectionRequest { request_id, notifier, protocol, }) = self.pending_outbound_connections.remove(&peer_id) { self.pending_events.push_back(ToSwarm::GenerateEvent( Event::OutboundFailure { peer_id, request_id, error: OutboundError::DialFailure, }, )); let _ = notifier.send(Err(OutboundError::DialFailure)); } } } } } /// Try to connect an opened outbound connection with a [`ConnectionHandler`] /// in order to handle the request. 
fn try_connect(&mut self, peer_id: &PeerId) {
    if let Some(connections) = self.connected.get_mut(peer_id) {
        // Only the first established connection is used to carry the
        // protocol request.
        let connection = connections.first_mut();
        if let Some(connection) = connection {
            if let Some(OutboundConnectionRequest {
                request_id,
                notifier,
                protocol,
            }) = self.pending_outbound_connections.get(peer_id)
            {
                debug!("gRPC Outbound connection established with {peer_id}");
                self.pending_events.push_back(ToSwarm::NotifyHandler {
                    peer_id: *peer_id,
                    handler: libp2p::swarm::NotifyHandler::One(connection.id),
                    event: ProtocolRequest {
                        request_id: *request_id,
                        protocol: protocol.clone(),
                    },
                });
            }
        }
    }
}
}

impl NetworkBehaviour for Behaviour {
    type ConnectionHandler = Handler;
    type ToSwarm = Event;

    fn handle_established_inbound_connection(
        &mut self,
        _connection_id: libp2p::swarm::ConnectionId,
        peer: PeerId,
        local_addr: &libp2p::Multiaddr,
        remote_addr: &libp2p::Multiaddr,
    ) -> Result<libp2p::swarm::THandler<Self>, libp2p::swarm::ConnectionDenied> {
        Ok(Handler::new(
            self.next_inbound_request_id.clone(),
            self.inbound_protocols.clone(),
        ))
    }

    fn handle_established_outbound_connection(
        &mut self,
        _connection_id: libp2p::swarm::ConnectionId,
        peer: PeerId,
        addr: &libp2p::Multiaddr,
        role_override: libp2p::core::Endpoint,
    ) -> Result<libp2p::swarm::THandler<Self>, libp2p::swarm::ConnectionDenied> {
        // NOTE(review): the outbound handler reuses `next_inbound_request_id`
        // as its id counter — confirm a dedicated outbound counter is not
        // needed.
        Ok(Handler::new(
            self.next_inbound_request_id.clone(),
            self.outbound_protocols.clone(),
        ))
    }

    fn handle_pending_outbound_connection(
        &mut self,
        _connection_id: libp2p::swarm::ConnectionId,
        maybe_peer: Option<PeerId>,
        _addresses: &[libp2p::Multiaddr],
        _effective_role: libp2p::core::Endpoint,
    ) -> Result<Vec<libp2p::Multiaddr>, libp2p::swarm::ConnectionDenied> {
        let peer_id = match maybe_peer {
            None => return Ok(vec![]),
            Some(peer_id) => peer_id,
        };

        // Candidate addresses: those recorded from live connections first,
        // then any statically-known addresses for the peer.
        let mut addresses = Vec::new();

        if let Some(connections) = self.connected.get(&peer_id) {
            addresses.extend(connections.iter().filter_map(|c| c.address.clone()));
        }

        if let Some(more) = self.addresses.get(&peer_id) {
            addresses.extend(more.iter().cloned());
        }

        Ok(addresses)
    }

    fn on_connection_handler_event(
        &mut self,
        peer_id: PeerId,
        connection_id: libp2p::swarm::ConnectionId,
        event: libp2p::swarm::THandlerOutEvent<Self>,
    ) {
        match event {
            handler::event::Event::OutboundTimeout(request) => {
                debug!(
                    "Outbound timeout for request {} with peer {peer_id}",
                    request.request_id
                );
                self.pending_events
                    .push_back(ToSwarm::GenerateEvent(Event::OutboundFailure {
                        peer_id,
                        request_id: request.request_id,
                        error: OutboundError::Timeout,
                    }));

                // Also fail the oneshot the caller is awaiting on.
                if let Some(connection_request) =
                    self.pending_outbound_connections.remove(&peer_id)
                {
                    _ = connection_request
                        .notifier
                        .send(Err(OutboundError::Timeout))
                }
            }
            handler::event::Event::UnsupportedProtocol(request_id, protocol) => {
                debug!(
                    "Unsupported protocol {protocol} for request {request_id} with peer {peer_id}"
                );
                self.pending_events
                    .push_back(ToSwarm::GenerateEvent(Event::OutboundFailure {
                        peer_id,
                        request_id,
                        error: OutboundError::UnsupportedProtocol(protocol.clone()),
                    }));

                if let Some(connection_request) =
                    self.pending_outbound_connections.remove(&peer_id)
                {
                    _ = connection_request
                        .notifier
                        .send(Err(OutboundError::UnsupportedProtocol(protocol)))
                }
            }
            handler::event::Event::InboundNegotiatedStream { request_id, stream } => {
                debug!("Inbound stream negotiated for request {request_id} with peer {peer_id}",);

                // Forward the negotiated stream to the running gRPC proxy
                // (set up by `on_new_listener`).
                if let Some(sender) = &mut self.inbound_stream {
                    _ = sender.send(Ok(GrpcStream::new(stream, peer_id, connection_id)));

                    self.pending_events.push_back(ToSwarm::GenerateEvent(
                        Event::InboundNegotiatedConnection {
                            request_id,
                            connection_id,
                        },
                    ));
                }
            }
            handler::event::Event::OutboundNegotiatedStream { request_id, stream } => {
                debug!("Outbound stream negotiated for request {request_id} with peer {peer_id}",);
                let stream = GrpcStream::new(stream, peer_id, connection_id);

                // Channel creation is async: park the future and resolve it
                // from `poll`.
                let future = stream
                    .into_channel()
                    .map(move |channel| (channel, request_id, peer_id))
                    .boxed();

                self.pending_negotiated_channels.push(future);
            }
        }
    }

    fn on_swarm_event(&mut self, event: FromSwarm) {
        match event {
            FromSwarm::ConnectionEstablished(connection_established) => {
                self.on_connection_established(connection_established)
            }
            FromSwarm::NewListener(NewListener { listener_id }) => {
                self.on_new_listener(listener_id)
            }
            FromSwarm::ConnectionClosed(connection_closed) => {
                self.on_connection_closed(connection_closed)
            }
            FromSwarm::DialFailure(dial_failure) => self.on_dial_failure(dial_failure),
            // Events the gRPC behaviour deliberately ignores.
            FromSwarm::AddressChange(_)
            | FromSwarm::ExpiredListenAddr(_)
            | FromSwarm::ExternalAddrConfirmed(_)
            | FromSwarm::ExternalAddrExpired(_)
            | FromSwarm::ListenFailure(_)
            | FromSwarm::ListenerClosed(_)
            | FromSwarm::ListenerError(_)
            | FromSwarm::NewExternalAddrCandidate(_)
            | FromSwarm::NewListenAddr(_) => (),
            event => debug!("Unhandled event from swarm (grpc): {:?}", event),
        }
    }

    fn poll(
        &mut self,
        cx: &mut Context<'_>,
    ) -> Poll<ToSwarm<Self::ToSwarm, libp2p::swarm::THandlerInEvent<Self>>> {
        // Sending event to both `Swarm` and `ConnectionHandler`
        if let Some(ev) = self.pending_events.pop_front() {
            return Poll::Ready(ev);
        }

        // When channel has been negotiated by the [`ConnectionHandler`] we need
        // to update the [`Connection`] with the channel.
        match self.pending_negotiated_channels.poll_next_unpin(cx) {
            Poll::Ready(Some((Ok(channel), request_id, peer_id))) => {
                debug!("gRPC channel ready for {} {}", peer_id, request_id);
                // Attach the channel to the matching tracked connection.
                if let Some(conns) = self.connected.get_mut(&peer_id) {
                    for conn in conns {
                        if let Some(conn_request_id) = &conn.request_id {
                            if request_id == *conn_request_id {
                                conn.channel = Some(channel.clone());
                                break;
                            }
                        }
                    }
                }

                // Notifying the channel to the initial sender
                if let Some(req) = self.pending_outbound_connections.remove(&peer_id) {
                    let _ = req.notifier.send(Ok(channel.clone()));

                    self.pending_events.push_back(ToSwarm::GenerateEvent(
                        Event::OutboundNegotiatedConnection {
                            request_id: req.request_id,
                            peer_id,
                        },
                    ));
                }

                return Poll::Ready(ToSwarm::GenerateEvent(Event::OutboundSuccess {
                    peer_id,
                    request_id,
                    channel,
                }));
            }
            Poll::Ready(Some((Err(error), request_id, peer_id))) => {
                debug!("Received error from channel negotiation {:?}", error);
                let error = Arc::new(error);
                if let Some(req) = self.pending_outbound_connections.remove(&peer_id) {
                    let _ = req
                        .notifier
                        .send(Err(OutboundError::GrpcChannel(error.clone())));
                }

                return Poll::Ready(ToSwarm::GenerateEvent(Event::OutboundFailure {
                    peer_id,
                    request_id,
                    error: OutboundError::GrpcChannel(error),
                }));
            }
            _ => {}
        }

        Poll::Pending
    }
}


================================================
FILE: crates/topos-p2p/src/behaviour/peer_info.rs
================================================
use libp2p::{
    identify::Behaviour as Identify, identify::Config as IdentifyConfig,
    identify::Event as IdentifyEvent, identity::Keypair, swarm::NetworkBehaviour,
};

/// Wrapper behaviour around libp2p `identify`, exposing its events directly.
#[derive(NetworkBehaviour)]
#[behaviour(to_swarm = "IdentifyEvent")]
pub struct PeerInfoBehaviour {
    identify: Identify,
}

impl PeerInfoBehaviour {
    /// Build the identify behaviour for the given protocol name and local key.
    pub(crate) fn new(identify_protocol: &'static str, peer_key: &Keypair) -> PeerInfoBehaviour {
        let ident_config = IdentifyConfig::new(identify_protocol.to_string(), peer_key.public());
        let identify = Identify::new(ident_config);

        Self { identify }
    }
}
================================================ FILE: crates/topos-p2p/src/behaviour/topos.rs ================================================ // #[derive(NetworkBehaviour)] // #[behaviour(out_event = "ToposOut", event_process = true)] // pub struct ToposBehaviour {} ================================================ FILE: crates/topos-p2p/src/behaviour.rs ================================================ use self::{discovery::DiscoveryBehaviour, peer_info::PeerInfoBehaviour}; use crate::event::ComposedEvent; use libp2p::swarm::NetworkBehaviour; pub(crate) mod discovery; pub(crate) mod gossip; pub(crate) mod grpc; pub(crate) mod peer_info; pub(crate) mod topos; /// Represents the health status of a behaviour inside the p2p layer #[derive(Debug, Default, PartialEq, Eq)] pub(crate) enum HealthStatus { #[default] Initializing, Healthy, Unhealthy, Killing, #[allow(unused)] Recovering, } #[derive(NetworkBehaviour)] #[behaviour(to_swarm = "ComposedEvent")] pub(crate) struct Behaviour { /// Periodically pings and identifies the nodes we are connected to, /// and store information in a cache. 
pub(crate) peer_info: PeerInfoBehaviour, /// DiscoveryBehaviour which handle every aspect of the node discovery pub(crate) discovery: DiscoveryBehaviour, /// Gossip behaviour which handle the gossipsub protocol pub(crate) gossipsub: gossip::Behaviour, /// Custom gRPC behaviour which handle the different TOPOS gRPC protocols pub(crate) grpc: grpc::Behaviour, } ================================================ FILE: crates/topos-p2p/src/client.rs ================================================ use futures::future::BoxFuture; use libp2p::PeerId; use tokio::sync::{ mpsc::{self, error::SendError}, oneshot, }; use tonic::server::NamedService; use topos_core::api::grpc::GrpcClient; use crate::{ error::{CommandExecutionError, P2PError}, utils::GrpcOverP2P, Command, }; #[derive(Clone)] pub struct NetworkClient { pub retry_ttl: u64, pub local_peer_id: PeerId, pub sender: mpsc::Sender, pub grpc_over_p2p: GrpcOverP2P, pub shutdown_channel: mpsc::Sender>, } impl NetworkClient { pub async fn connected_peers(&self) -> Result, P2PError> { let (sender, receiver) = oneshot::channel(); Self::send_command_with_receiver(&self.sender, Command::ConnectedPeers { sender }, receiver) .await } pub async fn random_known_peer(&self) -> Result { let (sender, receiver) = oneshot::channel(); Self::send_command_with_receiver( &self.sender, Command::RandomKnownPeer { sender }, receiver, ) .await } pub fn publish( &self, topic: &'static str, message: T, ) -> BoxFuture<'static, Result<(), SendError>> { let network = self.sender.clone(); Box::pin(async move { network .send(Command::Gossip { topic, data: message.encode_to_vec(), }) .await }) } async fn send_command_with_receiver< T, E: From + From, >( sender: &mpsc::Sender, command: Command, receiver: oneshot::Receiver>, ) -> Result { if let Err(SendError(command)) = sender.send(command).await { return Err(CommandExecutionError::UnableToSendCommand(command).into()); } receiver.await.unwrap_or_else(|error| Err(error.into())) } pub async fn 
shutdown(&self) -> Result<(), P2PError> { let (sender, receiver) = oneshot::channel(); self.shutdown_channel .send(sender) .await .map_err(P2PError::ShutdownCommunication)?; Ok(receiver.await?) } /// Creates a new gRPC client for the given peer. pub async fn new_grpc_client(&self, peer: PeerId) -> Result where C: GrpcClient, S: NamedService, { self.grpc_over_p2p.create::(peer).await } } pub enum RetryPolicy { NoRetry, N(usize), } ================================================ FILE: crates/topos-p2p/src/command.rs ================================================ use std::fmt::Display; use libp2p::PeerId; use tokio::sync::oneshot; use crate::{behaviour::grpc::connection::OutboundConnection, error::P2PError}; #[derive(Debug)] pub enum Command { /// Command to ask for the current connected peer id list ConnectedPeers { sender: oneshot::Sender, P2PError>>, }, Gossip { topic: &'static str, data: Vec, }, /// Ask for the creation of a new proxy connection for a gRPC query. /// The response will be sent to the sender of the command once the connection is established. /// The response will be a `OutboundConnection` that can be used to create a gRPC client. /// A connection is established if needed with the peer. NewProxiedQuery { protocol: &'static str, peer: PeerId, id: uuid::Uuid, response: oneshot::Sender, }, /// Ask for a random known peer RandomKnownPeer { sender: oneshot::Sender>, }, } impl Display for Command { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Command::ConnectedPeers { .. } => write!(f, "ConnectedPeers"), Command::RandomKnownPeer { .. } => write!(f, "RandomKnownPeer"), Command::Gossip { .. } => write!(f, "GossipMessage"), Command::NewProxiedQuery { .. 
} => write!(f, "NewProxiedQuery"), } } } ================================================ FILE: crates/topos-p2p/src/config.rs ================================================ use std::{num::NonZeroUsize, time::Duration}; pub struct NetworkConfig { pub minimum_cluster_size: usize, pub client_retry_ttl: u64, pub discovery: DiscoveryConfig, pub yamux_max_buffer_size: usize, pub yamux_window_size: Option, pub allow_private_ip: bool, } impl Default for NetworkConfig { fn default() -> Self { Self { minimum_cluster_size: Self::MINIMUM_CLUSTER_SIZE, client_retry_ttl: Self::CLIENT_RETRY_TTL, discovery: Default::default(), yamux_max_buffer_size: usize::MAX, yamux_window_size: None, allow_private_ip: false, } } } impl NetworkConfig { pub const MINIMUM_CLUSTER_SIZE: usize = 5; pub const CLIENT_RETRY_TTL: u64 = 200; } pub struct DiscoveryConfig { pub replication_factor: NonZeroUsize, pub replication_interval: Option, pub publication_interval: Option, pub provider_publication_interval: Option, /// Interval at which the node will send bootstrap query to the network /// /// Defaults to [DiscoveryConfig::BOOTSTRAP_INTERVAL] pub bootstrap_interval: Duration, /// Interval at which the node will send fast bootstrap query to the network /// Mostly used when the node is bootstrapping and failed to connect to boot peers /// /// Defaults to [DiscoveryConfig::FAST_BOOTSTRAP_INTERVAL] pub fast_bootstrap_interval: Duration, } impl Default for DiscoveryConfig { fn default() -> Self { Self { replication_factor: NonZeroUsize::new(4).unwrap(), replication_interval: Some(Duration::from_secs(10)), publication_interval: Some(Duration::from_secs(10)), provider_publication_interval: Some(Duration::from_secs(10)), bootstrap_interval: Self::BOOTSTRAP_INTERVAL, fast_bootstrap_interval: Self::FAST_BOOTSTRAP_INTERVAL, } } } impl DiscoveryConfig { /// Default bootstrap interval in seconds pub const BOOTSTRAP_INTERVAL: Duration = Duration::from_secs(60); /// Default fast bootstrap interval in seconds pub 
const FAST_BOOTSTRAP_INTERVAL: Duration = Duration::from_secs(5); pub fn with_replication_factor(mut self, replication_factor: NonZeroUsize) -> Self { self.replication_factor = replication_factor; self } } ================================================ FILE: crates/topos-p2p/src/constants.rs ================================================ use std::{env, time::Duration}; use lazy_static::lazy_static; use prometheus_client::registry::Registry; use tokio::sync::Mutex; lazy_static! { /// Metric Registry used to register all the metrics from libp2p::gossipsub // NOTE: During tests, if multiple instances are started, they will all point to the same // registry. pub static ref METRIC_REGISTRY: Mutex = Mutex::new(::with_prefix("topos")); pub static ref EVENT_STREAM_BUFFER: usize = env::var("TCE_EVENT_STREAM_BUFFER") .ok() .and_then(|v| v.parse::().ok()) .unwrap_or(2048 * 2); pub static ref CAPACITY_EVENT_STREAM_BUFFER: usize = EVENT_STREAM_BUFFER .checked_mul(10) .map(|v| { let r: usize = v.checked_div(100).unwrap_or(*EVENT_STREAM_BUFFER); r }) .unwrap_or(*EVENT_STREAM_BUFFER); pub static ref COMMAND_STREAM_BUFFER_SIZE: usize = env::var("TCE_COMMAND_STREAM_BUFFER_SIZE") .ok() .and_then(|v| v.parse::().ok()) .unwrap_or(2048); } pub const DISCOVERY_PROTOCOL: &str = "/tce-disco/1"; pub const PEER_INFO_PROTOCOL: &str = "/tce-peer-info/1"; pub const GRPC_P2P_TOPOS_PROTOCOL: &str = "/topos-grpc-p2p/1.0"; // FIXME: Considered as constant until customizable and exposed properly in the genesis file pub const TCE_BOOTNODE_PORT: u16 = 9090; /// Swarm idle connection timeout pub const IDLE_CONNECTION_TIMEOUT: Duration = Duration::from_secs(30); ================================================ FILE: crates/topos-p2p/src/error.rs ================================================ use std::io; use libp2p::{ gossipsub::SubscriptionError, kad::NoKnownPeers, noise::Error as NoiseError, request_response::OutboundFailure, TransportError, }; use thiserror::Error; use tokio::sync::{mpsc, 
oneshot}; use crate::{behaviour::grpc::error::OutboundConnectionError, command::Command}; #[derive(Error, Debug)] pub enum P2PError { #[error("Unable build a network: peer_key missing")] MissingPeerKey, #[error("Unable to reach any bootnode")] UnableToReachBootnode, #[error("The handle on the runtime failed")] JoinHandleFailure, #[error(transparent)] CommandError(#[from] CommandExecutionError), #[error("An error occurred on the Transport layer: {0}")] TransportError(#[from] TransportError), #[error("An error occured trying to subscribe to gossip topic: {0}")] SubscriptionError(#[from] SubscriptionError), #[error("Unable to receive expected response of a oneshot channel")] OneshotReceiveError(#[from] oneshot::error::RecvError), #[error("An error occurred on the Noise protocol: {0}")] NoiseProtocolError(#[from] NoiseError), #[error("Error during bootstrap phase: {0}")] BootstrapError(&'static str), #[error("Kademlia bootstrap query error: {0}")] KademliaBootstrapError(#[from] NoKnownPeers), #[error("Unable to execute shutdown on the p2p runtime: {0}")] ShutdownCommunication(mpsc::error::SendError>), #[error("Unable to create gRPC client")] UnableToCreateGrpcClient(#[from] OutboundConnectionError), #[error("Gossip topics subscription failed")] GossipTopicSubscriptionFailure, } #[derive(Error, Debug)] pub enum CommandExecutionError { #[error("Unable to parse message")] ParsingError, #[error("Unable to send command {0}")] UnableToSendCommand(Command), #[error("Unable to perform query: {0}")] RequestOutboundFailure(#[from] OutboundFailure), #[error("Unable to receive expected response of a oneshot channel")] UnableToReceiveCommandResponse(#[from] oneshot::error::RecvError), #[error("Unable to send a command: {0}")] SendError(#[from] mpsc::error::SendError), #[error("Failed to fetch Record from DHT")] DHTGetRecordFailed, #[error("Connection with a peer has failed")] ConnectionClosed, #[error("No known peer in the peer set")] NoKnownPeer, } 
================================================ FILE: crates/topos-p2p/src/event.rs ================================================ use libp2p::{identify, kad, PeerId}; use crate::behaviour::{grpc, HealthStatus}; /// Represents the events that the Gossip protocol can emit #[derive(Debug)] pub enum GossipEvent { /// A message has been received from a peer on one of the subscribed topics Message { source: Option, topic: &'static str, message: Vec, }, } #[derive(Debug)] pub enum ComposedEvent { Kademlia(Box), PeerInfo(Box), Gossipsub(GossipEvent), Grpc(grpc::Event), Void, } impl From for ComposedEvent { fn from(event: grpc::Event) -> Self { ComposedEvent::Grpc(event) } } impl From for ComposedEvent { fn from(event: kad::Event) -> Self { ComposedEvent::Kademlia(Box::new(event)) } } impl From for ComposedEvent { fn from(event: identify::Event) -> Self { ComposedEvent::PeerInfo(Box::new(event)) } } impl From for ComposedEvent { fn from(_: void::Void) -> Self { Self::Void } } /// Represents the events that the p2p layer can emit #[derive(Debug)] pub enum Event { /// An event emitted when a gossip message is received Gossip { from: PeerId, data: Vec }, /// An event emitted when the p2p layer becomes healthy Healthy, /// An event emitted when the p2p layer becomes unhealthy Unhealthy, /// An event emitted when the p2p layer is shutting down Killing, } impl From<&HealthStatus> for Event { fn from(value: &HealthStatus) -> Self { match value { HealthStatus::Healthy => Event::Healthy, HealthStatus::Killing => Event::Killing, _ => Event::Unhealthy, } } } ================================================ FILE: crates/topos-p2p/src/lib.rs ================================================ #![allow(unused_variables)] mod behaviour; mod client; mod command; pub mod config; pub mod constants; pub mod error; mod event; mod runtime; #[cfg(test)] mod tests; use std::collections::HashSet; use std::convert::Infallible; pub(crate) use behaviour::Behaviour; pub use client::NetworkClient; pub 
use client::RetryPolicy; pub use command::Command; pub use event::Event; use http::Request; use http::Response; pub use libp2p::Multiaddr; pub use libp2p::PeerId; pub use runtime::Runtime; use hyper::Body; use tonic::body::BoxBody; use tonic::server::NamedService; use tonic::transport::server::Router; use topos_core::api::grpc::p2p::info_service_server::InfoService; use topos_core::api::grpc::p2p::info_service_server::InfoServiceServer; use tower::Service; pub mod network; pub const TOPOS_GOSSIP: &str = "topos_gossip"; pub const TOPOS_ECHO: &str = "topos_echo"; pub const TOPOS_READY: &str = "topos_ready"; #[macro_export] macro_rules! protocol_name { ($i:expr) => { format!("/{}", $i) }; } #[derive(Debug)] pub(crate) struct GrpcP2pInfo {} #[async_trait::async_trait] impl InfoService for GrpcP2pInfo {} pub use behaviour::grpc::GrpcContext; pub struct GrpcRouter { server: Router, protocols: HashSet, } impl GrpcRouter { pub fn new(mut server: tonic::transport::Server) -> Self { let mut protocols = HashSet::new(); protocols.insert(protocol_name!(InfoServiceServer::::NAME)); Self { server: server.add_optional_service::>(None), protocols, } } pub fn add_service(mut self, service: S) -> Self where S: Service, Response = Response, Error = Infallible> + NamedService + Clone + Send + 'static, S::Future: Send + 'static, { self.protocols.insert(protocol_name!(S::NAME)); self.server = self.server.add_service(service); self } } pub mod utils { use std::future::IntoFuture; use libp2p::{identity, PeerId}; use tokio::{sync::mpsc, sync::oneshot}; use tonic::server::NamedService; use topos_core::api::grpc::GrpcClient; use tracing::debug; use crate::{command::Command, error::P2PError}; #[derive(Clone)] pub struct GrpcOverP2P { pub(crate) proxy_sender: mpsc::Sender, } impl GrpcOverP2P { pub fn new(proxy_sender: mpsc::Sender) -> Self { Self { proxy_sender } } pub async fn create(&self, peer: PeerId) -> Result where C: GrpcClient, S: NamedService, { debug!("Creating new instance of GRPC 
client for P2P"); let (sender, recv) = oneshot::channel(); let id = uuid::Uuid::new_v4(); let _ = self .proxy_sender .send(Command::NewProxiedQuery { protocol: S::NAME, peer, id, response: sender, }) .await; let connection = recv.await?; let connected = connection.into_future().await?; Ok(C::init(connected.channel)) } } /// build peer_id keys, generate for now - either from the seed or purely random one pub fn local_key_pair(secret_key_seed: Option) -> identity::Keypair { // todo: load from protobuf encoded|base64 encoded config.local_key_pair match secret_key_seed { Some(seed) => { let mut bytes = [0u8; 32]; bytes[0] = seed; identity::Keypair::ed25519_from_bytes(bytes).expect("Invalid keypair") } None => identity::Keypair::generate_ed25519(), } } pub fn local_key_pair_from_slice(slice: &[u8]) -> identity::Keypair { // todo: load from protobuf encoded|base64 encoded config.local_key_pair let mut bytes = [0u8; 32]; if slice.len() <= 32 { bytes[..slice.len()].clone_from_slice(slice); } else { bytes.clone_from_slice(&slice[..32]); } identity::Keypair::ed25519_from_bytes(bytes).expect("Invalid keypair") } pub fn keypair_from_protobuf_encoding(priv_key: &[u8]) -> identity::Keypair { identity::Keypair::from_protobuf_encoding(priv_key).expect("Invalid keypair retrieval") } } #[test] pub fn generate_from_secp256k1() { // Key living in the AWS SM or FS at libp2p/libp2p.key let edge_dec_privkey = hex::decode("08021220eb5ce97bd3e7729ac4ab077b83881426cebf19e58a9d9760d1cedfc53d772d6c") .expect("Failed to hex decode"); use std::str::FromStr; let edge_peerid = PeerId::from_str("16Uiu2HAkxA7KW9GC2T3tQg3zHvjrnDPqfQUKTfzU3wbts8AsV6kH").unwrap(); let keypair = utils::keypair_from_protobuf_encoding(&edge_dec_privkey); // Verify that we end up with the same PeerId assert_eq!(keypair.public().to_peer_id(), edge_peerid); } ================================================ FILE: crates/topos-p2p/src/network.rs ================================================ use super::{Behaviour, Event, 
NetworkClient, Runtime}; use crate::{ behaviour::{ discovery::DiscoveryBehaviour, gossip, grpc, peer_info::PeerInfoBehaviour, HealthStatus, }, config::{DiscoveryConfig, NetworkConfig}, constants::{ self, COMMAND_STREAM_BUFFER_SIZE, DISCOVERY_PROTOCOL, EVENT_STREAM_BUFFER, PEER_INFO_PROTOCOL, }, error::P2PError, utils::GrpcOverP2P, GrpcContext, }; use futures::Stream; use libp2p::{ core::{transport::MemoryTransport, upgrade}, dns, identity::Keypair, kad::store::MemoryStore, noise, swarm::{self, ConnectionId}, tcp::Config, Multiaddr, PeerId, Swarm, Transport, }; use std::{ borrow::Cow, collections::{HashMap, HashSet}, time::Duration, }; use tokio::sync::{mpsc, oneshot}; use tokio_stream::wrappers::ReceiverStream; use tracing::debug; pub fn builder<'a>() -> NetworkBuilder<'a> { NetworkBuilder::default() } const TWO_HOURS: Duration = Duration::from_secs(60 * 60 * 2); #[derive(Default)] pub struct NetworkBuilder<'a> { discovery_protocol: Option<&'static str>, peer_key: Option, listen_addresses: Option>, public_addresses: Option>, store: Option, known_peers: &'a [(PeerId, Multiaddr)], local_port: Option, config: NetworkConfig, grpc_context: GrpcContext, memory_transport: bool, } impl<'a> NetworkBuilder<'a> { #[cfg(test)] pub(crate) fn memory(mut self) -> Self { self.memory_transport = true; self } pub fn grpc_context(mut self, grpc_context: GrpcContext) -> Self { self.grpc_context = grpc_context; self } pub fn discovery_config(mut self, config: DiscoveryConfig) -> Self { self.config.discovery = config; self } pub fn minimum_cluster_size(mut self, size: usize) -> Self { self.config.minimum_cluster_size = size; self } pub fn peer_key(mut self, peer_key: Keypair) -> Self { self.peer_key = Some(peer_key); self } pub fn public_addresses>>(mut self, addresses: M) -> Self { self.public_addresses = Some(addresses.into()); self } pub fn listen_addresses>>(mut self, addresses: M) -> Self { self.listen_addresses = Some(addresses.into()); self } #[doc(hidden)] pub fn 
allow_private_ip(mut self, allow_private_ip: bool) -> Self { self.config.allow_private_ip = allow_private_ip; self } pub fn store(mut self, store: MemoryStore) -> Self { self.store = Some(store); self } pub fn known_peers(mut self, known_peers: &'a [(PeerId, Multiaddr)]) -> Self { self.known_peers = known_peers; self } pub fn local_port(mut self, port: u8) -> Self { self.local_port = Some(port); self } pub fn discovery_protocol(mut self, protocol: &'static str) -> Self { self.discovery_protocol = Some(protocol); self } pub async fn build( mut self, ) -> Result<(NetworkClient, impl Stream, Runtime), P2PError> { let peer_key = self.peer_key.ok_or(P2PError::MissingPeerKey)?; let peer_id = peer_key.public().to_peer_id(); let (command_sender, command_receiver) = mpsc::channel(*COMMAND_STREAM_BUFFER_SIZE); let (event_sender, event_receiver) = mpsc::channel(*EVENT_STREAM_BUFFER); let gossipsub = gossip::Behaviour::new(peer_key.clone()).await; let grpc = grpc::Behaviour::new(self.grpc_context); debug!("Known peers: {:?}", self.known_peers); let behaviour = Behaviour { gossipsub, peer_info: PeerInfoBehaviour::new(PEER_INFO_PROTOCOL, &peer_key), discovery: DiscoveryBehaviour::create( &self.config.discovery, peer_key.clone(), Cow::Borrowed( self.discovery_protocol .unwrap_or(DISCOVERY_PROTOCOL) .as_bytes(), ), self.known_peers, false, ), grpc, }; let multiplex_config = libp2p::yamux::Config::default(); let transport = if self.memory_transport { MemoryTransport::new() .upgrade(upgrade::Version::V1) .authenticate(noise::Config::new(&peer_key)?) .multiplex(multiplex_config) .timeout(TWO_HOURS) .boxed() } else { let tcp = libp2p::tcp::tokio::Transport::new(Config::default().nodelay(true)); let dns_tcp = dns::tokio::Transport::system(tcp).unwrap(); let tcp = libp2p::tcp::tokio::Transport::new(Config::default().nodelay(true)); dns_tcp .or_transport(tcp) .upgrade(upgrade::Version::V1) .authenticate(noise::Config::new(&peer_key)?) 
.multiplex(multiplex_config) .timeout(TWO_HOURS) .boxed() }; let swarm = Swarm::new( transport, behaviour, peer_id, swarm::Config::with_tokio_executor() .with_idle_connection_timeout(constants::IDLE_CONNECTION_TIMEOUT), ); let (shutdown_channel, shutdown) = mpsc::channel::>(1); let grpc_over_p2p = GrpcOverP2P::new(command_sender.clone()); let listen_addr = self .listen_addresses .take() .expect("Node requires at least one address to listen for incoming connections"); let public_addresses = self .public_addresses .map(|addresses| { if addresses.is_empty() { listen_addr.clone() } else { addresses } }) .unwrap_or(listen_addr.clone()); Ok(( NetworkClient { retry_ttl: self.config.client_retry_ttl, local_peer_id: peer_id, sender: command_sender, grpc_over_p2p, shutdown_channel, }, ReceiverStream::new(event_receiver), Runtime { swarm, config: self.config, peer_set: self.known_peers.iter().map(|(p, _)| *p).collect(), boot_peers: self.known_peers.iter().map(|(p, _)| *p).collect(), command_receiver, event_sender, local_peer_id: peer_id, listening_on: listen_addr, public_addresses, active_listeners: HashSet::new(), pending_record_requests: HashMap::new(), shutdown, health_state: crate::runtime::HealthState { bootnode_connection_retries: 3, successfully_connected_to_bootnode: if self.known_peers.is_empty() { // Node seems to be a boot node Some(ConnectionId::new_unchecked(0)) } else { None }, ..Default::default() }, health_status: HealthStatus::Initializing, }, )) } } ================================================ FILE: crates/topos-p2p/src/runtime/handle_command.rs ================================================ use crate::{ error::{CommandExecutionError, P2PError}, protocol_name, Command, Runtime, }; use rand::{thread_rng, Rng}; use topos_metrics::P2P_MESSAGE_SENT_ON_GOSSIPSUB_TOTAL; use tracing::{debug, error, warn}; impl Runtime { pub(crate) async fn handle_command(&mut self, command: Command) { match command { Command::NewProxiedQuery { peer, id, response, protocol, } 
=> { let connection = self .swarm .behaviour_mut() .grpc .open_outbound_connection(&peer, protocol_name!(protocol)); _ = response.send(connection); } Command::ConnectedPeers { sender } => { if sender .send(Ok(self .swarm .connected_peers() .cloned() .collect::>())) .is_err() { warn!("Unable to notify ConnectedPeers response: initiator is dropped"); } } Command::RandomKnownPeer { sender } => { if self.peer_set.is_empty() { let _ = sender.send(Err(P2PError::CommandError( CommandExecutionError::NoKnownPeer, ))); return; } let selected_peer: usize = thread_rng().gen_range(0..(self.peer_set.len())); if sender .send( self.peer_set .iter() .nth(selected_peer) .cloned() .ok_or(P2PError::CommandError(CommandExecutionError::NoKnownPeer)), ) .is_err() { warn!("Unable to notify RandomKnownPeer response: initiator is dropped"); } } Command::Gossip { topic, data: message, } => match self.swarm.behaviour_mut().gossipsub.publish(topic, message) { Ok(message_id) => { debug!("Published message to {topic}"); P2P_MESSAGE_SENT_ON_GOSSIPSUB_TOTAL.inc(); } Err(err) => error!("Failed to publish message to {topic}: {err}"), }, } } } ================================================ FILE: crates/topos-p2p/src/runtime/handle_event/discovery.rs ================================================ use libp2p::kad::{BootstrapOk, BootstrapResult, Event, QueryResult}; use tracing::{debug, error, info, warn}; use crate::{behaviour::HealthStatus, error::P2PError, Runtime}; use super::{EventHandler, EventResult}; #[async_trait::async_trait] impl EventHandler> for Runtime { async fn handle(&mut self, event: Box) -> EventResult { match *event { Event::InboundRequest { request } => { // warn!("InboundRequest {:?}", request); } Event::RoutingUpdated { peer, addresses, .. 
} => { debug!("DHT -> RoutingUpdated {:?} {:?}", peer, addresses); } Event::RoutablePeer { peer, address } => { debug!("DHT -> RoutablePeer {:?}, {:?}", peer, address); } Event::PendingRoutablePeer { peer, address } => { debug!("DHT -> PendingRoutablePeer {:?}, {:?}", peer, address); } Event::UnroutablePeer { peer } => { // Ignored } Event::OutboundQueryProgressed { id, result: QueryResult::Bootstrap(BootstrapResult::Ok(BootstrapOk { peer, num_remaining, })), stats, step, } if num_remaining == 0 && self.swarm.behaviour().discovery.health_status == HealthStatus::Initializing => { if self .health_state .successfully_connected_to_bootnode .is_none() { warn!( "Bootstrap query finished but unable to connect to bootnode during \ initialization, switching from discovery(initializing) -> \ discover(unhealthy) and fast bootstrap mode", ); let behaviour = self.swarm.behaviour_mut(); behaviour.discovery.health_status = HealthStatus::Unhealthy; _ = behaviour .discovery .change_interval(self.config.discovery.fast_bootstrap_interval) .await; } else { warn!( "Bootstrap query finished with bootnode, switching from \ discovery(initializing) -> discovery(healthy)", ); let behaviour = self.swarm.behaviour_mut(); behaviour.discovery.health_status = HealthStatus::Healthy; } } Event::OutboundQueryProgressed { id, result: QueryResult::Bootstrap(BootstrapResult::Ok(BootstrapOk { peer, num_remaining, })), stats, step, } if num_remaining == 0 && self .health_state .successfully_connected_to_bootnode .is_none() && self.swarm.behaviour().discovery.health_status == HealthStatus::Unhealthy => { match self.health_state.bootnode_connection_retries.checked_sub(1) { None => { error!( "Bootstrap query finished but unable to connect to bootnode, stopping" ); return Err(P2PError::UnableToReachBootnode); } Some(new) => { warn!( "Bootstrap query finished but unable to connect to bootnode, retrying \ {} more times", new ); self.health_state.bootnode_connection_retries = new; } } } 
Event::OutboundQueryProgressed { id, result: QueryResult::Bootstrap(BootstrapResult::Ok(BootstrapOk { peer, num_remaining, })), stats, step, } if num_remaining == 0 && self .health_state .successfully_connected_to_bootnode .is_some() && self.swarm.behaviour().discovery.health_status == HealthStatus::Unhealthy => { info!( "Bootstrap query finished with bootnode, switching discover(unhealthy) -> \ discover(healthy) and normal bootstrap mode", ); let behaviour = self.swarm.behaviour_mut(); behaviour.discovery.health_status = HealthStatus::Healthy; _ = behaviour .discovery .change_interval(self.config.discovery.bootstrap_interval) .await; } Event::OutboundQueryProgressed { result: QueryResult::Bootstrap(res), id, .. } => { debug!("BootstrapResult query: {id:?}, {res:?}"); } Event::OutboundQueryProgressed { id, result, stats, .. } => {} Event::ModeChanged { new_mode } => {} } Ok(()) } } ================================================ FILE: crates/topos-p2p/src/runtime/handle_event/gossipsub.rs ================================================ use topos_metrics::{ P2P_EVENT_STREAM_CAPACITY_TOTAL, P2P_MESSAGE_DESERIALIZE_FAILURE_TOTAL, P2P_MESSAGE_RECEIVED_ON_ECHO_TOTAL, P2P_MESSAGE_RECEIVED_ON_GOSSIP_TOTAL, P2P_MESSAGE_RECEIVED_ON_READY_TOTAL, }; use tracing::{debug, error}; use crate::{constants, event::GossipEvent, Event, Runtime, TOPOS_ECHO, TOPOS_GOSSIP, TOPOS_READY}; use prost::Message; use topos_core::api::grpc::tce::v1::Batch; use super::{EventHandler, EventResult}; #[async_trait::async_trait] impl EventHandler for Runtime { async fn handle(&mut self, event: GossipEvent) -> EventResult { if let GossipEvent::Message { source: Some(source), message, topic, } = event { if self.event_sender.capacity() < *constants::CAPACITY_EVENT_STREAM_BUFFER { P2P_EVENT_STREAM_CAPACITY_TOTAL.inc(); } debug!("Received message from {:?} on topic {:?}", source, topic); match topic { TOPOS_GOSSIP => { P2P_MESSAGE_RECEIVED_ON_GOSSIP_TOTAL.inc(); if let Err(e) = self .event_sender 
.send(Event::Gossip { from: source, data: message, }) .await { error!("Failed to send gossip event to runtime: {:?}", e); } } TOPOS_ECHO | TOPOS_READY => { if topic == TOPOS_ECHO { P2P_MESSAGE_RECEIVED_ON_ECHO_TOTAL.inc(); } else { P2P_MESSAGE_RECEIVED_ON_READY_TOTAL.inc(); } if let Ok(Batch { messages }) = Batch::decode(&message[..]) { for message in messages { if let Err(e) = self .event_sender .send(Event::Gossip { from: source, data: message, }) .await { error!("Failed to send gossip {} event to runtime: {:?}", topic, e); } } } else { P2P_MESSAGE_DESERIALIZE_FAILURE_TOTAL .with_label_values(&[topic]) .inc(); } } _ => { error!("Received message on unknown topic {:?}", topic); } } } Ok(()) } } ================================================ FILE: crates/topos-p2p/src/runtime/handle_event/grpc.rs ================================================ use tracing::debug; use crate::{behaviour::grpc, Runtime}; use super::{EventHandler, EventResult}; #[async_trait::async_trait] impl EventHandler for Runtime { async fn handle(&mut self, event: grpc::Event) -> EventResult { match event { grpc::Event::OutboundFailure { peer_id, request_id, error, } => { debug!( "Outbound connection failure to peer {} for request {}: {}", peer_id, request_id, error ); } grpc::Event::OutboundSuccess { peer_id, request_id, .. 
} => { debug!( "Outbound connection success to peer {} for request {}", peer_id, request_id ); } grpc::Event::InboundNegotiatedConnection { request_id, connection_id, } => { debug!( "Inbound connection negotiated for request {} with connection {}", request_id, connection_id ); } grpc::Event::OutboundNegotiatedConnection { peer_id, request_id, } => { debug!( "Outbound connection negotiated to peer {} for request {}", peer_id, request_id ); } } Ok(()) } } ================================================ FILE: crates/topos-p2p/src/runtime/handle_event/peer_info.rs ================================================ use ip_network::IpNetwork; use libp2p::{ identify::{Event as IdentifyEvent, Info as IdentifyInfo}, multiaddr::Protocol, Multiaddr, }; use tracing::info; use crate::{constants::PEER_INFO_PROTOCOL, Runtime}; use super::{EventHandler, EventResult}; #[async_trait::async_trait] impl EventHandler> for Runtime { async fn handle(&mut self, event: Box) -> EventResult { if let IdentifyEvent::Received { peer_id, info, .. } = *event { let IdentifyInfo { protocol_version, listen_addrs, protocols, observed_addr, .. 
} = info; if !self.peer_set.contains(&peer_id) && protocol_version.as_bytes() == PEER_INFO_PROTOCOL.as_bytes() { self.peer_set.insert(peer_id); for addr in listen_addrs { if self.config.allow_private_ip || is_global_addr(&addr) { info!( "Adding self-reported address {} from {} to Kademlia DHT.", addr, peer_id ); self.swarm .behaviour_mut() .discovery .inner .add_address(&peer_id, addr); } } } } Ok(()) } } pub fn is_global_addr(addr: &Multiaddr) -> bool { match addr.iter().next() { Some(Protocol::Dns(_)) | Some(Protocol::Dns4(_)) | Some(Protocol::Dns6(_)) => true, Some(Protocol::Ip4(ip)) => IpNetwork::from(ip).is_global(), Some(Protocol::Ip6(ip)) => IpNetwork::from(ip).is_global(), _ => false, } } ================================================ FILE: crates/topos-p2p/src/runtime/handle_event.rs ================================================ use libp2p::{core::Endpoint, multiaddr::Protocol, swarm::SwarmEvent}; use tracing::{debug, error, info, warn}; use crate::{error::P2PError, event::ComposedEvent, Event, Runtime}; mod discovery; mod gossipsub; mod grpc; mod peer_info; pub type EventResult = Result<(), P2PError>; #[async_trait::async_trait] pub(crate) trait EventHandler { async fn handle(&mut self, event: T) -> EventResult; } #[async_trait::async_trait] impl EventHandler for Runtime { async fn handle(&mut self, event: Event) -> EventResult { if let Err(error) = self.event_sender.try_send(event) { warn!(reason = %error, "Unable to send NetworkEvent event to outer stream"); } Ok(()) } } #[async_trait::async_trait] impl EventHandler for Runtime { async fn handle(&mut self, event: ComposedEvent) -> EventResult { match event { ComposedEvent::Kademlia(event) => self.handle(event).await, ComposedEvent::PeerInfo(event) => self.handle(event).await, ComposedEvent::Gossipsub(event) => self.handle(event).await, ComposedEvent::Grpc(event) => self.handle(event).await, ComposedEvent::Void => Ok(()), } } } #[async_trait::async_trait] impl EventHandler> for Runtime { async fn 
handle(&mut self, event: SwarmEvent) -> EventResult { match event { SwarmEvent::NewListenAddr { listener_id, address, .. } => { info!( "Local node is listening on {:?}", address.with(Protocol::P2p(self.local_peer_id)), ); self.active_listeners.insert(listener_id); } SwarmEvent::OutgoingConnectionError { connection_id, peer_id: Some(peer_id), error, } if self .health_state .successfully_connected_to_bootnode .is_none() && self.health_state.dialed_bootnode.contains(&connection_id) => { warn!("Unable to connect to bootnode {peer_id}: {error:?}"); self.health_state.dialed_bootnode.remove(&connection_id); if self.health_state.dialed_bootnode.is_empty() { // We tried to connect to all bootnode without success error!("Unable to connect to any bootnode"); } } SwarmEvent::OutgoingConnectionError { peer_id, error, connection_id, } => { if let Some(peer_id) = peer_id { error!( "OutgoingConnectionError peer_id: {peer_id} | error: {error:?} | \ connection_id: {connection_id}" ); } else { error!( "OutgoingConnectionError for unknown peer | error: {error:?} | \ connection_id: {connection_id}" ); error!("OutgoingConnectionError {error:?}"); } } SwarmEvent::ConnectionEstablished { peer_id, connection_id, endpoint, num_established, concurrent_dial_errors, established_in, } if self.health_state.dialed_bootnode.contains(&connection_id) => { info!("Successfully connected to bootnode {peer_id}"); if self .health_state .successfully_connected_to_bootnode .is_none() { self.health_state.successfully_connected_to_bootnode = Some(connection_id); _ = self.health_state.dialed_bootnode.remove(&connection_id); } } SwarmEvent::ConnectionEstablished { peer_id, endpoint, connection_id, .. 
} => { if self .health_state .successfully_connected_to_bootnode .is_none() && self.boot_peers.contains(&peer_id) { info!( "Connection established with bootnode {peer_id} as {:?}", endpoint.to_endpoint() ); if endpoint.to_endpoint() == Endpoint::Listener { if let Err(error) = self.swarm.dial(peer_id) { error!( "Unable to dial bootnode {peer_id} after incoming connection: \ {error}" ); } } } else { info!( "Connection established with peer {peer_id} as {:?}", endpoint.to_endpoint() ); } if self.swarm.connected_peers().count() >= self.config.minimum_cluster_size { if let Err(error) = self.swarm.behaviour_mut().gossipsub.subscribe() { error!("Unable to subscribe to gossipsub topic: {}", error); return Err(P2PError::GossipTopicSubscriptionFailure); } } } incoming_connection_error @ SwarmEvent::IncomingConnectionError { .. } => { error!("{:?}", incoming_connection_error); } SwarmEvent::IncomingConnection { local_addr, connection_id, send_back_addr, } => { debug!( "IncomingConnection | local_addr: {local_addr} | connection_id: \ {connection_id} | send_back_addr: {send_back_addr}" ) } SwarmEvent::ListenerClosed { listener_id, addresses, reason, } => { debug!( "ListenerClosed {:?}: listener_id{listener_id:?} | addresses: {addresses:?} | \ reason: {reason:?}", *self.swarm.local_peer_id() ); } SwarmEvent::ConnectionClosed { peer_id, cause, .. 
} => { debug!("ConnectionClosed {peer_id} because of {cause:?}"); } SwarmEvent::Dialing { peer_id: Some(ref peer_id), connection_id, } if self.boot_peers.contains(peer_id) => { info!("Dialing bootnode {peer_id} on connection: {connection_id}"); self.health_state.dialed_bootnode.insert(connection_id); } SwarmEvent::Dialing { peer_id, connection_id, } => { debug!("Dialing peer_id: {peer_id:?} | connection_id: {connection_id}"); } SwarmEvent::Behaviour(event) => self.handle(event).await?, SwarmEvent::ExpiredListenAddr { listener_id, address, } => error!("Unhandled ExpiredListenAddr {listener_id:?} | {address}"), SwarmEvent::ListenerError { listener_id, error } => { error!("Unhandled ListenerError {listener_id:?} | {error}") } event => { warn!("Unhandled SwarmEvent: {:?}", event); } } let behaviour = self.swarm.behaviour(); if let Some(event) = self.healthy_status_changed() { debug!("Healthy status changed: {:?}", event); _ = self.event_sender.send(event).await; } info!("Healthystatus: {:?}", self.health_status); Ok(()) } } ================================================ FILE: crates/topos-p2p/src/runtime/mod.rs ================================================ use std::collections::{HashMap, HashSet}; use crate::{ behaviour::{discovery::PendingRecordRequest, HealthStatus}, config::NetworkConfig, error::P2PError, runtime::handle_event::EventHandler, Behaviour, Command, Event, }; use libp2p::{ core::transport::ListenerId, kad::QueryId, swarm::ConnectionId, Multiaddr, PeerId, Swarm, }; use tokio::{ spawn, sync::{mpsc, oneshot}, task::JoinHandle, }; use tokio_stream::{Stream, StreamExt}; use tracing::{debug, error, info, Instrument}; pub struct Runtime { pub(crate) config: NetworkConfig, // TODO: check if needed pub(crate) peer_set: HashSet, pub(crate) swarm: Swarm, pub(crate) command_receiver: mpsc::Receiver, pub(crate) event_sender: mpsc::Sender, pub(crate) local_peer_id: PeerId, pub(crate) listening_on: Vec, pub(crate) public_addresses: Vec, /// Well-known or 
pre-configured bootnodes to connect to in order to bootstrap the p2p layer pub(crate) boot_peers: Vec, /// Contains current listenerId of the swarm pub active_listeners: HashSet, /// Pending DHT queries pub pending_record_requests: HashMap, /// Shutdown signal receiver from the client pub(crate) shutdown: mpsc::Receiver>, /// Internal health state of the p2p layer pub(crate) health_state: HealthState, /// Health status of the p2p layer pub(crate) health_status: HealthStatus, } mod handle_command; mod handle_event; /// Internal health state of the p2p layer /// /// This struct may change in the future to be more flexible and to handle more /// complex state transitions/representation. #[derive(Default)] pub(crate) struct HealthState { /// Indicates if the node has external addresses configured pub(crate) has_external_addresses: bool, /// Indicates if the node is listening on any address pub(crate) is_listening: bool, /// List the bootnodes that the node has tried to connect to pub(crate) dialed_bootnode: HashSet, /// Indicates if the node has successfully connected to a bootnode pub(crate) successfully_connected_to_bootnode: Option, /// Track the number of remaining retries to connect to any bootnode pub(crate) bootnode_connection_retries: usize, } impl Runtime { /// Bootstrap the p2p layer runtime with the given configuration. /// This method will configure, launch and start queries. /// The result of this call is a p2p layer bootstrap but it doesn't mean it is /// ready. 
pub async fn bootstrap + Unpin + Send>( mut self, event_stream: &mut S, ) -> Result>, P2PError> { debug!("Added public addresses: {:?}", self.public_addresses); for address in &self.public_addresses { self.swarm.add_external_address(address.clone()); self.health_state.has_external_addresses = true; } debug!("Starting to listen on {:?}", self.listening_on); for addr in &self.listening_on { if let Err(error) = self.swarm.listen_on(addr.clone()) { error!("Couldn't start listening on {} because of {error:?}", addr); return Err(P2PError::TransportError(error)); } self.health_state.is_listening = true; } let mut handle = spawn(self.run().in_current_span()); // Await the Event::Healthy coming from freshly started p2p layer loop { tokio::select! { result = &mut handle => { match result { Ok(Ok(_)) => info!("P2P layer has been shutdown"), Ok(Err(error)) => { error!("P2P layer has failed with error: {:?}", error); return Err(error); } Err(_) => { error!("P2P layer has failed in an unexpected way."); return Err(P2PError::JoinHandleFailure); } } } Some(event) = event_stream.next() => { if let Event::Healthy = event { info!("P2P layer is healthy"); break; } } } } Ok(handle) } /// Run p2p runtime pub async fn run(mut self) -> Result<(), P2PError> { let shutdowned: Option> = loop { tokio::select! { Some(event) = self.swarm.next() => { self.handle(event).in_current_span().await? 
}, Some(command) = self.command_receiver.recv() => self.handle_command(command).in_current_span().await, shutdown = self.shutdown.recv() => { break shutdown; } } }; if let Some(sender) = shutdowned { info!("Shutting down p2p runtime..."); _ = sender.send(()); } Ok(()) } pub(crate) fn healthy_status_changed(&mut self) -> Option { let behaviours = self.swarm.behaviour(); let gossipsub = &behaviours.gossipsub.health_status; let discovery = &behaviours.discovery.health_status; let new_status = match (discovery, gossipsub) { (HealthStatus::Killing, _) | (_, HealthStatus::Killing) => HealthStatus::Killing, (HealthStatus::Initializing, _) | (_, HealthStatus::Initializing) => { HealthStatus::Initializing } (HealthStatus::Unhealthy, _) | (_, HealthStatus::Unhealthy) => HealthStatus::Unhealthy, (HealthStatus::Recovering, _) | (_, HealthStatus::Recovering) => { HealthStatus::Recovering } (HealthStatus::Healthy, HealthStatus::Healthy) => HealthStatus::Healthy, }; if self.health_status != new_status { self.health_status = new_status; Some((&self.health_status).into()) } else { None } } } ================================================ FILE: crates/topos-p2p/src/tests/behaviour/grpc.rs ================================================ use std::{collections::HashSet, future::IntoFuture, time::Duration}; use libp2p::Swarm; use libp2p_swarm_test::SwarmExt; use rstest::rstest; use test_log::test; use tokio::spawn; use tokio_util::sync::CancellationToken; use tonic::server::NamedService; use tonic::transport::Server; use topos_test_sdk::grpc::{ behaviour::{ helloworld::{ greeter_client::GreeterClient, greeter_server::GreeterServer, HelloRequest, HelloWithDelayRequest, }, noop::noop_server::NoopServer, }, implementations::{self, DummyServer}, }; use crate::{ behaviour::grpc::{ self, error::{OutboundConnectionError, OutboundError}, }, protocol_name, GrpcContext, GrpcRouter, }; #[rstest] #[test(tokio::test)] #[timeout(Duration::from_secs(5))] async fn instantiate_grpc() { let dummy = 
DummyServer {}; let router = GrpcContext::default() .with_router(GrpcRouter::new(Server::builder()).add_service(GreeterServer::new(dummy))); let client_protocols = { let mut protocols = HashSet::new(); protocols.insert(protocol_name!(GreeterServer::::NAME)); protocols }; let mut client_swarm = Swarm::new_ephemeral(|_| grpc::Behaviour::new(GrpcContext::default())); let mut server_swarm = Swarm::new_ephemeral(|_| grpc::Behaviour::new(router)); let server_peer_id = *server_swarm.local_peer_id(); server_swarm.listen().await; let server_address = server_swarm.listeners().next().unwrap(); client_swarm .behaviour_mut() .add_address(&server_peer_id, server_address.clone()); let outbound_connection = client_swarm.behaviour_mut().open_outbound_connection( &server_peer_id, protocol_name!(GreeterServer::::NAME), ); let shutdown = CancellationToken::new(); let client_shutdown = shutdown.child_token(); let server_shutdown = shutdown.child_token(); let client_swarm = async move { loop { tokio::select! { event = client_swarm.next_swarm_event() => {} _ = client_shutdown.cancelled() => { return client_swarm; } } } }; let server_swarm = async move { loop { tokio::select! 
{ _ = server_swarm.next_swarm_event() => {} _ = server_shutdown.cancelled() => { return server_swarm; } } } }; let server_swarm = spawn(server_swarm); let client_swarm = spawn(client_swarm); println!("Starting"); let connection = outbound_connection.into_future().await.unwrap(); println!("Stopping"); shutdown.cancel(); let server_swarm = server_swarm.await.unwrap(); let client_swarm = client_swarm.await.unwrap(); assert_eq!( server_swarm.connected_peers().collect::>(), vec![client_swarm.local_peer_id()] ); } #[test(tokio::test)] async fn opening_outbound_stream() {} #[test(tokio::test)] async fn opening_outbound_stream_half_close() {} #[test(tokio::test)] #[ignore = "TP-757: Need to find a way to properly close the connection after sending the query"] async fn closing_stream() { let dummy = DummyServer {}; let router = GrpcContext::default() .with_router(GrpcRouter::new(Server::builder()).add_service(GreeterServer::new(dummy))); let client_protocols = { let mut protocols = HashSet::new(); protocols.insert(protocol_name!(GreeterServer::::NAME)); protocols }; let mut client_swarm = Swarm::new_ephemeral(|_| grpc::Behaviour::new(GrpcContext::default())); let mut server_swarm = Swarm::new_ephemeral(|_| grpc::Behaviour::new(router)); let server_peer_id = *server_swarm.local_peer_id(); server_swarm.listen().await; let server_address = server_swarm.listeners().next().unwrap(); client_swarm .behaviour_mut() .add_address(&server_peer_id, server_address.clone()); let outbound_connection = client_swarm.behaviour_mut().open_outbound_connection( &server_peer_id, protocol_name!(GreeterServer::::NAME), ); let client_swarm = async move { loop { client_swarm.next_swarm_event().await; } }; let server_swarm = async move { loop { server_swarm.next_swarm_event().await; } }; spawn(server_swarm); spawn(client_swarm); let connection = outbound_connection.into_future().await.unwrap(); let mut client = GreeterClient::new(connection.channel); let result = client 
.say_hello_with_delay(HelloWithDelayRequest { name: "Simon".into(), delay_in_seconds: 10, }) .await .unwrap(); assert_eq!(result.into_inner().message, "Hello Simon"); } #[test(tokio::test)] async fn execute_query() { let dummy = DummyServer {}; let router = GrpcContext::default() .with_router(GrpcRouter::new(Server::builder()).add_service(GreeterServer::new(dummy))); let client_protocols = { let mut protocols = HashSet::new(); protocols.insert(protocol_name!(GreeterServer::::NAME)); protocols }; let mut client_swarm = Swarm::new_ephemeral(|_| grpc::Behaviour::new(GrpcContext::default())); let mut server_swarm = Swarm::new_ephemeral(|_| grpc::Behaviour::new(router)); let (multiaddr, _) = server_swarm.listen().await; let server_peer_id = *server_swarm.local_peer_id(); client_swarm .behaviour_mut() .add_address(&server_peer_id, multiaddr); let outbound_connection = client_swarm.behaviour_mut().open_outbound_connection( &server_peer_id, protocol_name!(GreeterServer::::NAME), ); let client_swarm = async move { loop { client_swarm.next_swarm_event().await; } }; let server_swarm = async move { loop { server_swarm.next_swarm_event().await; } }; spawn(server_swarm); spawn(client_swarm); let connection = outbound_connection.into_future().await.unwrap(); let mut client = GreeterClient::new(connection.channel); let result = client .say_hello(HelloRequest { name: "Simon".into(), }) .await .unwrap(); assert_eq!(result.into_inner().message, "Hello Simon"); } #[rstest] fn create_context_with_only_router() { let context = GrpcContext::default().with_router(GrpcRouter::new(Server::builder())); let (router, (inbound, outbound)) = context.into_parts(); assert!(router.is_some()); assert_eq!(inbound, outbound); } #[rstest] fn create_context_with_only_client() { let context = GrpcContext::default(); let (router, (inbound, outbound)) = context.into_parts(); assert!(router.is_none()); assert_eq!(inbound, outbound); } #[rstest] fn create_context_with_only_client_custom_protocol() { let 
context = GrpcContext::default().add_client_protocol("/custom"); let (router, (inbound, outbound)) = context.into_parts(); assert!(router.is_none()); assert_ne!(inbound, outbound); assert_eq!(outbound.into_iter().collect::>(), vec!["/custom"]); assert_eq!( inbound.into_iter().collect::>(), Vec::::new() ); } #[test(tokio::test)] async fn incompatible_protocol() { let dummy = DummyServer {}; let router = GrpcContext::default() .with_router(GrpcRouter::new(Server::builder()).add_service(GreeterServer::new(dummy))); let client_protocols = { let mut protocols = HashSet::new(); protocols.insert(protocol_name!( NoopServer::::NAME )); protocols }; let mut client_swarm = Swarm::new_ephemeral(|_| { grpc::Behaviour::new(GrpcContext::default().with_client_protocols(client_protocols)) }); let mut server_swarm = Swarm::new_ephemeral(|_| grpc::Behaviour::new(router)); let server_peer_id = *server_swarm.local_peer_id(); let (multiaddr, _) = server_swarm.listen().await; client_swarm .behaviour_mut() .add_address(&server_peer_id, multiaddr); let outbound_connection = client_swarm.behaviour_mut().open_outbound_connection( &server_peer_id, protocol_name!(NoopServer::::NAME), ); let client_swarm = async move { loop { client_swarm.next_swarm_event().await; } }; let server_swarm = async move { loop { server_swarm.next_swarm_event().await; } }; spawn(server_swarm); spawn(client_swarm); let result = outbound_connection.into_future().await; assert!(result.is_err()); assert!(matches!( result, Err(OutboundConnectionError::Outbound( OutboundError::UnsupportedProtocol(_) )) )); } ================================================ FILE: crates/topos-p2p/src/tests/behaviour/mod.rs ================================================ mod grpc; ================================================ FILE: crates/topos-p2p/src/tests/bootstrap.rs ================================================ use std::time::Duration; use futures::{future::join_all, FutureExt}; use rstest::rstest; use test_log::test; use 
topos_test_sdk::tce::NodeConfig; use tracing::Instrument; #[rstest] #[test(tokio::test)] #[timeout(Duration::from_secs(5))] async fn two_bootnode_communicating() { let bootnode = NodeConfig::memory(2); let local = NodeConfig::memory(1); let bootnode_known_peers = vec![(local.peer_id(), local.addr.clone())]; let local_known_peers = vec![(bootnode.peer_id(), bootnode.addr.clone())]; let mut handlers = Vec::new(); let context_local = tracing::info_span!("start_node", "peer_id" = local.peer_id().to_string()); let context_bootnode = tracing::info_span!("start_node", "peer_id" = bootnode.peer_id().to_string()); handlers.push( async move { let (client, mut stream, runtime) = crate::network::builder() .minimum_cluster_size(1) .peer_key(local.keypair.clone()) .listen_addresses(&[local.addr.clone()]) .known_peers(&local_known_peers) .memory() .build() .await .expect("Unable to create p2p network"); runtime.bootstrap(&mut stream).await } .instrument(context_local) .boxed(), ); handlers.push( async move { let (client, mut stream, runtime) = crate::network::builder() .minimum_cluster_size(1) .peer_key(bootnode.keypair.clone()) .listen_addresses(&[bootnode.addr.clone()]) .known_peers(&bootnode_known_peers) .memory() .build() .await .expect("Unable to create p2p network"); runtime.bootstrap(&mut stream).await } .instrument(context_bootnode) .boxed(), ); assert!(join_all(handlers).await.iter().all(Result::is_ok)); } ================================================ FILE: crates/topos-p2p/src/tests/command/mod.rs ================================================ mod random_peer; ================================================ FILE: crates/topos-p2p/src/tests/command/random_peer.rs ================================================ use std::time::Duration; use rstest::rstest; use test_log::test; use tokio::spawn; use topos_test_sdk::tce::NodeConfig; use crate::error::P2PError; #[rstest] #[test(tokio::test)] #[timeout(Duration::from_secs(5))] async fn no_random_peer() { let local = 
NodeConfig::from_seed(1); let (client, stream, runtime) = crate::network::builder() .minimum_cluster_size(0) .peer_key(local.keypair.clone()) .public_addresses(&[local.addr.clone()]) .listen_addresses(&[local.addr.clone()]) .public_addresses(vec![local.addr.clone()]) .listen_addresses(vec![local.addr.clone()]) .build() .await .expect("Unable to create p2p network"); tokio::spawn(runtime.run()); let result = client.random_known_peer().await; assert!(result.is_err()); assert!(matches!( result, Err(P2PError::CommandError( crate::error::CommandExecutionError::NoKnownPeer )) )); } #[rstest] #[test(tokio::test)] #[timeout(Duration::from_secs(5))] async fn return_a_peer() { let local = NodeConfig::from_seed(1); let expected = NodeConfig::from_seed(2); let expected_peer_id = expected.keypair.public().to_peer_id(); let (client, stream, mut runtime) = crate::network::builder() .minimum_cluster_size(0) .peer_key(local.keypair.clone()) .public_addresses(vec![local.addr.clone()]) .listen_addresses(vec![local.addr.clone()]) .build() .await .expect("Unable to create p2p network"); runtime.peer_set.insert(expected_peer_id); spawn(runtime.run()); let result = client.random_known_peer().await; assert!(result.is_ok()); assert!(matches!( result, Ok(peer) if peer == expected_peer_id )); } #[rstest] #[test(tokio::test)] #[timeout(Duration::from_secs(5))] async fn return_a_random_peer_among_100() { let local = NodeConfig::from_seed(1); let (client, stream, mut runtime) = crate::network::builder() .minimum_cluster_size(0) .peer_key(local.keypair.clone()) .public_addresses(vec![local.addr.clone()]) .listen_addresses(vec![local.addr.clone()]) .build() .await .expect("Unable to create p2p network"); for i in 2..=100 { let peer = NodeConfig::from_seed(i); runtime.peer_set.insert(peer.keypair.public().to_peer_id()); } spawn(runtime.run()); let first_try = client.random_known_peer().await.unwrap(); let second_try = client.random_known_peer().await.unwrap(); let third_try = 
client.random_known_peer().await.unwrap(); assert!(first_try != second_try); assert!(first_try != third_try); } ================================================ FILE: crates/topos-p2p/src/tests/mod.rs ================================================ mod behaviour; mod bootstrap; mod command; mod support; ================================================ FILE: crates/topos-p2p/src/tests/support/macros.rs ================================================ #[macro_export] macro_rules! wait_for_event { ($node:ident, matches: $(|)? $( $pattern:pat_param )|+ $( if $guard: expr )? $(,)?) => { let assertion = async { while let Some(event) = $node.next().await { if matches!(event, $( $pattern )|+ $( if $guard )?) { break; } } }; if let Err(_) = tokio::time::timeout(std::time::Duration::from_millis(100), assertion).await { panic!("Timeout waiting for event"); } }; } ================================================ FILE: crates/topos-p2p/src/tests/support/mod.rs ================================================ use libp2p::{ identity::{self, Keypair}, Multiaddr, PeerId, }; use rstest::fixture; use tokio::spawn; use topos_test_sdk::networking::get_available_port; use crate::{network::NetworkBuilder, NetworkClient, Runtime}; pub mod macros; pub type PeerAddr = (PeerId, Multiaddr); #[fixture] pub async fn dummy_peer() -> (NetworkClient, PeerAddr) { let (key, addr_dummy) = local_peer(1); let dummy_peer = (key.public().to_peer_id(), addr_dummy.clone()); let (client, _stream, runtime): (_, _, Runtime) = NetworkBuilder::default() .peer_key(key) .listen_addresses(vec![addr_dummy.clone()]) .public_addresses(vec![addr_dummy]) .build() .await .unwrap(); spawn(runtime.run()); (client, dummy_peer) } pub fn keypair_from_byte(seed: u8) -> Keypair { let mut bytes = [0u8; 32]; bytes[0] = seed; identity::Keypair::ed25519_from_bytes(bytes).expect("Invalid keypair") } pub fn local_peer(peer_index: u8) -> (Keypair, Multiaddr) { let peer_id: Keypair = keypair_from_byte(peer_index); let port = 
get_available_port(); let local_listen_addr: Multiaddr = format!("/ip4/127.0.0.1/tcp/{port}").parse().unwrap(); (peer_id, local_listen_addr) } ================================================ FILE: crates/topos-p2p/tests/support/network.rs ================================================ use futures::{Stream, StreamExt}; use libp2p::{identity::Keypair, Multiaddr, PeerId}; use tokio::spawn; use topos_p2p::{network, Client}; pub use topos_test_sdk::p2p::local_peer; pub async fn start_node( (peer_key, _, peer_addr): (Keypair, u16, Multiaddr), known_peers: Vec<(PeerId, Multiaddr)>, ) -> TestNodeContext { let peer_id = peer_key.public().to_peer_id(); let (client, event_stream, event_loop) = network::builder() .peer_key(peer_key) .known_peers(&known_peers) .listen_addr(peer_addr.clone()) .exposed_addresses(peer_addr.clone()) .build() .await .unwrap(); spawn(event_loop.run()); let _ = client.start_listening(peer_addr.clone()).await; TestNodeContext { peer_id, client, stream: Box::new(event_stream), } } pub struct TestNodeContext { pub(crate) peer_id: PeerId, pub(crate) client: Client, stream: Box + Unpin + Send>, } impl TestNodeContext { pub(crate) async fn next_event(&mut self) -> Option { self.stream.next().await } } ================================================ FILE: crates/topos-sequencer/Cargo.toml ================================================ [package] name = "topos-sequencer" description = "Implementation of the Topos protocol" version = "0.1.0" edition = "2021" [lints] workspace = true [dependencies] hex.workspace = true serde = { workspace = true, features = ["derive"] } tokio = { workspace = true, features = ["full"] } tokio-util.workspace = true tracing-subscriber = {workspace = true, features = ["fmt", "std", "env-filter",]} tracing.workspace = true tracing-opentelemetry.workspace = true opentelemetry.workspace = true topos-crypto.workspace = true topos-wallet = { path = "../topos-wallet" } topos-core = { workspace = true, features = ["uci"] } 
topos-sequencer-subnet-runtime = { package = "topos-sequencer-subnet-runtime", path = "../topos-sequencer-subnet-runtime" } topos-tce-proxy = { package = "topos-tce-proxy", path = "../topos-tce-proxy" } ================================================ FILE: crates/topos-sequencer/src/app_context.rs ================================================ //! //! Application logic glue //! use crate::SequencerConfiguration; use opentelemetry::trace::FutureExt; use tokio::sync::mpsc; use tokio_util::sync::CancellationToken; use topos_sequencer_subnet_runtime::proxy::{SubnetRuntimeProxyCommand, SubnetRuntimeProxyEvent}; use topos_sequencer_subnet_runtime::SubnetRuntimeProxyWorker; use topos_tce_proxy::{worker::TceProxyWorker, TceProxyCommand, TceProxyEvent}; use tracing::{debug, error, info, info_span, warn, Instrument, Span}; use tracing_opentelemetry::OpenTelemetrySpanExt; /// Top-level transducer sequencer app context & driver (alike) /// /// Implements <...Host> traits for network and Api, listens for protocol events in events /// (store is not active component). /// /// In the end we shall come to design where this struct receives /// config+data as input and runs app returning data as output /// pub struct AppContext { pub config: SequencerConfiguration, pub subnet_runtime_proxy_worker: SubnetRuntimeProxyWorker, pub tce_proxy_worker: TceProxyWorker, } pub enum AppContextStatus { Finished, Restarting, } impl AppContext { /// Factory pub fn new( config: SequencerConfiguration, runtime_proxy_worker: SubnetRuntimeProxyWorker, tce_proxy_worker: TceProxyWorker, ) -> Self { Self { config, subnet_runtime_proxy_worker: runtime_proxy_worker, tce_proxy_worker, } } /// Main processing loop pub(crate) async fn run( &mut self, shutdown: (CancellationToken, mpsc::Sender<()>), ) -> AppContextStatus { loop { tokio::select! 
{ // Subnet event handling Ok(evt) = self.subnet_runtime_proxy_worker.next_event() => { debug!("runtime_proxy_worker.next_event(): {:?}", &evt); self.on_subnet_runtime_proxy_event(evt).await; }, // TCE event handling Ok(tce_evt) = self.tce_proxy_worker.next_event() => { debug!("tce_proxy_worker.next_event(): {:?}", &tce_evt); match tce_evt { TceProxyEvent::TceServiceFailure | TceProxyEvent::WatchCertificatesChannelFailed => { // Unrecoverable failure in interaction with the TCE. Sequencer needs to be restarted error!( "Unrecoverable failure in sequencer <-> tce interaction. Shutting down sequencer \ sequencer..." ); if let Err(e) = self.shutdown().await { warn!("Failed to shutdown: {e:?}"); } info!("Shutdown finished, restarting sequencer..."); return AppContextStatus::Restarting; }, _ => self.on_tce_proxy_event(tce_evt).await, } }, // Shutdown signal _ = shutdown.0.cancelled() => { info!("Shutting down Sequencer app context..."); if let Err(e) = self.shutdown().await { error!("Failed to shutdown the Sequencer app context: {e}"); } // Drop the sender to notify the Sequencer termination drop(shutdown.1); return AppContextStatus::Finished; } } } } async fn on_subnet_runtime_proxy_event(&mut self, evt: SubnetRuntimeProxyEvent) { debug!("on_subnet_runtime_proxy_event : {:?}", &evt); match evt { SubnetRuntimeProxyEvent::NewCertificate { cert, block_number: _, ctx, } => { let span = info_span!("Sequencer app context"); span.set_parent(ctx); if let Err(e) = self .tce_proxy_worker .send_command(TceProxyCommand::SubmitCertificate { cert, ctx: span.context(), }) .with_context(span.context()) .instrument(span) .await { error!("Unable to send tce proxy command {e}"); } } SubnetRuntimeProxyEvent::NewEra(_authorities) => { todo!() } } } async fn on_tce_proxy_event(&mut self, evt: TceProxyEvent) { if let TceProxyEvent::NewDeliveredCerts { certificates, ctx } = evt { let span = info_span!("Sequencer app context"); span.set_parent(ctx); async { // New certificates acquired from TCE 
for (cert, cert_position) in certificates { self.subnet_runtime_proxy_worker .eval(SubnetRuntimeProxyCommand::OnNewDeliveredCertificate { certificate: cert, position: cert_position, ctx: Span::current().context(), }) .await .expect("Propagate new delivered Certificate to the runtime"); } } .with_context(span.context()) .instrument(span) .await } } // Shutdown app async fn shutdown(&mut self) -> Result<(), Box> { self.tce_proxy_worker.shutdown().await?; self.subnet_runtime_proxy_worker.shutdown().await?; Ok(()) } } ================================================ FILE: crates/topos-sequencer/src/lib.rs ================================================ use crate::app_context::{AppContext, AppContextStatus}; use std::io::ErrorKind::InvalidInput; use std::process::ExitStatus; use tokio::{ spawn, sync::{ mpsc, oneshot::{self, Sender}, }, }; use tokio_util::sync::CancellationToken; use topos_core::uci::{CertificateId, SubnetId}; use topos_sequencer_subnet_runtime::{SubnetRuntimeProxyConfig, SubnetRuntimeProxyWorker}; use topos_tce_proxy::{worker::TceProxyWorker, TceProxyConfig}; use topos_wallet::SecretKey; use tracing::{debug, info, warn}; mod app_context; #[derive(Debug, Clone)] pub struct SequencerConfiguration { pub subnet_id: Option, pub public_key: Option>, pub subnet_jsonrpc_http: String, pub subnet_jsonrpc_ws: Option, pub subnet_contract_address: String, pub tce_grpc_endpoint: String, pub signing_key: SecretKey, pub verifier: u32, pub start_block: Option, } async fn launch_workers( config: SequencerConfiguration, ctx_send: Sender, subnet_id: SubnetId, ) -> Result<(), Box> { let (http_endpoint, mut ws_endpoint) = topos_sequencer_subnet_runtime::derive_endpoints(&config.subnet_jsonrpc_http)?; if let Some(config_ws_endpoint) = config.subnet_jsonrpc_ws.as_ref() { // Use explicitly provided websocket subnet endpoint ws_endpoint = config_ws_endpoint.clone(); } // Instantiate subnet runtime proxy, handling interaction with subnet node let subnet_runtime_proxy_worker = 
match SubnetRuntimeProxyWorker::new( SubnetRuntimeProxyConfig { subnet_id, http_endpoint, ws_endpoint, subnet_contract_address: config.subnet_contract_address.clone(), source_head_certificate_id: None, // Must be acquired later after TCE proxy is connected verifier: config.verifier, start_block: config.start_block, }, config.signing_key.clone(), ) .await { Ok(subnet_runtime_proxy) => subnet_runtime_proxy, Err(e) => { return Err(Box::new(e)); } }; // Get subnet checkpoints from subnet to pass them to the TCE node // It will retry using backoff algorithm, but if it fails (default max backoff elapsed time is 15 min) we can not proceed let target_subnet_stream_positions = match subnet_runtime_proxy_worker.get_checkpoints().await { Ok(checkpoints) => checkpoints, Err(e) => { return Err(Box::new(e)); } }; // Launch Tce proxy worker for handling interaction with TCE node // For initialization it will retry using backoff algorithm, but if it fails we can not proceed and we restart sequencer // Once it is initialized, TCE proxy will try reconnecting in the loop (with backoff) if TCE becomes unavailable let (tce_proxy_worker, source_head_certificate_id) = match TceProxyWorker::new(TceProxyConfig { subnet_id, tce_endpoint: config.tce_grpc_endpoint.clone(), positions: target_subnet_stream_positions, }) .await { Ok((tce_proxy_worker, mut source_head_certificate)) => { // FIXME: If TCE returns all zeros for the source head certificate, it means that it does not have // any information about the subnet. Until registration of the subnets with the topos subnet is implemented, // we get genesis block (and create genesis certificate) directly from the subnet block 0 if let Some((cert, _position)) = &mut source_head_certificate { if cert.id == CertificateId::default() { warn!( "Tce has not provided source head certificate, starting from subnet \ genesis block..." 
); source_head_certificate = None; } } info!( "TCE proxy client is starting for the source subnet {:?} from the head {:?}", subnet_id, source_head_certificate ); let source_head_certificate_id = source_head_certificate.map(|(cert, position)| (cert.id, position)); (tce_proxy_worker, source_head_certificate_id) } Err(e) => { panic!("Unable to create TCE Proxy: {e}"); } }; // Set source head certificate to know from where to // start producing certificates if let Err(e) = subnet_runtime_proxy_worker .set_source_head_certificate_id(source_head_certificate_id) .await { panic!("Unable to set source head certificate id: {e}"); } let _ = ctx_send.send(AppContext::new( config, subnet_runtime_proxy_worker, tce_proxy_worker, )); Ok(()) } pub async fn launch( config: SequencerConfiguration, ctx_send: Sender, ) -> Result<(), Box> { debug!("Starting topos-sequencer application"); // If subnetID is specified as command line argument, use it let subnet_id: SubnetId = if let Some(pk) = &config.public_key { SubnetId::try_from(&pk[1..]).expect("Can parse public key into a SubnetID") } else if let Some(subnet_id) = &config.subnet_id { if &subnet_id[0..2] != "0x" { return Err(Box::new(std::io::Error::new( InvalidInput, "Subnet id must start with `0x`", ))); } hex::decode(&subnet_id[2..])?.as_slice().try_into()? } // Get subnet id from the subnet node if not provided via the command line argument // It will retry using backoff algorithm, but if it fails (default max backoff elapsed time is 15 min) we can not proceed else { let http_endpoint = topos_sequencer_subnet_runtime::derive_endpoints(&config.subnet_jsonrpc_http) .map_err(|e| { Box::new(std::io::Error::new( InvalidInput, format!("Invalid subnet endpoint: {e}"), )) })? 
.0; match SubnetRuntimeProxyWorker::get_subnet_id( &http_endpoint, config.subnet_contract_address.as_str(), ) .await { Ok(subnet_id) => { info!("Retrieved subnet id from the subnet node {subnet_id}"); subnet_id } Err(e) => { panic!("Unable to get subnet id from the subnet {e}"); } } }; launch_workers(config, ctx_send, subnet_id).await } pub async fn run( config: SequencerConfiguration, shutdown: (CancellationToken, mpsc::Sender<()>), ) -> Result> { loop { let shutdown_appcontext = shutdown.clone(); let (ctx_send, mut ctx_recv) = oneshot::channel::(); let config = config.clone(); let launching = spawn(async move { let _ = launch(config, ctx_send).await; }); let app_context: Option = tokio::select! { app = &mut ctx_recv => { match app { Ok(context) => Some(context), Err(e) => { info!("Application initialized with error: {e}"); None } } }, // Shutdown signal _ = shutdown.0.cancelled() => { info!("Stopping Sequencer launch..."); drop(shutdown.1); launching.abort(); return Ok(ExitStatus::default()); } }; if let Some(mut app) = app_context { match app.run(shutdown_appcontext).await { AppContextStatus::Restarting => { // We finish the loop, restarting sequencer here warn!("Restarting sequencer..."); tokio::time::sleep(tokio::time::Duration::from_secs(10)).await; } AppContextStatus::Finished => { info!("Sequencer app finished, exiting..."); return Ok(ExitStatus::default()); } } } else { warn!("Sequencer startup sequencer failed, restarting sequencer..."); tokio::time::sleep(tokio::time::Duration::from_secs(10)).await; } } } ================================================ FILE: crates/topos-sequencer-subnet-client/Cargo.toml ================================================ [package] name = "topos-sequencer-subnet-client" version = "0.1.0" edition = "2021" [lints] workspace = true [dependencies] hex.workspace = true serde_json.workspace = true thiserror.workspace = true tracing.workspace = true tokio.workspace = true backoff.workspace = true serde = { workspace = true, 
features = ["derive"] } tiny-keccak.workspace = true ethers.workspace = true ethers-providers = { version = "2.0.8", features = ["ws"] } rustc-hex = "2.1.0" topos-core = { workspace = true, features = ["uci", "api"] } [build-dependencies] ethers.workspace = true ================================================ FILE: crates/topos-sequencer-subnet-client/src/lib.rs ================================================ pub mod subnet_contract; use crate::subnet_contract::{create_topos_core_contract_from_json, get_block_events}; use ethers::abi::ethabi::ethereum_types::{H160, U256}; use ethers::core::k256::ecdsa::SigningKey; use ethers::signers::Wallet; use ethers::types::TransactionReceipt; use ethers::{ abi::Token, core::rand::thread_rng, middleware::SignerMiddleware, providers::{Http, Provider, ProviderError, StreamExt, Ws, WsClientError}, signers::{LocalWallet, Signer, WalletError}, }; use ethers_providers::{Middleware, SubscriptionStream}; use serde::{Deserialize, Serialize}; use std::sync::Arc; use std::time::Duration; use topos_core::api::grpc::checkpoints::TargetStreamPosition; pub use topos_core::uci::{ Address, Certificate, CertificateId, ReceiptsRootHash, StateRoot, SubnetId, TxRootHash, CERTIFICATE_ID_LENGTH, SUBNET_ID_LENGTH, }; use tracing::{error, info, warn}; const PUSH_CERTIFICATE_GAS_LIMIT: u64 = 1000000; // Maximum backoff retry timeout in seconds (12 hours) const SUBNET_CONNECT_BACKOFF_TIMEOUT: Duration = Duration::from_secs(12 * 3600); const SUBNET_GET_CHECKPOINTS_BACKOFF_TIMEOUT: Duration = Duration::from_secs(3600); const SUBNET_GET_SUBNET_ID_BACKOFF_TIMEOUT: Duration = Duration::from_secs(3600); pub type BlockData = Vec; pub type BlockNumber = u64; pub type Hash = String; /// Event collected from the sending subnet #[derive(Debug, Clone, Serialize, Deserialize)] pub enum SubnetEvent { CrossSubnetMessageSent { target_subnet_id: SubnetId, source_subnet_id: SubnetId, nonce: u64, }, } #[derive(Default, Debug, Clone, Serialize, Deserialize)] pub struct 
BlockInfo { /// hash of the block. pub hash: Hash, /// hash of the parent block. pub parent_hash: Hash, /// block's number. pub number: u64, /// state root pub state_root: StateRoot, /// tx root hash pub tx_root_hash: TxRootHash, /// receipts root hash pub receipts_root_hash: ReceiptsRootHash, /// Subnet events collected in this block pub events: Vec, } #[derive(Debug, thiserror::Error)] pub enum Error { #[error("new finalized block not available")] BlockNotAvailable(u64), #[error("next stream block is not available")] StreamBlockNotAvailable, #[error("invalid block number: {0}")] InvalidBlockNumber(u64), #[error("block number not available")] BlockNumberNotAvailable, #[error("failed mutable cast")] MutableCastFailed, #[error("json error: {source}")] JsonError { #[from] source: serde_json::Error, }, #[error("json parse error")] JsonParseError, #[error("invalid url: {0}")] InvalidUrl(String), #[error("hex data decoding error: {0}")] HexDecodingError(rustc_hex::FromHexError), #[error("ethers provider error: {0}")] EthersProviderError(ProviderError), #[error("ethereum contract error: {0}")] ContractError(String), #[error("event decoding error: {0}")] EventDecodingError(String), #[error("ethereum event error: {0}")] EventError(String), #[error("invalid argument: {message}")] InvalidArgument { message: String }, #[error("wallet error: {0}")] WalletError(WalletError), #[error("invalid secret key: {0}")] InvalidKey(String), #[error("error with signing ethereum transaction")] EthereumTxSignError, #[error("web socket client error: {0}")] WsClientError(WsClientError), #[error("input output error: {source}")] InputOutputError { #[from] source: std::io::Error, }, #[error("invalid certificate id")] InvalidCertificateId, #[error("invalid checkpoints data")] InvalidCheckpointsData, } // Subnet client for listening events from subnet node pub struct SubnetClientListener { contract: subnet_contract::IToposCore>, provider: Arc>, } impl SubnetClientListener { /// Initialize a new 
Subnet client pub async fn new(ws_subnet_endpoint: &str, contract_address: &str) -> Result { info!( "Connecting to subnet node at endpoint: {}", ws_subnet_endpoint ); let ws = Provider::::connect(ws_subnet_endpoint) .await .map_err(Error::EthersProviderError)?; let provider = Arc::new(ws); // Initialize Topos Core Contract from json abi let contract = create_topos_core_contract_from_json(contract_address, provider.clone())?; Ok(SubnetClientListener { contract, provider }) } pub async fn new_block_subscription_stream( &self, ) -> Result>, Error> { self.provider .subscribe_blocks() .await .map_err(Error::EthersProviderError) } /// Subscribe and listen to runtime finalized blocks pub async fn get_finalized_block( &mut self, next_block_number: u64, ) -> Result { let latest_subnet_block_number = self .provider .get_block_number() .await .map_err(Error::EthersProviderError)?; info!( "Finalized block number: next={} and latest={}", next_block_number, latest_subnet_block_number ); if latest_subnet_block_number.as_u64() < next_block_number { return Err(Error::BlockNotAvailable(next_block_number)); } let block = self .provider .get_block(next_block_number) .await .map_err(Error::EthersProviderError)? .ok_or(Error::InvalidBlockNumber(next_block_number))?; let block_number = block .number .ok_or(Error::InvalidBlockNumber(next_block_number))?; let events = match get_block_events(&self.contract, block_number).await { Ok(events) => events, Err(Error::EventDecodingError(e)) => { // FIXME: Happens in block before subnet contract is deployed, seems like bug in ethers warn!( "Error decoding events from block {}: {e}. 
Topos smart contracts may not be \ deployed before the parsed block?", block_number ); Vec::new() } Err(e) => { error!("Unable to parse events from block {}: {e}", block_number); return Err(e); } }; // Make block info result from all collected info let block_info = BlockInfo { hash: block.hash.unwrap_or_default().to_string(), parent_hash: block.parent_hash.to_string(), number: block_number.to_owned().as_u64(), state_root: block.state_root.0, tx_root_hash: block.transactions_root.0, receipts_root_hash: block.receipts_root.0, events, }; info!( "Fetched new finalized block from subnet: {:?}", block_info.number ); Ok(block_info) } /// Subscribe and listen to runtime finalized blocks pub async fn get_subnet_block_number(&mut self) -> Result { self.provider .get_block_number() .await .map(|block_number| block_number.as_u64()) .map_err(Error::EthersProviderError) } pub async fn wait_for_new_block( &self, stream: &mut SubscriptionStream<'_, Ws, ethers::types::Block>, ) -> Result { if let Some(block) = stream.next().await { let block_number = block.number.ok_or(Error::BlockNumberNotAvailable)?; let events = match get_block_events(&self.contract, block_number).await { Ok(events) => events, Err(Error::EventDecodingError(e)) => { // FIXME: Happens in block before subnet contract is deployed, seems like bug in ethers warn!( "Error decoding events from block {}: {e}. 
Topos smart contracts may not \ be deployed before the parsed block?", block_number ); Vec::new() } Err(e) => { error!("Unable to parse events from block {}: {e}", block_number); return Err(e); } }; // Make block info result from all collected info let block_info = BlockInfo { hash: block.hash.unwrap_or_default().to_string(), parent_hash: block.parent_hash.to_string(), number: block_number.to_owned().as_u64(), state_root: block.state_root.0, tx_root_hash: block.transactions_root.0, receipts_root_hash: block.receipts_root.0, events, }; Ok(block_info) } else { Err(Error::StreamBlockNotAvailable) } } } /// Create subnet client listener and open connection to the subnet /// Retry until connection is valid pub async fn connect_to_subnet_listener_with_retry( ws_runtime_endpoint: &str, subnet_contract_address: &str, ) -> Result { info!( "Connecting to subnet endpoint to listen events from {} using backoff strategy...", ws_runtime_endpoint ); let op = || async { // Create subnet listener match SubnetClientListener::new(ws_runtime_endpoint, subnet_contract_address).await { Ok(subnet_listener) => Ok(subnet_listener), Err(e) => { error!("Unable to instantiate the subnet client listener: {e}"); Err(new_subnet_client_proxy_backoff_err(e)) } } }; let backoff_configuration = backoff::ExponentialBackoff { max_elapsed_time: Some(SUBNET_CONNECT_BACKOFF_TIMEOUT), ..Default::default() }; backoff::future::retry(backoff_configuration, op).await } // Subnet client for calling target network smart contract pub struct SubnetClient { pub eth_admin_address: H160, contract: subnet_contract::IToposCore, Wallet>>, } impl SubnetClient { /// Polling interval for event filters and pending transactions pub const NODE_POLLING_INTERVAL: Duration = Duration::from_millis(2000u64); /// Initialize a new Subnet client pub async fn new( http_subnet_endpoint: &str, eth_admin_secret_key: Option>, contract_address: &str, ) -> Result { info!( "Connecting to subnet node at endpoint: {}", http_subnet_endpoint ); 
let http = Provider::::try_from(http_subnet_endpoint) .map_err(|e| Error::InvalidUrl(e.to_string()))? .interval(SubnetClient::NODE_POLLING_INTERVAL); let wallet: LocalWallet = if let Some(eth_admin_secret_key) = ð_admin_secret_key { hex::encode(eth_admin_secret_key) .parse() .map_err(Error::WalletError)? } else { // Dummy random key, will not be used to sign transactions LocalWallet::new(&mut thread_rng()) }; let chain_id = http .get_chainid() .await .map_err(Error::EthersProviderError)?; let client = Arc::new(SignerMiddleware::new( http, wallet.clone().with_chain_id(chain_id.as_u64()), )); // Initialize Topos Core Contract from json abi let contract = create_topos_core_contract_from_json(contract_address, client)?; let eth_admin_address = if let Some(eth_admin_secret_key) = eth_admin_secret_key { match subnet_contract::derive_eth_address(ð_admin_secret_key) { Ok(address) => address, Err(e) => { error!( "Unable to derive admin address from secret key, error instantiating \ subnet client: {}", e ); return Err(e); } } } else { Default::default() }; Ok(SubnetClient { eth_admin_address, contract, }) } pub async fn push_certificate( &self, cert: &Certificate, cert_position: u64, ) -> Result, Error> { let prev_cert_id: Token = Token::FixedBytes(cert.prev_id.as_array().to_vec()); let source_subnet_id: Token = Token::FixedBytes(cert.source_subnet_id.into()); let state_root: Token = Token::FixedBytes(cert.state_root.to_vec()); let tx_root: Token = Token::FixedBytes(cert.tx_root_hash.to_vec()); let receipt_root: Token = Token::FixedBytes(cert.receipts_root_hash.to_vec()); let target_subnets: Token = Token::Array( cert.target_subnets .iter() .map(|target_subnet| Token::FixedBytes((*target_subnet).into())) .collect::>(), ); let verifier = Token::Uint(U256::from(cert.verifier)); let cert_id: Token = Token::FixedBytes(cert.id.as_array().to_vec()); let stark_proof: Token = Token::Bytes(cert.proof.clone()); let signature: Token = Token::Bytes(cert.signature.clone()); let 
cert_position = U256::from(cert_position); let encoded_cert_bytes = ethers::abi::encode(&[ prev_cert_id, source_subnet_id, state_root, tx_root, receipt_root, target_subnets, verifier, cert_id, stark_proof, signature, ]); let tx = self .contract .push_certificate(encoded_cert_bytes.into(), cert_position) .gas(PUSH_CERTIFICATE_GAS_LIMIT) .legacy(); // Polygon Edge only supports legacy transactions let receipt = tx .send() .await .map_err(|e| { error!("Unable to push certificate: {e}"); Error::ContractError(e.to_string()) })? .await .map_err(Error::EthersProviderError)?; Ok(receipt) } /// Ask subnet for latest pushed certificates, for every source subnet /// Returns list of latest stream positions for every source subnet pub async fn get_checkpoints( &self, target_subnet_id: &topos_core::uci::SubnetId, ) -> Result, Error> { let op = || async { let mut target_stream_positions: Vec = Vec::new(); let stream_positions = self.contract.get_checkpoints().call().await.map_err(|e| { error!("Unable to get checkpoints: {e}"); Error::ContractError(e.to_string()) })?; for position in stream_positions { target_stream_positions.push(TargetStreamPosition { target_subnet_id: *target_subnet_id, certificate_id: Some( TryInto::<[u8; CERTIFICATE_ID_LENGTH]>::try_into(position.cert_id) .map_err(|_| Error::InvalidCheckpointsData)? .into(), ), source_subnet_id: TryInto::<[u8; SUBNET_ID_LENGTH]>::try_into( position.source_subnet_id, ) .map_err(|_| Error::InvalidCheckpointsData)? 
.into(), position: position.position.as_u64(), }); } Ok(target_stream_positions) }; let backoff_configuration = backoff::ExponentialBackoff { max_elapsed_time: Some(SUBNET_GET_CHECKPOINTS_BACKOFF_TIMEOUT), ..Default::default() }; backoff::future::retry(backoff_configuration, op).await } /// Ask subnet for its subnet id pub async fn get_subnet_id(&self) -> Result { let op = || async { let subnet_id = self .contract .network_subnet_id() .call() .await .map_err(|e| { error!("Unable to query network subnet id: {e}"); Error::ContractError(e.to_string()) })?; Ok(SubnetId::from_array(subnet_id)) }; let backoff_configuration = backoff::ExponentialBackoff { max_elapsed_time: Some(SUBNET_GET_SUBNET_ID_BACKOFF_TIMEOUT), ..Default::default() }; backoff::future::retry(backoff_configuration, op).await } } /// Create new backoff library error based on error that happened pub(crate) fn new_subnet_client_proxy_backoff_err( err: E, ) -> backoff::Error { // Retry according to backoff policy backoff::Error::Transient { err, retry_after: None, } } /// Create subnet client and open connection to the subnet /// Retry until connection is valid pub async fn connect_to_subnet_with_retry( http_subnet_endpoint: &str, signing_key: Option>, contract_address: &str, ) -> Result { info!( "Connecting to subnet endpoint {} using backoff strategy...", http_subnet_endpoint ); let op = || async { match SubnetClient::new(http_subnet_endpoint, signing_key.clone(), contract_address).await { Ok(subnet_client) => Ok(subnet_client), Err(e) => { error!( "Unable to instantiate http subnet client to endpoint {}: {e}", http_subnet_endpoint, ); Err(new_subnet_client_proxy_backoff_err(e)) } } }; let backoff_configuration = backoff::ExponentialBackoff { max_elapsed_time: Some(SUBNET_CONNECT_BACKOFF_TIMEOUT), ..Default::default() }; backoff::future::retry(backoff_configuration, op).await } ================================================ FILE: crates/topos-sequencer-subnet-client/src/subnet_contract.rs 
================================================ use crate::{Error, SubnetEvent}; use ethers::abi::ethabi::ethereum_types::{H160, U64}; use ethers::contract::ContractError; use ethers::signers::LocalWallet; use ethers::{ prelude::abigen, providers::{Middleware, Provider, Ws}, signers::Signer, }; use std::sync::Arc; use tracing::info; abigen!( IToposCore, "npm:@topos-protocol/topos-smart-contracts@3.4.0/artifacts/contracts/interfaces/IToposCore.\ sol/IToposCore.json" ); pub(crate) fn create_topos_core_contract_from_json( contract_address: &str, client: Arc, ) -> Result, Error> { let address: ethers::types::Address = contract_address.parse().map_err(Error::HexDecodingError)?; let contract = IToposCore::new(address, client); Ok(contract) } pub(crate) async fn get_block_events( contract: &IToposCore>, block_number: U64, ) -> Result, Error> { // FIXME: There is some ethers issue when parsing events // from genesis block so skip it - we certainly don't expect any valid event here if block_number.as_u64() == 0 { return Ok(Vec::new()); } // Parse only event from this particular block let events = contract .events() .from_block(block_number) .to_block(block_number); let topos_core_events = events.query_with_meta().await.map_err(|e| { match e { ContractError::DecodingError(e) => { // FIXME: events have decoding error in the blocks before contract is deployed Error::EventDecodingError(e.to_string()) } _ => Error::ContractError(e.to_string()), } })?; let mut result = Vec::new(); for event in topos_core_events { if let (IToposCoreEvents::CrossSubnetMessageSentFilter(f), meta) = event { info!( "Received CrossSubnetMessageSentFilter event: {f:?}, meta {:?}", meta ); result.push(SubnetEvent::CrossSubnetMessageSent { target_subnet_id: f.target_subnet_id.into(), source_subnet_id: f.source_subnet_id.into(), nonce: f.nonce.as_u64(), }) } else { // Ignored other events until we need them } } Ok(result) } pub fn derive_eth_address(secret_key: &[u8]) -> Result { let signer = 
hex::encode(secret_key) .parse::() .map_err(|e| Error::InvalidKey(e.to_string()))?; Ok(signer.address()) } ================================================ FILE: crates/topos-sequencer-subnet-runtime/Cargo.toml ================================================ [package] name = "topos-sequencer-subnet-runtime" version = "0.1.0" edition = "2021" [lints] workspace = true [dependencies] byteorder.workspace = true hex.workspace = true rand = { workspace = true, features = ["default"] } rand_core.workspace = true serde = { workspace = true, features = ["derive"] } thiserror.workspace = true tokio = { workspace = true, features = [ "io-util", "io-std", "macros", "rt", "rt-multi-thread", "fs", "time", "sync", ] } tracing-subscriber = { workspace = true, features = ["env-filter", "fmt"] } tracing.workspace = true tracing-opentelemetry.workspace = true opentelemetry.workspace = true topos-core = { workspace = true, features = ["uci"] } topos-sequencer-subnet-client = { package = "topos-sequencer-subnet-client", path = "../topos-sequencer-subnet-client" } topos-crypto = {package = "topos-crypto", path = "../topos-crypto"} [dev-dependencies] rstest = { workspace = true, features = ["async-timeout"] } serde_json.workspace = true test-log.workspace = true env_logger.workspace = true secp256k1.workspace = true serial_test.workspace = true tiny-keccak.workspace = true ethers.workspace = true fs_extra = "1.3" topos-test-sdk = { path = "../topos-test-sdk/" } ================================================ FILE: crates/topos-sequencer-subnet-runtime/src/certification.rs ================================================ use crate::Error; use std::collections::{HashSet, LinkedList}; use std::fmt::{Debug, Formatter}; use std::sync::Arc; use tokio::sync::Mutex; use topos_core::uci::{Certificate, CertificateId, SubnetId}; use topos_sequencer_subnet_client::{BlockInfo, SubnetEvent}; use tracing::debug; pub struct Certification { /// Last known certificate id for subnet pub 
last_certificate_id: Option, /// Subnet id for which certificates are generated pub subnet_id: SubnetId, /// Type of verifier used pub verifier: u32, /// Key for signing certificates, currently secp256k1 signing_key: Vec, /// Optional synchronization from particular block number pub start_block: Option, /// Blocks received from subnet, not yet certified. We keep them in memory until we can /// generate certificate for them. They are kept as linked list to maintain /// order of blocks, latest received blocks are at the end of the list finalized_blocks: LinkedList, } impl Debug for Certification { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { f.debug_struct("Certification instance").finish() } } impl Certification { pub const BLOCK_HISTORY_LENGTH: usize = 256; pub fn new( subnet_id: &SubnetId, source_head_certificate_id: Option, verifier: u32, signing_key: Vec, start_block: Option, ) -> Result>, crate::Error> { Ok(Arc::new(Mutex::from(Self { last_certificate_id: source_head_certificate_id, finalized_blocks: LinkedList::::new(), subnet_id: *subnet_id, verifier, signing_key, start_block, }))) } /// Generation of Certificates pub(crate) async fn generate_certificates(&mut self) -> Result, Error> { let subnet_id = self.subnet_id; let mut generated_certificates = Vec::new(); // Keep account of blocks with generated certificates so that we can remove them from // finalized blocks let mut certified_blocks: Vec = Vec::with_capacity(self.finalized_blocks.len()); // For every block, create one certificate for block_info in &self.finalized_blocks { // Parse target subnets from events let mut target_subnets: HashSet = HashSet::new(); for event in &block_info.events { match event { SubnetEvent::CrossSubnetMessageSent { target_subnet_id, .. 
} => { target_subnets.insert(*target_subnet_id); } } } // Get the id of the previous Certificate from local history let previous_cert_id: CertificateId = match self.last_certificate_id { Some(cert_id) => cert_id, None => { // FIXME: This is genesis certificate we are generating because we are unable // to retrieve one from TCE yet CertificateId::default() } }; // TODO: acquire proof let proof = Vec::new(); let mut certificate = Certificate::new( previous_cert_id, subnet_id, block_info.state_root, block_info.tx_root_hash, block_info.receipts_root_hash, &target_subnets.into_iter().collect::>(), self.verifier, proof, ) .map_err(|e| Error::CertificateGenerationError(e.to_string()))?; certificate .update_signature(self.get_signing_key()) .map_err(Error::CertificateSigningError)?; generated_certificates.push(certificate); certified_blocks.push(block_info.number); } // Check for inconsistencies let is_genesis_certificate: bool = self .finalized_blocks .front() .map(|b| b.number == 0 || self.start_block.is_some()) .unwrap_or(false); let last_known_certificate_id = if is_genesis_certificate { // We are creating genesis certificate, there were no previous certificates // In case where start block is present, we also consider start block as genesis certificate, // so it has no history (prev cert id all 0) CertificateId::default() } else { self.last_certificate_id .ok_or(Error::InvalidPreviousCertificateId)? 
}; for new_cert in &generated_certificates { if last_known_certificate_id == new_cert.id { // This should not happen panic!("Same certificate generated multiple times: {new_cert:?}"); } } // Set info about latest known certificate for subnet if let Some(generated_certificate) = generated_certificates.iter().last() { self.last_certificate_id = Some(generated_certificate.id); } // Remove processed blocks for processed_block_number in certified_blocks { let front_block_number = self.finalized_blocks.front().map(|front| front.number); if front_block_number.is_some() { if Some(processed_block_number) == front_block_number { debug!( "Block {processed_block_number} processed and removed from the block list" ); self.finalized_blocks.pop_front(); } else { panic!( "Block history is inconsistent, this should not happen! \ processed_block_number: {processed_block_number}, front_number: {:?}", front_block_number ); } } } Ok(generated_certificates) } pub fn get_signing_key(&self) -> &[u8] { self.signing_key.as_slice() } /// Expand short block history. Remove older blocks pub fn append_blocks(&mut self, blocks: Vec) { self.finalized_blocks.extend(blocks); } } ================================================ FILE: crates/topos-sequencer-subnet-runtime/src/lib.rs ================================================ //! implementation of Topos Reliable Broadcast to be used in the Transmission Control Engine (TCE) //! //! Abstracted from actual transport implementation. //! Abstracted from actual storage implementation. //! use proxy::SubnetRuntimeProxy; use std::sync::Arc; use thiserror::Error; use tokio::sync::Mutex; use tokio::sync::{mpsc, oneshot}; use topos_core::api::grpc::checkpoints::TargetStreamPosition; use topos_core::uci::{CertificateId, SubnetId}; pub type Peer = String; pub mod certification; pub mod proxy; use crate::proxy::{SubnetRuntimeProxyCommand, SubnetRuntimeProxyEvent}; // Optimal Size of event channel is yet to be determined. 
Now just putting a number const EVENT_SUBSCRIBER_CHANNEL_SIZE: usize = 64; #[derive(Debug, Error)] pub enum Error { #[error("Peers error: {err}")] BadPeers { err: String }, #[error("Command error: {err}")] BadCommand { err: String }, #[error("Tokio join error: {source}")] TokioError { source: tokio::task::JoinError }, #[error("Failed to acquire locked object")] UnlockError, #[error("Unexpected type of transaction")] InvalidTransactionType, #[error("subnet client error: {source}")] SubnetError { #[from] source: topos_sequencer_subnet_client::Error, }, #[error("Unable to retrieve key error: {source}")] UnableToRetrieveKey { #[from] source: topos_crypto::Error, }, #[error("Unable to execute shutdown on the subnet runtime proxy: {0}")] ShutdownCommunication(mpsc::error::SendError>), #[error("Shutdown channel receive error {0}")] ShutdownSignalReceiveError(tokio::sync::oneshot::error::RecvError), #[error("Invalid previous certificate id")] InvalidPreviousCertificateId, #[error("Ill formed subnet history")] IllFormedSubnetHistory, #[error("Unable to create certificate {0}")] CertificateGenerationError(String), #[error("Certificate signing error: {0}")] CertificateSigningError(topos_core::uci::Error), #[error("Unable to set source head certificate: {0}")] SourceHeadCertChannelError(String), #[error("Unable to send command: {0}")] CommandEvalChannelError(String), #[error("Invalid endpoint: {0}")] InvalidEndpoint(String), } #[derive(Debug, Clone)] pub struct SubnetRuntimeProxyConfig { pub subnet_id: SubnetId, pub http_endpoint: String, pub ws_endpoint: String, pub subnet_contract_address: String, pub source_head_certificate_id: Option, pub verifier: u32, pub start_block: Option, } /// Thread safe client to the protocol aggregate #[derive(Debug)] pub struct SubnetRuntimeProxyWorker { runtime_proxy: Arc>, commands: mpsc::Sender, events: mpsc::Receiver, } impl SubnetRuntimeProxyWorker { /// Creates new instance of the aggregate and returns proxy to it. 
/// New client instances to the same aggregate can be cloned from the returned one. /// Aggregate is spawned as new task. pub async fn new( config: SubnetRuntimeProxyConfig, signing_key: Vec, ) -> Result { let runtime_proxy = SubnetRuntimeProxy::spawn_new(config, signing_key)?; let (events_sender, events_rcv) = mpsc::channel::(EVENT_SUBSCRIBER_CHANNEL_SIZE); let commands; { let mut runtime_proxy = runtime_proxy.lock().await; commands = runtime_proxy.commands_channel.clone(); runtime_proxy.events_subscribers.push(events_sender); } Ok(Self { runtime_proxy, commands, events: events_rcv, }) } /// Schedule command for execution pub async fn eval(&self, cmd: SubnetRuntimeProxyCommand) -> Result<(), Error> { self.commands .send(cmd) .await .map_err(|e| Error::CommandEvalChannelError(e.to_string())) } /// Pollable (in select!) events' listener pub async fn next_event(&mut self) -> Result { let event = self.events.recv().await; Ok(event.unwrap()) } /// Shutdown subnet runtime proxy worker pub async fn shutdown(&mut self) -> Result<(), Error> { let runtime_proxy = self.runtime_proxy.lock().await; runtime_proxy.shutdown().await } pub async fn get_checkpoints(&self) -> Result, Error> { let runtime_proxy = self.runtime_proxy.lock().await; runtime_proxy.get_checkpoints().await } pub async fn get_subnet_id( http_endpoint: &str, contract_address: &str, ) -> Result { SubnetRuntimeProxy::get_subnet_id(http_endpoint, contract_address).await } pub async fn set_source_head_certificate_id( &self, source_head_certificate_id: Option<(CertificateId, u64)>, ) -> Result<(), Error> { let mut runtime_proxy = self.runtime_proxy.lock().await; runtime_proxy .set_source_head_certificate_id(source_head_certificate_id) .await } } /// From the user provided subnet node endpoint (could be ip:port, http://ip:port, https://ip:port) /// derive http and ws endpoints that will be used to communicate with the subnet pub fn derive_endpoints(endpoint: &str) -> Result<(String, String), Error> { let 
http_endpoint: String; let ws_endpoint: String; if endpoint.starts_with("https") { // Use https endpoint as it is // Derive wss endpoint http_endpoint = endpoint.to_string(); ws_endpoint = http_endpoint.replace("https", "wss") + "/ws"; } else if endpoint.starts_with("http") { // Use http endpoint as it is // Derive ws endpoint http_endpoint = endpoint.to_string(); ws_endpoint = http_endpoint.replace("http", "ws") + "/ws"; } else { http_endpoint = format!("http://{}", endpoint); ws_endpoint = format!("ws://{}/ws", endpoint); } Ok((http_endpoint, ws_endpoint)) } pub mod testing { use super::*; pub fn get_runtime( runtime_proxy_worker: &SubnetRuntimeProxyWorker, ) -> Arc> { runtime_proxy_worker.runtime_proxy.clone() } } #[cfg(test)] mod tests { #[test] fn test_derive_endpoints() { use super::derive_endpoints; let (http_endpoint, ws_endpoint) = derive_endpoints("10.10.10.13:321").unwrap(); assert_eq!( (http_endpoint.as_str(), ws_endpoint.as_str()), ("http://10.10.10.13:321", "ws://10.10.10.13:321/ws") ); let (http_endpoint, ws_endpoint) = derive_endpoints("http://www.example.com").unwrap(); assert_eq!( (http_endpoint.as_str(), ws_endpoint.as_str()), ("http://www.example.com", "ws://www.example.com/ws") ); let (http_endpoint, ws_endpoint) = derive_endpoints("https://www.example.com:123").unwrap(); assert_eq!( (http_endpoint.as_str(), ws_endpoint.as_str()), ( "https://www.example.com:123", "wss://www.example.com:123/ws" ) ); } } ================================================ FILE: crates/topos-sequencer-subnet-runtime/src/proxy.rs ================================================ //! Protocol implementation guts. //! 
use crate::{certification::Certification, Error, SubnetRuntimeProxyConfig};
use opentelemetry::trace::FutureExt;
use opentelemetry::Context;
use serde::{Deserialize, Serialize};
use std::fmt::{Debug, Formatter};
use std::sync::Arc;
use tokio::sync::Mutex;
use tokio::sync::{mpsc, oneshot};
use tokio::time::Duration;
use topos_core::api::grpc::checkpoints::TargetStreamPosition;
use topos_core::uci::{Certificate, CertificateId, SubnetId};
use topos_sequencer_subnet_client::{BlockInfo, SubnetClient, SubnetClientListener};
use tracing::{debug, error, field, info, info_span, instrument, warn, Instrument, Span};
use tracing_opentelemetry::OpenTelemetrySpanExt;

/// Placeholder for the authority set of an era (no fields yet).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Authorities {
    // TODO: proper dependencies to block type etc
}

/// Events emitted by the proxy to its registered subscribers.
// NOTE(review): payload generic parameters (`Box<…>`, `Vec<…>`) were stripped by
// the extraction.
#[derive(Debug, Clone)]
pub enum SubnetRuntimeProxyEvent {
    /// New certificate is generated
    NewCertificate {
        cert: Box,
        block_number: u64,
        ctx: Context,
    },
    /// New set of authorities in charge of the threshold signature
    NewEra(Vec),
}

/// Commands accepted by the proxy command task.
#[derive(Debug)]
pub enum SubnetRuntimeProxyCommand {
    /// Upon receiving a new delivered Certificate from the TCE
    OnNewDeliveredCertificate {
        certificate: Certificate,
        position: u64,
        ctx: Context,
    },
}

/// Long-lived component bridging the subnet node (via RPC/WS) and the TCE:
/// one task listens for finalized blocks and produces certificates, another
/// executes commands (e.g. pushing delivered certificates back on chain).
pub struct SubnetRuntimeProxy {
    pub commands_channel: mpsc::Sender,
    pub events_subscribers: Vec>,
    pub config: SubnetRuntimeProxyConfig,
    pub certification: Arc>,
    command_task_shutdown: mpsc::Sender>,
    block_task_shutdown: mpsc::Sender>,
    // One-shot sender consumed by `set_source_head_certificate_id`.
    source_head_certificate_id_sender: Option>>,
}

impl Debug for SubnetRuntimeProxy {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("RuntimeProxy instance").finish()
    }
}

impl SubnetRuntimeProxy {
    /// Spawn the block-processing and command-processing tasks and return a
    /// shared handle to the proxy.
    pub fn spawn_new(
        config: SubnetRuntimeProxyConfig,
        signing_key: Vec,
    ) -> Result>, crate::Error> {
        info!(
            "Spawning new runtime proxy, http endpoint: {}, ws endpoint {} ethereum contract \
             address: {}, ",
            &config.http_endpoint, &config.ws_endpoint, &config.subnet_contract_address
        );
        let (command_sender, mut command_rcv)
= mpsc::channel::(256);
        let ws_runtime_endpoint = config.ws_endpoint.clone();
        let http_runtime_endpoint = config.http_endpoint.clone();
        let subnet_contract_address = Arc::new(config.subnet_contract_address.clone());
        // Separate shutdown channels: each task acknowledges via the oneshot it receives.
        let (command_task_shutdown_channel, mut command_task_shutdown) =
            mpsc::channel::>(1);
        let (block_task_shutdown_channel, mut block_task_shutdown) =
            mpsc::channel::>(1);
        let (source_head_certificate_id_sender, source_head_certificate_id_received) =
            oneshot::channel();
        let certification = Certification::new(
            &config.subnet_id,
            None,
            config.verifier,
            signing_key.clone(),
            config.start_block,
        )?;
        let runtime_proxy = Arc::new(Mutex::from(Self {
            commands_channel: command_sender,
            events_subscribers: Vec::new(),
            config: config.clone(),
            command_task_shutdown: command_task_shutdown_channel,
            block_task_shutdown: block_task_shutdown_channel,
            certification: certification.clone(),
            source_head_certificate_id_sender: Some(source_head_certificate_id_sender),
        }));
        // Runtime block task
        {
            let runtime_proxy = runtime_proxy.clone();
            let subnet_contract_address = subnet_contract_address.clone();
            tokio::spawn(async move {
                // If the `start_block` sequencer parameter is provided, first block retrieved from blockchain (for genesis certificate)
                // will be `start_block`. `default_block_sync_start` is hence `start_block`-1
                // as first block retrieved from subnet node is `latest_acquired_subnet_block_number` + 1
                let default_block_sync_start: i128 = config
                    .start_block
                    .map(|block_number| (block_number - 1) as i128)
                    .unwrap_or(-1);
                let mut latest_acquired_subnet_block_number: i128 = default_block_sync_start;
                {
                    // To start producing certificates, we need to know latest delivered or pending certificate id from TCE
                    // Lock certification component and wait until we acquire first certificate id for this network
                    let mut certification = certification.lock().await;
                    if certification.last_certificate_id.is_none() {
                        info!(
                            "Waiting for the source head certificate id to continue with \
                             certificate generation"
                        );
                        // Wait for last_certificate_id retrieved on TCE component setup
                        match source_head_certificate_id_received.await {
                            Ok(certificate_and_position) => {
                                info!(
                                    "Source head certificate id received {:?}",
                                    certificate_and_position
                                );
                                // If tce source head position is provided, continue synchronizing from it
                                // If the `start_block` sequencer parameter is provided and tce source head is missing,
                                // we should start synchronizing from that block instead of genesis
                                // If neither tce source head position nor start_block parameters are provided,
                                // sync should start form -1, so that first fetched is subnet genesis block
                                let cert_id = certificate_and_position.map(|(id, _position)| id);
                                let position: i128 = certificate_and_position
                                    .map(|(_id, position)| position as i128)
                                    .unwrap_or(default_block_sync_start);
                                // Certificate generation is now ready to run
                                certification.last_certificate_id = cert_id;
                                latest_acquired_subnet_block_number = position;
                            }
                            Err(e) => {
                                // This panic should not happen unless other task retrieving source head certificate has failed
                                // In that case, close the tread with panic
                                panic!(
                                    "Failed to get source head certificate, unable to proceed \
                                     with certificate generation: {e}"
                                )
                            }
                        }
                    }
                }
                // Establish the connection with the Subnet
                let
subnet_listener: Option = tokio::select! {
                    // Create subnet client
                    Ok(client) = topos_sequencer_subnet_client::connect_to_subnet_listener_with_retry(
                        ws_runtime_endpoint.as_str(),
                        subnet_contract_address.as_str(),
                    ) => {
                        Some(client)
                    }
                    _ = block_task_shutdown.recv() => {
                        None
                    }
                };
                let mut subnet_listener = subnet_listener.expect("subnet listener");
                // Sync missing blocks
                loop {
                    let current_subnet_block_number: Option = tokio::select! {
                        block_number = subnet_listener.get_subnet_block_number() => {
                            match block_number {
                                Ok(block_number) => {
                                    Some(block_number as i128)
                                }
                                Err(e) => {
                                    // Transient RPC failure: back off and retry the outer loop.
                                    error!("Failed to get subnet block number: {:?}, trying again...", e);
                                    tokio::time::sleep(Duration::from_secs(10)).await;
                                    continue;
                                }
                            }
                        }
                        _ = block_task_shutdown.recv() => {
                            info!("Shutting down sync missing blocks task");
                            return;
                        }
                    };
                    let current_subnet_block_number = current_subnet_block_number
                        .expect("need valid subnet block number to start syncing");
                    if latest_acquired_subnet_block_number == current_subnet_block_number {
                        info!(
                            "Finished synchronization of blocks, latest block received is {}",
                            latest_acquired_subnet_block_number
                        );
                        break;
                    }
                    info!(
                        "Latest retrieved subnet block is {}, current subnet block is {}",
                        latest_acquired_subnet_block_number, current_subnet_block_number
                    );
                    // Sync historical blocks
                    while latest_acquired_subnet_block_number < current_subnet_block_number {
                        let next_block_number = latest_acquired_subnet_block_number + 1;
                        info!("Retrieving historical block {}", next_block_number);
                        tokio::select! {
                            result = SubnetRuntimeProxy::retrieve_and_process_block(
                                runtime_proxy.clone(),
                                &mut subnet_listener,
                                certification.clone(),
                                next_block_number as u64,
                            ) => {
                                if let Err(e) = result {
                                    error!("Unable to perform initial subnet block sync: {e}, trying again...");
                                    tokio::time::sleep(Duration::from_secs(10)).await;
                                    continue;
                                } else {
                                    // Only advance the cursor once the block was fully processed.
                                    latest_acquired_subnet_block_number = next_block_number;
                                }
                            }
                            _ = block_task_shutdown.recv() => {
                                info!("Shutting down sync missing blocks task during synchronization");
                                return;
                            }
                        }
                        // Give it a little rest for other threads to do their job
                        tokio::time::sleep(Duration::from_millis(20)).await;
                    }
                }
                // Create a new subscription stream to listen for new blocks from subnet node
                let mut subscription_stream = match subnet_listener.new_block_subscription_stream().await {
                    Ok(stream) => stream,
                    Err(e) => {
                        panic!(
                            "Failed to open subnet node block subscription stream, unable to \
                             proceed with certificate generation: {e}"
                        )
                    }
                };
                info!("Block subscription stream opened, listening for new blocks...");
                // Go to standard mode of listening for new blocks
                let shutdowned: Option> = loop {
                    tokio::select!
{
                        result = subnet_listener.wait_for_new_block(&mut subscription_stream) => {
                            match result {
                                Ok(block) => {
                                    let new_block_number = block.number as i128;
                                    info!("Successfully received new block {} from the subnet subscription", new_block_number);
                                    if let Err(e) = SubnetRuntimeProxy::process_block(
                                        runtime_proxy.clone(),
                                        certification.clone(),
                                        block
                                    ).await {
                                        // A processing failure ends block production entirely.
                                        error!("Failed to process next block: {}, exit block production!", e);
                                        break None;
                                    }
                                }
                                Err(e) => {
                                    error!("Failed to retrieve next block: {}, trying again soon", e);
                                    tokio::time::sleep(Duration::from_millis(1000)).await;
                                    continue;
                                }
                            }
                        }
                        shutdown = block_task_shutdown.recv() => {
                            break shutdown;
                        }
                    }
                };
                if let Some(sender) = shutdowned {
                    info!("Shutting down subnet runtime block processing task");
                    // Acknowledge the shutdown request; the receiver may already be gone.
                    _ = sender.send(());
                } else {
                    warn!("Shutting down subnet runtime block processing task due to error");
                }
            })
        };
        // Runtime command task
        tokio::spawn(async move {
            // Establish the connection with the Subnet
            let mut subnet_client: Option = tokio::select! {
                // Create subnet client
                Ok(client) = topos_sequencer_subnet_client::connect_to_subnet_with_retry(
                    http_runtime_endpoint.as_ref(),
                    Some(signing_key.clone()),
                    subnet_contract_address.as_str(),
                ) => {
                    info!("Connected to subnet node {}", &http_runtime_endpoint);
                    Some(client)
                }
                _ = command_task_shutdown.recv() => {
                    None
                }
            };
            let shutdowned: Option> = loop {
                tokio::select! {
                    // Poll runtime proxy commands channel
                    cmd = command_rcv.recv() => {
                        Self::on_command(&config, subnet_client.as_mut().unwrap(), cmd).await;
                    },
                    shutdown = command_task_shutdown.recv() => {
                        break shutdown;
                    }
                }
            };
            if let Some(sender) = shutdowned {
                info!("Shutting down subnet runtime command processing task");
                _ = sender.send(());
            } else {
                warn!("Shutting down subnet runtime command processing task due to error");
            }
        });
        Ok(runtime_proxy)
    }

    /// Fetch one finalized block by number, feed it to the certification
    /// component and emit any certificates produced. Used during catch-up sync.
    async fn retrieve_and_process_block(
        subnet_runtime_proxy: Arc>,
        subnet_listener: &mut SubnetClientListener,
        certification: Arc>,
        next_block: u64,
    ) -> Result<(), Error> {
        match subnet_listener.get_finalized_block(next_block).await {
            Ok(block_info) => {
                let block_number = block_info.number;
                info!(
                    "Successfully fetched the finalized block {block_number} from the subnet \
                     runtime"
                );
                let mut certification = certification.lock().await;
                // Update certificate block history
                certification.append_blocks(vec![block_info]);
                let new_certificates = match certification.generate_certificates().await {
                    Ok(certificates) => certificates,
                    Err(e) => {
                        error!("Unable to generate certificates: {e}");
                        return Err(e);
                    }
                };
                debug!("Generated new certificates {new_certificates:?}");
                for cert in new_certificates {
                    Self::send_new_certificate(subnet_runtime_proxy.clone(), cert, next_block).await
                }
                info!("Block {} processed", next_block);
                Ok(())
            }
            Err(topos_sequencer_subnet_client::Error::BlockNotAvailable(block_number)) => {
                warn!("New block {block_number} not yet available, trying again soon");
                Err(Error::SubnetError {
                    source: topos_sequencer_subnet_client::Error::BlockNotAvailable(block_number),
                })
            }
            Err(e) => {
                // TODO: Determine if task should end on some type of error
                error!("Failed to fetch the new finalized block: {e}");
                Err(Error::SubnetError { source: e })
            }
        }
    }

    /// Process a block received from the live subscription stream: append it to
    /// the certification history and emit any certificates produced.
    async fn process_block(
        subnet_runtime_proxy: Arc>,
        certification: Arc>,
        block_info: BlockInfo,
    ) -> Result<(), Error> {
        let mut certification = certification.lock().await;
        let block_number
= block_info.number;
        // Update certificate block history
        certification.append_blocks(vec![block_info]);
        let new_certificates = certification.generate_certificates().await?;
        debug!("Generated new certificates {new_certificates:?}");
        for cert in new_certificates {
            Self::send_new_certificate(subnet_runtime_proxy.clone(), cert, block_number).await
        }
        info!("Block {} processed", block_number);
        Ok(())
    }

    /// Dispatch newly generated certificate to TCE client
    #[instrument(name = "NewCertificate", fields(certification = field::Empty, source_subnet_id = field::Empty, certificate_id = field::Empty))]
    async fn send_new_certificate(
        subnet_runtime_proxy: Arc>,
        cert: Certificate,
        block_number: u64,
    ) {
        let mut runtime_proxy = subnet_runtime_proxy.lock().await;
        // Attach tracing metadata before fanning the event out to subscribers.
        Span::current().record("certificate_id", cert.id.to_string());
        Span::current().record("source_subnet_id", cert.source_subnet_id.to_string());
        runtime_proxy
            .send_out_event(SubnetRuntimeProxyEvent::NewCertificate {
                cert: Box::new(cert),
                block_number,
                ctx: Span::current().context(),
            })
            .with_current_context()
            .instrument(Span::current())
            .await;
    }

    /// Send certificate to target subnet Topos Core contract for verification
    ///
    /// Returns the hex-encoded transaction hash of the push, when a receipt
    /// is available.
    async fn push_certificate(
        runtime_proxy_config: &SubnetRuntimeProxyConfig,
        subnet_client: &SubnetClient,
        cert: &Certificate,
        position: u64,
    ) -> Result, Error> {
        debug!(
            "Pushing certificate with id {} to target subnet {}, tcc {}",
            cert.id,
            runtime_proxy_config.subnet_id,
            runtime_proxy_config.subnet_contract_address,
        );
        let receipt = subnet_client.push_certificate(cert, position).await?;
        debug!("Push certificate transaction receipt: {:?}", &receipt);
        let tx_hash = receipt
            .map(|tx_receipt| "0x".to_string() + &hex::encode(tx_receipt.transaction_hash));
        Ok(tx_hash)
    }

    /// Handle one command received on the proxy command channel.
    async fn on_command(
        runtime_proxy_config: &SubnetRuntimeProxyConfig,
        subnet_client: &SubnetClient,
        mb_cmd: Option,
    ) {
        match mb_cmd {
            Some(cmd) => match cmd {
                // Process certificate retrieved from TCE node
SubnetRuntimeProxyCommand::OnNewDeliveredCertificate {
                    certificate,
                    position,
                    ctx,
                } => {
                    let span_subnet_runtime_proxy = info_span!("Subnet Runtime Proxy");
                    span_subnet_runtime_proxy.set_parent(ctx);
                    async {
                        info!(
                            "Processing certificate received from TCE, cert_id={}",
                            &certificate.id
                        );
                        // Verify certificate signature
                        // Well known subnet id is public key for certificate verification
                        // Public key of secp256k1 is 33 bytes, we are keeping last 32 bytes as subnet id
                        // Add manually first byte 0x02
                        let public_key = certificate.source_subnet_id.to_secp256k1_public_key();
                        // Verify signature of the certificate
                        match topos_crypto::signatures::verify(
                            &public_key,
                            certificate.get_payload().as_slice(),
                            certificate.signature.as_slice(),
                        ) {
                            Ok(()) => {
                                info!("Certificate {} passed verification", certificate.id)
                            }
                            Err(e) => {
                                // Reject unverifiable certificates without pushing them on chain.
                                error!("Failed to verify certificate id {}: {e}", certificate.id);
                                return;
                            }
                        }
                        let span_push_certificate = info_span!("Subnet push certificate call");
                        // Push the Certificate to the ToposCore contract on the target subnet
                        match SubnetRuntimeProxy::push_certificate(
                            runtime_proxy_config,
                            subnet_client,
                            &certificate,
                            position,
                        )
                        .with_context(span_push_certificate.context())
                        .instrument(span_push_certificate)
                        .await
                        {
                            Ok(tx_hash) => {
                                debug!(
                                    "Successfully pushed the Certificate {} to target subnet with \
                                     tx hash {:?}",
                                    &certificate.id, &tx_hash
                                );
                            }
                            Err(e) => {
                                // Push failures are logged but not retried here.
                                error!(
                                    "Failed to push the Certificate {} to target subnet: {e}",
                                    &certificate.id
                                );
                            }
                        }
                    }
                    .with_context(span_subnet_runtime_proxy.context())
                    .instrument(span_subnet_runtime_proxy)
                    .await
                }
            },
            _ => {
                warn!("Empty command was passed");
            }
        }
    }

    /// Fan an event out to every registered subscriber; send errors are logged,
    /// not propagated.
    async fn send_out_event(&mut self, evt: SubnetRuntimeProxyEvent) {
        for tx in &self.events_subscribers {
            if let Err(e) = tx.send(evt.clone()).await {
                error!("Unable to send subnet runtime proxy event: {e}");
            }
        }
    }

    /// Shutdown subnet runtime proxy tasks
    pub async fn shutdown(&self) -> Result<(), Error> {
        let (command_task_sender, command_task_receiver) =
oneshot::channel();
        // Ask each task to stop and wait for its acknowledgment, command task first.
        self.command_task_shutdown
            .send(command_task_sender)
            .await
            .map_err(Error::ShutdownCommunication)?;
        command_task_receiver
            .await
            .map_err(Error::ShutdownSignalReceiveError)?;
        let (block_task_sender, block_task_receiver) = oneshot::channel();
        self.block_task_shutdown
            .send(block_task_sender)
            .await
            .map_err(Error::ShutdownCommunication)?;
        block_task_receiver
            .await
            .map_err(Error::ShutdownSignalReceiveError)?;
        Ok(())
    }

    /// Deliver the TCE source head certificate id (and position) to the waiting
    /// block task. One-shot: a second call fails because the sender is consumed.
    pub async fn set_source_head_certificate_id(
        &mut self,
        source_head_certificate_id: Option<(CertificateId, u64)>,
    ) -> Result<(), Error> {
        self.source_head_certificate_id_sender
            .take()
            .ok_or_else(|| {
                Error::SourceHeadCertChannelError(
                    "source head certificate id was previously set".to_string(),
                )
            })?
            .send(source_head_certificate_id)
            .map_err(|_| Error::SourceHeadCertChannelError("channel error".to_string()))
    }

    /// Connect to the subnet node (read-only) and fetch checkpoints for this subnet.
    pub async fn get_checkpoints(&self) -> Result, Error> {
        info!("Connecting to subnet to query for checkpoints...");
        let http_runtime_endpoint = self.config.http_endpoint.as_ref();
        // Create subnet client
        let subnet_client = match topos_sequencer_subnet_client::connect_to_subnet_with_retry(
            http_runtime_endpoint,
            None, // We do not need actual key here as we are just reading state
            self.config.subnet_contract_address.as_str(),
        )
        .await
        {
            Ok(subnet_client) => {
                info!(
                    "Connected to subnet node to acquire checkpoints {}",
                    http_runtime_endpoint
                );
                subnet_client
            }
            Err(e) => {
                error!("Unable to connect to the subnet node to get checkpoints: {e}");
                return Err(Error::SubnetError { source: e });
            }
        };
        match subnet_client.get_checkpoints(&self.config.subnet_id).await {
            Ok(checkpoints) => {
                info!("Successfully retrieved the Checkpoints");
                Ok(checkpoints)
            }
            Err(e) => {
                error!(
                    "Unable to get the checkpoints for subnet {}",
                    self.config.subnet_id
                );
                Err(Error::SubnetError { source: e })
            }
        }
    }

    /// Get the particular subnet id (identifying subnet in the topos protocol)
    /// from the subnet node smart contract
    pub async fn get_subnet_id(
http_endpoint: &str,
        contract_address: &str,
    ) -> Result {
        info!("Connecting to subnet to query for subnet id...");
        // Create subnet client
        let subnet_client = match topos_sequencer_subnet_client::connect_to_subnet_with_retry(
            http_endpoint,
            None, // We do not need actual key here as we are just reading state
            contract_address,
        )
        .await
        {
            Ok(subnet_client) => {
                info!(
                    "Connected to subnet node to acquire subnet id {}",
                    http_endpoint
                );
                subnet_client
            }
            Err(e) => {
                error!("Unable to connect to the subnet node to get subnet id: {e}");
                return Err(Error::SubnetError { source: e });
            }
        };
        match subnet_client.get_subnet_id().await {
            Ok(subnet_id) => {
                info!("Successfully retrieved the subnet id for subnet: {subnet_id}");
                Ok(subnet_id)
            }
            Err(e) => {
                error!("Unable to get the subnet id {e}",);
                Err(Error::SubnetError { source: e })
            }
        }
    }
}

================================================
FILE: crates/topos-sequencer-subnet-runtime/tests/common/abi.rs
================================================
use ethers::{
    contract::abigen,
    core::k256::ecdsa::SigningKey,
    middleware::SignerMiddleware,
    prelude::Wallet,
    providers::{Http, Provider},
};

//TODO I haven't find a way to parametrize version, macro accepts strictly string literal
// `topos-smart-contracts` build artifacts directory must be copied to the root topos directory to run these tests
abigen!(
    TokenDeployerContract,
    "./artifacts/contracts/topos-core/TokenDeployer.sol/TokenDeployer.json"
);
abigen!(
    ToposCoreContract,
    "./artifacts/contracts/topos-core/ToposCore.sol/ToposCore.json"
);
abigen!(
    ToposCoreProxyContract,
    "./artifacts/contracts/topos-core/ToposCoreProxy.sol/ToposCoreProxy.json"
);
abigen!(
    ToposMessagingContract,
    "./artifacts/contracts/topos-core/ToposMessaging.sol/ToposMessaging.json"
);
abigen!(
    ERC20MessagingContract,
    "./artifacts/contracts/examples/ERC20Messaging.sol/ERC20Messaging.json"
);
abigen!(
    IToposCore,
    "./artifacts/contracts/interfaces/IToposCore.sol/IToposCore.json"
);
abigen!(
    IToposMessaging,
"./artifacts/contracts/interfaces/IToposMessaging.sol/IToposMessaging.json" ); abigen!( IERC20Messaging, "./artifacts/contracts/interfaces/IERC20Messaging.sol/IERC20Messaging.json" ); abigen!( IERC20, r"[ function totalSupply() external view returns (uint) function balanceOf(address account) external view returns (uint) function transfer(address recipient, uint amount) external returns (bool) function allowance(address owner, address spender) external view returns (uint) function approve(address spender, uint amount) external returns (bool) function transferFrom(address sender, address recipient, uint amount) external returns (bool) ]" ); pub type IToposCoreClient = IToposCore, Wallet>>; pub type IToposMessagingClient = IToposMessaging, Wallet>>; pub type IERC20Client = IERC20, Wallet>>; pub type IERC20MessagingClient = IERC20Messaging, Wallet>>; ================================================ FILE: crates/topos-sequencer-subnet-runtime/tests/common/mod.rs ================================================ pub(crate) mod abi; pub mod subnet_test_data; ================================================ FILE: crates/topos-sequencer-subnet-runtime/tests/common/subnet_test_data.rs ================================================ use std::io::Write; pub const TEST_VALIDATOR_KEY_FILE_DATA: &str = r#"11eddfae7abe45531b3f18342c8062969323a7131d3043f1a33c40df74803cc7"#; #[allow(dead_code)] pub fn generate_test_subnet_data_dir() -> Result> { const TEST_VALIDATOR_SUBNET_DATA_DIR: &str = "/test_data"; const TEST_VALIDATOR_KEY_FILE_NAME: &str = "/consensus/validator.key"; let tmp = std::env::temp_dir().to_str().unwrap().to_string(); let subnet_data_dir = std::path::PathBuf::from(tmp.clone() + TEST_VALIDATOR_SUBNET_DATA_DIR); let consensus_dir = std::path::PathBuf::from(tmp.clone() + TEST_VALIDATOR_SUBNET_DATA_DIR + "/consensus"); let keystore_file_path = std::path::PathBuf::from( tmp + TEST_VALIDATOR_SUBNET_DATA_DIR + TEST_VALIDATOR_KEY_FILE_NAME, ); 
std::fs::create_dir_all(consensus_dir)?;
    let mut keystore_file = std::fs::File::create(keystore_file_path)?;
    writeln!(&mut keystore_file, "{TEST_VALIDATOR_KEY_FILE_DATA}",)?;
    Ok(subnet_data_dir)
}

/// Decode the hard-coded test validator key into raw bytes.
pub fn generate_test_private_key() -> Vec {
    hex::decode(TEST_VALIDATOR_KEY_FILE_DATA).unwrap()
}

================================================
FILE: crates/topos-sequencer-subnet-runtime/tests/subnet_contract.rs
================================================
#![allow(unknown_lints)]
use crate::common::abi;
use ethers::{
    abi::{ethabi::ethereum_types::U256, Address},
    core::types::Filter,
    middleware::SignerMiddleware,
    providers::{Http, Middleware, Provider},
    signers::{LocalWallet, Signer},
    types::{Block, H256},
};
use rstest::*;
use serial_test::serial;
use std::collections::HashSet;
use std::process::{Child, Command};
use std::sync::Arc;
use test_log::test;
use tokio::sync::Mutex;
use topos_core::uci::{Certificate, CertificateId, SubnetId, SUBNET_ID_LENGTH};
use topos_sequencer_subnet_runtime::proxy::{SubnetRuntimeProxyCommand, SubnetRuntimeProxyEvent};
use tracing::{error, info, warn, Span};
use tracing_opentelemetry::OpenTelemetrySpanExt;

mod common;
use crate::common::subnet_test_data::generate_test_private_key;
use topos_core::api::grpc::checkpoints::TargetStreamPosition;
use topos_sequencer_subnet_runtime::{SubnetRuntimeProxyConfig, SubnetRuntimeProxyWorker};
use topos_test_sdk::constants::*;

// Local test network with default 2 seconds block
const STANDALONE_SUBNET_BLOCK_TIME: u64 = 2;
// Local test network with 12 seconds block, usefull for multiple transactions in one block tests
const STANDALONE_SUBNET_WITH_LONG_BLOCKS_BLOCK_TIME: u64 = 12;
const SUBNET_RPC_PORT: u32 = 8545;
// Account 0x4AAb25B4fAd0Beaac466050f3A7142A502f4Cf0a
const TEST_SECRET_ETHEREUM_KEY: &str =
    "ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80";
const TEST_ETHEREUM_ACCOUNT: &str = "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266";
const TEST_SUBNET_ID: &str =
"6464646464646464646464646464646464646464646464646464646464646464"; const TOKEN_SYMBOL: &str = "TKX"; // Accounts pre-filled in STANDALONE_SUBNET_WITH_LONG_BLOCKS const TEST_ACCOUNT_ALITH_KEY: &str = "59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d"; const TEST_ACCOUNT_ALITH_ACCOUNT: &str = "0x70997970C51812dc3A010C7d01b50e0d17dc79C8"; const TEST_ACCOUNT_BALATHAR_KEY: &str = "5de4111afa1a4b94908f83103eb1f1706367c2e68ca870fc3fb9a804cdab365a"; const TEST_ACCOUNT_BALATHAR_ACCOUNT: &str = "0x3C44CdDdB6a900fa2b585dd299e03d12FA4293BC"; const TEST_ACCOUNT_CEZAR_KEY: &str = "7c852118294e51e653712a81e05800f419141751be58f605c371e15141b007a6"; const TEST_ACCOUNT_CEZAR_ACCOUNT: &str = "0x90F79bf6EB2c4f870365E785982E1f101E93b906"; const PREV_CERTIFICATE_ID_1: CertificateId = CERTIFICATE_ID_4; const PREV_CERTIFICATE_ID_2: CertificateId = CERTIFICATE_ID_5; const CERTIFICATE_ID_1: CertificateId = CERTIFICATE_ID_6; const CERTIFICATE_ID_2: CertificateId = CERTIFICATE_ID_7; const CERTIFICATE_ID_3: CertificateId = CERTIFICATE_ID_8; const DEFAULT_GAS: u64 = 5_000_000; fn spawn_subnet_node( port: u32, block_time: u64, // Block time in seconds ) -> std::io::Result { // Ignore output, too verbose let child = Command::new("anvil") .args([ "--block-time", &block_time.to_string(), "--port", &port.to_string(), ]) .stdout(std::process::Stdio::null()) .spawn(); child } #[allow(dead_code)] struct Context { pub i_topos_core: abi::IToposCoreClient, pub i_topos_messaging: abi::IToposMessagingClient, pub i_erc20_messaging: abi::IERC20MessagingClient, pub subnet_node_handle: Option, pub port: u32, } impl Context { pub async fn shutdown(mut self) -> Result<(), Box> { // Wait for the subnet node to close self.subnet_node_handle .take() .expect("Valid subnet node handle") .kill() .expect("Could not kill anvil subprocess"); Ok(()) } pub fn jsonrpc(&self) -> String { format!("http://127.0.0.1:{}", self.port) } pub fn jsonrpc_ws(&self) -> String { format!("ws://127.0.0.1:{}", self.port) } 
}

impl Drop for Context {
    // Safety net: kill the anvil process even when `shutdown` was not called.
    fn drop(&mut self) {
        if let Some(mut child) = self.subnet_node_handle.take() {
            child.kill().expect("Could not kill anvil subprocess");
        }
    }
}

/// Build an `IERC20Messaging` client signing with `deploy_key` against `endpoint`.
async fn create_new_erc20msg_client(
    deploy_key: &str,
    endpoint: &str,
    erc20_messaging_contract_address: Address,
) -> Result> {
    let wallet: LocalWallet = deploy_key.parse()?;
    let http_provider =
        Provider::::try_from(endpoint)?.interval(std::time::Duration::from_millis(20u64));
    let chain_id = http_provider.get_chainid().await?;
    let client = Arc::new(SignerMiddleware::new(
        http_provider,
        wallet.with_chain_id(chain_id.as_u64()),
    ));
    Ok(abi::IERC20Messaging::new(
        erc20_messaging_contract_address,
        client,
    ))
}

/// Build a plain `IERC20` client signing with `deploy_key` against `endpoint`.
async fn create_new_erc20_client(
    deploy_key: &str,
    endpoint: &str,
    erc20_contract_address: Address,
) -> Result> {
    let wallet: LocalWallet = deploy_key.parse()?;
    let http_provider =
        Provider::::try_from(endpoint)?.interval(std::time::Duration::from_millis(20u64));
    let chain_id = http_provider.get_chainid().await?;
    let client = Arc::new(SignerMiddleware::new(
        http_provider,
        wallet.with_chain_id(chain_id.as_u64()),
    ));
    let i_erc20 = abi::IERC20::new(erc20_contract_address, client);
    Ok(i_erc20)
}

/// Deploy the full topos-core contract suite (TokenDeployer, ToposCore + proxy,
/// ERC20Messaging) to the local node and return the initialized clients.
async fn deploy_contracts(
    deploy_key: &str,
    endpoint: &str,
) -> Result<
    (
        abi::IToposCoreClient,
        abi::IToposMessagingClient,
        abi::IERC20MessagingClient,
    ),
    Box,
> {
    use ethers::abi::Token;
    let wallet: LocalWallet = deploy_key.parse()?;
    let http_provider =
        Provider::::try_from(endpoint)?.interval(std::time::Duration::from_millis(20u64));
    let chain_id = http_provider.get_chainid().await?;
    let wallet_account = wallet.address();
    let client = Arc::new(SignerMiddleware::new(
        http_provider,
        wallet.with_chain_id(chain_id.as_u64()),
    ));
    // Deploying contracts
    info!("Deploying TokenDeployer contract...");
    let token_deployer_contract = abi::TokenDeployerContract::deploy(client.clone(), ())?
.gas(DEFAULT_GAS)
        .chain_id(chain_id.as_u64())
        .legacy()
        .send()
        .await?;
    info!(
        "TokenDeployer contract deployed to 0x{:x}",
        token_deployer_contract.address()
    );
    info!("Deploying ToposCore contract...");
    let topos_core_contract = abi::ToposCoreContract::deploy(client.clone(), ())?
        .gas(DEFAULT_GAS)
        .chain_id(chain_id.as_u64())
        .legacy()
        .send()
        .await?;
    info!(
        "ToposCore contract deployed to 0x{:x}",
        topos_core_contract.address()
    );
    let topos_core_contact_address: Token = Token::Address(topos_core_contract.address());
    let admin_account = vec![wallet_account];
    let new_admin_threshold = U256::from(1);
    info!("Deploying ToposCoreProxy contract...");
    // The proxy is deployed pointing at the ToposCore implementation address.
    let topos_core_proxy_contract =
        abi::ToposCoreProxyContract::deploy(client.clone(), topos_core_contact_address)?
            .gas(DEFAULT_GAS)
            .chain_id(chain_id.as_u64())
            .legacy()
            .send()
            .await?;
    info!(
        "ToposCoreProxy contract deployed to 0x{:x}",
        topos_core_proxy_contract.address()
    );
    // Interact with ToposCore through the proxy address.
    let i_topos_core = abi::IToposCore::new(topos_core_proxy_contract.address(), client.clone());
    if let Err(e) = i_topos_core
        .initialize(admin_account, new_admin_threshold)
        .legacy()
        .gas(DEFAULT_GAS)
        .send()
        .await
        .map_err(|e| {
            error!("Unable to initalize topos core contract: {e}");
            e
        })?
        .await
    {
        panic!("Error setting network subnet id: {e}");
    }
    info!("Deploying ERC20Messaging contract...");
    let erc20_messaging_contract = abi::ERC20MessagingContract::deploy(
        client.clone(),
        (
            token_deployer_contract.address(),
            topos_core_proxy_contract.address(),
        ),
    )?
    .gas(DEFAULT_GAS)
    .chain_id(chain_id.as_u64())
    .legacy()
    .send()
    .await?;
    info!(
        "ERC20 contract deployed to 0x{:x}",
        erc20_messaging_contract.address()
    );
    let i_topos_messaging =
        abi::IToposMessaging::new(erc20_messaging_contract.address(), client.clone());
    let i_erc20_messaging = abi::IERC20Messaging::new(erc20_messaging_contract.address(), client);
    // Set network subnet id
    info!(
        "Updating new contract subnet network id to {}",
        SOURCE_SUBNET_ID_1.to_string()
    );
    if let Err(e) = i_topos_core
        .set_network_subnet_id(SOURCE_SUBNET_ID_1.as_array().to_owned())
        .legacy()
        .gas(DEFAULT_GAS)
        .send()
        .await
        .map_err(|e| {
            error!("Unable to set network id: {e}");
            e
        })?
        .await
    {
        panic!("Error setting network subnet id: {e}");
    }
    // Read back the subnet id to confirm the write took effect.
    match i_topos_core.network_subnet_id().await {
        Ok(subnet_id) => {
            info!("Network subnet id {:?} successfully set", subnet_id);
        }
        Err(e) => {
            error!("Error retrieving subnet id: {e}");
        }
    }
    Ok((i_topos_core, i_topos_messaging, i_erc20_messaging))
}

/// Deploy a test ERC20 token through the ERC20Messaging contract and return an
/// ERC20 client bound to the newly created token address.
async fn deploy_test_token(
    deploy_key: &str,
    endpoint: &str,
    topos_messaging_address: Address,
) -> Result> {
    use ethers::abi::Token;
    let wallet: LocalWallet = deploy_key.parse()?;
    let http_provider =
        Provider::::try_from(endpoint)?.interval(std::time::Duration::from_millis(20u64));
    let chain_id = http_provider.get_chainid().await?;
    let client = Arc::new(SignerMiddleware::new(
        http_provider,
        wallet.with_chain_id(chain_id.as_u64()),
    ));
    let ierc20_messaging = abi::IERC20Messaging::new(topos_messaging_address, client.clone());
    // Deploy token
    let token_name: Token = Token::String("Test Token".to_string());
    let token_symbol: Token = Token::String(TOKEN_SYMBOL.to_string());
    let token_mint_cap: Token = Token::Uint(U256::from(100_000_000));
    let token_daily_mint_limit: Token = Token::Uint(U256::from(100));
    let token_initial_supply: Token = Token::Uint(U256::from(10_000_000));
    // ABI-encode the constructor-style parameter tuple expected by deploy_token.
    let token_encoded_params: ethers::types::Bytes = ethers::abi::encode(&[
        token_name.clone(),
        token_symbol.clone(),
        token_mint_cap,
token_daily_mint_limit, token_initial_supply, ]) .into(); info!( "Deploying new token {} with symbol {}", token_name, token_symbol ); let deploy_outcome = ierc20_messaging .deploy_token(token_encoded_params) .legacy() .gas(DEFAULT_GAS) .send() .await .map_err(|e| { error!("Unable deploy token: {e}"); e })? .await; match deploy_outcome { Ok(r) => { info!("Token deployed: {:?}", r); } Err(e) => { panic!("Error deploying token: {e}"); } } let events = ierc20_messaging .event::() .from_block(0); let events = events.query().await?; if events.is_empty() { panic!( "Missing TokenDeployed event. Token contract is not deployed to test subnet. Could \ not execute test" ); } let token_address = events[0].token_address; info!("Token contract deployed to {}", token_address.to_string()); let i_erc20 = abi::IERC20Client::new(token_address, client); Ok(i_erc20) } async fn check_received_certificate( mut runtime_proxy_worker: SubnetRuntimeProxyWorker, received_certificates: Arc>>, expected_block_numbers: Vec, expected_blocks: Vec>, ) -> Result<(), Box> { let start_height = *expected_block_numbers.first().unwrap(); let end_height = *expected_block_numbers.last().unwrap(); while let Ok(event) = runtime_proxy_worker.next_event().await { if let SubnetRuntimeProxyEvent::NewCertificate { cert, block_number, ctx: _, } = event { info!( "New certificate event received, block number: {} cert id: {} target subnets: \ {:?} state root {}", block_number, cert.id, cert.target_subnets, hex::encode(cert.state_root) ); let mut received_certificates = received_certificates.lock().await; received_certificates.push((block_number, *cert)); if received_certificates .iter() .take(end_height as usize + 1) .map(|(height, _cert)| height) .copied() .collect::>() == expected_block_numbers { info!( "Received all certificates for blocks from {} to {}", start_height, end_height ); // Check if state root matches for all blocks for expected_height in expected_block_numbers { let index = (expected_height - 
start_height) as usize;
                    let received_certificate = &received_certificates[index].1;
                    if expected_blocks[index].state_root
                        != ethers::types::TxHash(received_certificate.state_root)
                    {
                        error!(
                            "State root mismatch, block: {:#?}\n received certificate: {:#?}",
                            expected_blocks[index], received_certificates[index].1
                        );
                        panic!("State root mismatch");
                    }
                }
                info!(
                    "State root check successfully passed for blocks from {} to {}",
                    start_height, end_height
                );
                return Ok::<(), Box<dyn std::error::Error>>(());
            }
        }
    }
    panic!("Expected event not received");
}

/// rstest fixture: spawn an Anvil subnet node on `port` with the given block
/// time, deploy the full contract stack and return a ready-to-use `Context`.
#[fixture]
async fn context_running_subnet_node(
    #[default(8545)] port: u32,
    #[default(STANDALONE_SUBNET_BLOCK_TIME)] block_time: u64,
) -> Context {
    info!(
        "Starting subnet node on port {}, block time: {}s",
        port, block_time
    );
    let subnet_node_handle = match spawn_subnet_node(port, block_time) {
        Ok(subnet_node_handle) => subnet_node_handle,
        Err(e) => {
            if e.kind() == std::io::ErrorKind::NotFound {
                panic!(
                    "Could not find Anvil binary. Please install and add to path Foundry tools \
                     including Anvil"
                );
            } else {
                panic!("Failed to start the Anvil subnet node as part of test context: {e}");
            }
        }
    };
    // Wait a bit for anvil subprocess to spin itself up
    tokio::time::sleep(tokio::time::Duration::from_secs(3)).await;
    info!("Subnet node started...");
    // Deploy contracts
    let json_rpc_endpoint = format!("http://127.0.0.1:{port}");
    match deploy_contracts(TEST_SECRET_ETHEREUM_KEY, &json_rpc_endpoint).await {
        Ok((i_topos_core, i_topos_messaging, i_erc20_messaging)) => {
            info!("Contracts successfully deployed");
            // Context with subnet container working in the background and ready deployed contracts
            Context {
                i_topos_core,
                i_topos_messaging,
                i_erc20_messaging,
                subnet_node_handle: Some(subnet_node_handle),
                port,
            }
        }
        Err(e) => {
            panic!("Unable to deploy contracts: {e}");
        }
    }
}

// Test to start subnet and deploy subnet smart contract
#[rstest]
#[test(tokio::test)]
#[serial]
async fn test_subnet_node_contract_deployment(
    #[with(8544)]
    #[future]
    context_running_subnet_node: Context,
) -> Result<(), Box<dyn std::error::Error>> {
    let context = context_running_subnet_node.await;
    info!("Subnet running in the background with deployed contract");
    context.shutdown().await?;
    info!("Subnet node test finished");
    Ok(())
}

// Test subnet client RPC connection to subnet
#[rstest]
#[test(tokio::test)]
#[serial]
async fn test_subnet_node_get_block_info(
    #[with(8545)]
    #[future]
    context_running_subnet_node: Context,
) -> Result<(), Box<dyn std::error::Error>> {
    //Context with subnet
    let context = context_running_subnet_node.await;
    match topos_sequencer_subnet_client::SubnetClientListener::new(
        &context.jsonrpc_ws(),
        &("0x".to_string() + &hex::encode(context.i_topos_core.address())),
    )
    .await
    {
        Ok(mut subnet_client) => match subnet_client.get_finalized_block(6).await {
            Ok(block_info) => {
                info!(
                    "Block info successfully retrieved for block {}",
                    block_info.number
                );
                // Blocks must have been mined while we deployed contracts
                assert!(block_info.number == 6);
            }
            Err(e) => {
                panic!("Error getting next finalized block: {e}");
            }
        },
        Err(e) => {
            panic!("Unable to get block info, error {e}");
        }
    }
    context.shutdown().await?;
    info!("Subnet node test finished");
    Ok(())
}

// Test runtime initialization
#[rstest]
#[test(tokio::test)]
#[serial]
async fn test_create_runtime() -> Result<(), Box<dyn std::error::Error>> {
    let test_private_key = generate_test_private_key();
    info!("Creating runtime proxy...");
    let runtime_proxy_worker = SubnetRuntimeProxyWorker::new(
        SubnetRuntimeProxyConfig {
            subnet_id: SOURCE_SUBNET_ID_1,
            http_endpoint: format!("http://localhost:{SUBNET_RPC_PORT}"),
            ws_endpoint: format!("ws://localhost:{SUBNET_RPC_PORT}"),
            subnet_contract_address: "0x0000000000000000000000000000000000000000".to_string(),
            verifier: 0,
            source_head_certificate_id: None,
            start_block: None,
        },
        test_private_key,
    )
    .await?;
    let runtime_proxy =
        topos_sequencer_subnet_runtime::testing::get_runtime(&runtime_proxy_worker);
    let runtime_proxy = runtime_proxy.lock().await;
    info!("New runtime proxy created:{:?}", &runtime_proxy);
    Ok(())
}

// Test push certificate to subnet smart contract
#[rstest]
#[test(tokio::test)]
#[serial]
async fn test_subnet_certificate_push_call(
    #[with(8546)]
    #[future]
    context_running_subnet_node: Context,
) -> Result<(), Box<dyn std::error::Error>> {
    let context = context_running_subnet_node.await;
    let test_private_key = generate_test_private_key();
    let admin_key = hex::decode(TEST_SECRET_ETHEREUM_KEY).unwrap();
    let subnet_smart_contract_address =
        "0x".to_string() + &hex::encode(context.i_topos_core.address());
    let runtime_proxy_worker = SubnetRuntimeProxyWorker::new(
        SubnetRuntimeProxyConfig {
            subnet_id: SOURCE_SUBNET_ID_1,
            http_endpoint: context.jsonrpc(),
            ws_endpoint: context.jsonrpc_ws(),
            subnet_contract_address: subnet_smart_contract_address.clone(),
            verifier: 0,
            source_head_certificate_id: None,
            start_block: None,
        },
        admin_key.clone(),
    )
    .await?;
    let source_subnet_id_1 =
        topos_crypto::keys::derive_public_key(test_private_key.as_slice()).unwrap();
    let mut certs = Vec::new();
    // Build a signed mock certificate whose source subnet id is derived from
    // the test public key (bytes 1..33).
    let new_cert = |id, prev_id| {
        let mut mock_cert = Certificate {
            source_subnet_id: SubnetId::from_array(
                TryInto::<[u8; SUBNET_ID_LENGTH]>::try_into(&source_subnet_id_1[1..33]).unwrap(),
            ),
            id,
            prev_id,
            target_subnets: vec![SOURCE_SUBNET_ID_1],
            receipts_root_hash: *id.as_array(), // just to have different receipt root
            ..Default::default()
        };
        mock_cert
            .update_signature(test_private_key.as_slice())
            .expect("valid signature update");
        mock_cert
    };
    certs.push(new_cert(CERTIFICATE_ID_1, PREV_CERTIFICATE_ID_1));
    certs.push(new_cert(CERTIFICATE_ID_2, PREV_CERTIFICATE_ID_2));
    certs.push(new_cert(CERTIFICATE_ID_15, CERTIFICATE_ID_14));
    info!("Sending mock certificate to subnet smart contract...");
    // Multiple push
    for (idx, mock_cert) in certs.iter().enumerate() {
        info!(
            "Push #{idx} for the Certificate: {:?}, Receipt root: {:?}",
            mock_cert.id, mock_cert.receipts_root_hash
        );
        if let Err(e) = runtime_proxy_worker
            .eval(SubnetRuntimeProxyCommand::OnNewDeliveredCertificate {
                certificate: mock_cert.clone(),
                position: idx as u64,
                ctx: 
Span::current().context(),
            })
            .await
        {
            // FIX: the message previously named a non-existent
            // "OnNewDeliveredTxns" command; the command sent above is
            // OnNewDeliveredCertificate.
            error!("Failed to send OnNewDeliveredCertificate command: {}", e);
            return Err(Box::from(e));
        }
    }
    info!("Waiting for CrossSubnetMessageSent event");
    tokio::time::sleep(tokio::time::Duration::from_secs(15)).await;
    let provider = Provider::<Http>::try_from(format!("http://127.0.0.1:{}", context.port))?;
    let client = Arc::new(provider);
    // Collect every CertStored log emitted by the core contract since genesis.
    let filter = Filter::new()
        .address(context.i_topos_core.address())
        .event("CertStored(bytes32,bytes32)")
        .from_block(0);
    let logs = client.get_logs(&filter).await?;
    info!("ALL LOGS: {:?}", logs);
    // Expected log payload is cert id (32 bytes) followed by receipts root.
    let expected_logs = certs
        .iter()
        .map(|c| {
            let mut log = c.id.as_array().to_vec();
            log.extend_from_slice(&c.receipts_root_hash);
            log
        })
        .collect::<Vec<_>>();
    assert_eq!(
        logs.len(),
        expected_logs.len(),
        "should have as much logs as pushed Certificates"
    );
    for log in logs {
        info!(
            "CrossSubnetMessageSent received: block number {:?} from contract {}",
            log.block_number, log.address
        );
        assert_eq!(hex::encode(log.address), subnet_smart_contract_address[2..]);
        assert!(
            expected_logs.iter().any(|l| *l == log.data.0),
            "discrepancies in the logs"
        );
    }
    info!("Shutting down context...");
    context.shutdown().await?;
    Ok(())
}

// Test get last checkpoints from subnet smart contract
#[rstest]
#[test(tokio::test)]
#[serial]
async fn test_subnet_certificate_get_checkpoints_call(
    #[with(8546)]
    #[future]
    context_running_subnet_node: Context,
) -> Result<(), Box<dyn std::error::Error>> {
    use topos_core::api::grpc::checkpoints;
    let context = context_running_subnet_node.await;
    let subnet_smart_contract_address =
        "0x".to_string() + &hex::encode(context.i_topos_core.address());
    let subnet_jsonrpc_http = context.jsonrpc();
    // Get checkpoints when contract is empty
    let subnet_client = topos_sequencer_subnet_client::SubnetClient::new(
        &subnet_jsonrpc_http,
        Some(hex::decode(TEST_SECRET_ETHEREUM_KEY).unwrap()),
        &subnet_smart_contract_address,
    )
    .await
    .expect("Valid subnet client");
    let target_stream_positions = match subnet_client.get_checkpoints(&TARGET_SUBNET_ID_1).await {
        Ok(result) => result,
        Err(e) => {
            panic!("Unable to get latest certificate id and position: {e}");
        }
    };
    // NOTE(review): element type reconstructed — confirm against
    // `checkpoints::TargetStreamPosition`.
    assert_eq!(
        target_stream_positions,
        Vec::<checkpoints::TargetStreamPosition>::new()
    );
    let test_certificates = vec![
        (
            Certificate {
                source_subnet_id: SOURCE_SUBNET_ID_1,
                id: CERTIFICATE_ID_1,
                prev_id: PREV_CERTIFICATE_ID_1,
                target_subnets: vec![TARGET_SUBNET_ID_1],
                ..Default::default()
            },
            0,
        ),
        (
            Certificate {
                source_subnet_id: SOURCE_SUBNET_ID_2,
                id: CERTIFICATE_ID_2,
                prev_id: PREV_CERTIFICATE_ID_2,
                target_subnets: vec![TARGET_SUBNET_ID_1],
                ..Default::default()
            },
            0,
        ),
        (
            Certificate {
                source_subnet_id: SOURCE_SUBNET_ID_1,
                id: CERTIFICATE_ID_3,
                prev_id: CERTIFICATE_ID_1,
                target_subnets: vec![TARGET_SUBNET_ID_1],
                ..Default::default()
            },
            1,
        ),
    ];
    for (test_cert, test_cert_position) in test_certificates.iter() {
        info!("Pushing certificate id={}", test_cert.id);
        match subnet_client
            .push_certificate(test_cert, *test_cert_position as u64)
            .await
        {
            Ok(_) => {
                info!("Certificate id={} pushed", test_cert.id);
            }
            Err(e) => {
                panic!("Unable to push certificate: {e}");
            }
        }
    }
    info!("Getting latest checkpoints ");
    let target_stream_positions = match subnet_client.get_checkpoints(&TARGET_SUBNET_ID_1).await {
        Ok(result) => result,
        Err(e) => {
            panic!("Unable to get the latest certificate id and position: {e}");
        }
    };
    // Only the head certificate per source subnet is expected back.
    let expected_positions = vec![
        TargetStreamPosition {
            target_subnet_id: TARGET_SUBNET_ID_1,
            source_subnet_id: SOURCE_SUBNET_ID_1,
            certificate_id: Some(CERTIFICATE_ID_3),
            position: 1,
        },
        TargetStreamPosition {
            target_subnet_id: TARGET_SUBNET_ID_1,
            source_subnet_id: SOURCE_SUBNET_ID_2,
            certificate_id: Some(CERTIFICATE_ID_2),
            position: 0,
        },
    ]
    .into_iter()
    .collect::<std::collections::HashSet<_>>();
    assert_eq!(
        target_stream_positions
            .into_iter()
            .collect::<std::collections::HashSet<_>>(),
        expected_positions
    );
    info!("Shutting down context...");
    context.shutdown().await?;
    Ok(())
}

// Test get subnet id from subnet smart contract
#[rstest]
#[test(tokio::test)]
#[serial]
async fn test_subnet_id_call(
    #[with(8546)]
    #[future]
    context_running_subnet_node: 
Context, ) -> Result<(), Box> { let context = context_running_subnet_node.await; let subnet_smart_contract_address = "0x".to_string() + &hex::encode(context.i_topos_core.address()); let subnet_jsonrpc_http = context.jsonrpc(); // Create subnet client let subnet_client = topos_sequencer_subnet_client::SubnetClient::new( &subnet_jsonrpc_http, Some(hex::decode(TEST_SECRET_ETHEREUM_KEY).unwrap()), &subnet_smart_contract_address, ) .await .expect("Valid subnet client"); // Get subnet id let retrieved_subnet_id = match subnet_client.get_subnet_id().await { Ok(result) => { info!("Retrieved subnet id {result}"); result } Err(e) => { panic!("Unable to get subnet id: {e}"); } }; let expected_subnet_id: SubnetId = hex::decode(TEST_SUBNET_ID) .unwrap() .as_slice() .try_into() .unwrap(); assert_eq!(retrieved_subnet_id, expected_subnet_id); info!("Shutting down context..."); context.shutdown().await?; Ok(()) } // Test perform send token and check for transaction // in the certificate (by observing target subnets) #[rstest] #[test(tokio::test)] #[serial] async fn test_subnet_send_token_processing( #[with(8546)] #[future] context_running_subnet_node: Context, ) -> Result<(), Box> { let context = context_running_subnet_node.await; let test_private_key = hex::decode(TEST_SECRET_ETHEREUM_KEY).unwrap(); let subnet_jsonrpc_http = context.jsonrpc(); let subnet_smart_contract_address = "0x".to_string() + &hex::encode(context.i_topos_core.address()); // Create runtime proxy worker info!("Creating subnet runtime proxy"); let mut runtime_proxy_worker = SubnetRuntimeProxyWorker::new( SubnetRuntimeProxyConfig { subnet_id: SOURCE_SUBNET_ID_1, http_endpoint: context.jsonrpc(), ws_endpoint: context.jsonrpc_ws(), subnet_contract_address: subnet_smart_contract_address.clone(), verifier: 0, source_head_certificate_id: None, start_block: None, }, test_private_key.clone(), ) .await?; tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; info!("Set source head certificate to 0"); if let 
Err(e) = runtime_proxy_worker .set_source_head_certificate_id(Some((CERTIFICATE_ID_1, 0))) .await { panic!("Unable to set source head certificate id: {e}"); } // Deploy token contract let i_erc20 = deploy_test_token( &hex::encode(&test_private_key), &subnet_jsonrpc_http, context.i_topos_messaging.address(), ) .await?; // Approve token spending if let Err(e) = i_erc20 .approve(context.i_topos_messaging.address(), U256::from(10)) .legacy() .gas(DEFAULT_GAS) .send() .await? .await { panic!("Unable to perform token approval {e}"); } // Perform send token info!("Sending token"); if let Err(e) = context .i_erc20_messaging .send_token( TARGET_SUBNET_ID_2.into(), TOKEN_SYMBOL.into(), "00000000000000000000000000000000000000AA".parse()?, U256::from(2), ) .legacy() .gas(DEFAULT_GAS) .send() .await? .await { panic!("Unable to send token: {e}"); }; info!("Waiting for certificate with send token transaction..."); let assertion = async move { while let Ok(event) = runtime_proxy_worker.next_event().await { if let SubnetRuntimeProxyEvent::NewCertificate { cert, block_number: _, ctx: _, } = event { info!( "New certificate event received, cert id: {} target subnets: {:?}", cert.id, cert.target_subnets ); if cert.target_subnets.len() == 1 && cert.target_subnets == vec![TARGET_SUBNET_ID_2] { info!( "Received certificate with requested target subnet {}", cert.target_subnets[0] ); return Ok::<(), Box>(()); } } } panic!("Expected event not received"); }; // Set big timeout to prevent flaky fails. 
Instead fail/panic early in the test to indicate actual error if tokio::time::timeout(std::time::Duration::from_secs(10), assertion) .await .is_err() { panic!("Timeout waiting for command"); } info!("Shutting down context..."); context.shutdown().await?; Ok(()) } // Test sync of blocks and generating certificates from genesis block // and test sync from particular source head received from tce #[rstest] #[test(tokio::test)] #[timeout(std::time::Duration::from_secs(600))] #[serial] async fn test_sync_from_genesis_and_particular_source_head( #[with(8546)] #[future] context_running_subnet_node: Context, ) -> Result<(), Box> { let context = context_running_subnet_node.await; let test_private_key = hex::decode(TEST_SECRET_ETHEREUM_KEY).unwrap(); let subnet_jsonrpc_http = context.jsonrpc(); let subnet_smart_contract_address = "0x".to_string() + &hex::encode(context.i_topos_core.address()); // Wait for some time to simulate network history tokio::time::sleep(tokio::time::Duration::from_secs(10)).await; // Get block height let http_provider = Provider::::try_from(subnet_jsonrpc_http.clone())? 
.interval(std::time::Duration::from_millis(20u64));
    let subnet_height = http_provider.get_block_number().await?.as_u64();
    // Create runtime proxy worker
    info!("Creating subnet runtime proxy");
    let runtime_proxy_worker = SubnetRuntimeProxyWorker::new(
        SubnetRuntimeProxyConfig {
            subnet_id: SOURCE_SUBNET_ID_1,
            http_endpoint: context.jsonrpc(),
            ws_endpoint: context.jsonrpc_ws(),
            subnet_contract_address: subnet_smart_contract_address.clone(),
            verifier: 0,
            source_head_certificate_id: None,
            start_block: None,
        },
        test_private_key.clone(),
    )
    .await?;
    tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
    info!("Manually set source head certificate to 0 as TCE is not available");
    if let Err(e) = runtime_proxy_worker
        .set_source_head_certificate_id(None)
        .await
    {
        panic!("Unable to set source head certificate id: {e}");
    }
    info!("Waiting for the certificates from zero until height {subnet_height}...");
    let received_certificates = Arc::new(Mutex::new(Vec::new()));
    // Test sync from genesis block
    {
        let expected_block_numbers = (0..=subnet_height).collect::<Vec<u64>>();
        let mut expected_blocks = Vec::new();
        for height in &expected_block_numbers {
            match http_provider.get_block(*height).await {
                Ok(block_info) => expected_blocks.push(block_info.expect("valid block")),
                Err(e) => {
                    panic!("Unable to get block number {}: {}", height, e);
                }
            }
        }
        let received_certificates = received_certificates.clone();
        // Set big timeout to prevent flaky fails. Instead fail/panic early in
        // the test to indicate actual error
        if tokio::time::timeout(
            std::time::Duration::from_secs(60),
            check_received_certificate(
                runtime_proxy_worker,
                received_certificates,
                expected_block_numbers,
                expected_blocks,
            ),
        )
        .await
        .is_err()
        {
            panic!("Timeout waiting for command");
        }
    }
    //---------------------------------------------------------------------
    // Now, instantiate new subnet runtime and sync from known position to
    // test sync from particular point
    //---------------------------------------------------------------------
    //
    // Get block height
    let http_provider = Provider::<Http>::try_from(subnet_jsonrpc_http)?
        .interval(std::time::Duration::from_millis(20u64));
    let subnet_height = http_provider.get_block_number().await?.as_u64();
    const SYNC_START_BLOCK_NUMBER: u64 = 11;
    // Create second runtime proxy worker
    info!("Creating second subnet runtime proxy worker");
    let runtime_proxy_worker_2 = SubnetRuntimeProxyWorker::new(
        SubnetRuntimeProxyConfig {
            subnet_id: SOURCE_SUBNET_ID_1,
            http_endpoint: context.jsonrpc(),
            ws_endpoint: context.jsonrpc_ws(),
            subnet_contract_address: subnet_smart_contract_address.clone(),
            verifier: 0,
            source_head_certificate_id: None,
            start_block: None,
        },
        test_private_key.clone(),
    )
    .await?;
    tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
    info!(
        "Manually set source head certificate to certificate from block {}",
        SYNC_START_BLOCK_NUMBER
    );
    // Resume from the certificate produced for the block just before the
    // chosen sync start point, as a TCE would have reported it.
    let last_certificate_retrieved =
        received_certificates.lock().await[SYNC_START_BLOCK_NUMBER as usize - 1].clone();
    received_certificates.lock().await.clear();
    if let Err(e) = runtime_proxy_worker_2
        .set_source_head_certificate_id(Some((
            last_certificate_retrieved.1.id,
            last_certificate_retrieved.0,
        )))
        .await
    {
        panic!("Unable to set source head certificate id: {e}");
    }
    // Test sync from 11 block
    {
        let expected_block_numbers =
            (SYNC_START_BLOCK_NUMBER..=subnet_height).collect::<Vec<u64>>();
        let mut expected_blocks = Vec::new();
        for height in &expected_block_numbers {
            match http_provider.get_block(*height).await {
                Ok(block_info) => expected_blocks.push(block_info.expect("valid block")),
                Err(e) => {
                    panic!("Unable to get block number {}: {}", height, e);
                }
            }
        }
        let received_certificates = Arc::new(Mutex::new(Vec::new()));
        // Set big timeout to prevent flaky fails. Instead fail/panic early in
        // the test to indicate actual error
        if tokio::time::timeout(
            std::time::Duration::from_secs(60),
            check_received_certificate(
                runtime_proxy_worker_2,
                received_certificates,
                expected_block_numbers,
                expected_blocks,
            ),
        )
        .await
        .is_err()
        {
            panic!("Timeout waiting for command");
        }
    }
    info!("Shutting down context...");
    context.shutdown().await?;
    Ok(())
}

// Test sync of blocks and generating certificates start block parameter
#[rstest]
#[test(tokio::test)]
#[timeout(std::time::Duration::from_secs(600))]
#[serial]
async fn test_sync_from_start_block(
    #[with(8546)]
    #[future]
    context_running_subnet_node: Context,
) -> Result<(), Box<dyn std::error::Error>> {
    let context = context_running_subnet_node.await;
    let test_private_key = hex::decode(TEST_SECRET_ETHEREUM_KEY).unwrap();
    let subnet_jsonrpc_http = context.jsonrpc();
    let subnet_smart_contract_address =
        "0x".to_string() + &hex::encode(context.i_topos_core.address());
    // Wait for some time to simulate network history
    tokio::time::sleep(tokio::time::Duration::from_secs(10)).await;
    // Get block height
    let http_provider = Provider::<Http>::try_from(subnet_jsonrpc_http.clone())?
.interval(std::time::Duration::from_millis(20u64)); let subnet_height = http_provider.get_block_number().await?.as_u64(); // Define start block as current subnet height reduced by 5 let start_block: u64 = subnet_height - 5; // Create runtime proxy worker info!("Creating subnet runtime proxy"); let runtime_proxy_worker = SubnetRuntimeProxyWorker::new( SubnetRuntimeProxyConfig { subnet_id: SOURCE_SUBNET_ID_1, http_endpoint: context.jsonrpc(), ws_endpoint: context.jsonrpc_ws(), subnet_contract_address: subnet_smart_contract_address.clone(), verifier: 0, source_head_certificate_id: None, start_block: Some(start_block), }, test_private_key.clone(), ) .await?; tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; info!("Manually set source head certificate to 0 as TCE is not available"); if let Err(e) = runtime_proxy_worker .set_source_head_certificate_id(None) .await { panic!("Unable to set source head certificate id: {e}"); } info!( "Syncing from the start block {} to current height {}", start_block, subnet_height ); let received_certificates = Arc::new(Mutex::new(Vec::new())); // Test sync from start block { let expected_block_numbers = (start_block..=subnet_height).collect::>(); let mut expected_blocks = Vec::new(); for height in &expected_block_numbers { match http_provider.get_block(*height).await { Ok(block_info) => expected_blocks.push(block_info.expect("valid block")), Err(e) => { panic!("Unable to get block number {}: {}", height, e); } } } let received_certificates = received_certificates.clone(); // Set big timeout to prevent flaky fails. 
Instead fail/panic early in the test to indicate actual error if tokio::time::timeout( std::time::Duration::from_secs(60), check_received_certificate( runtime_proxy_worker, received_certificates, expected_block_numbers, expected_blocks, ), ) .await .is_err() { panic!("Timeout waiting for command"); } } info!("Shutting down context..."); context.shutdown().await?; Ok(()) } // Test multiple send token events in a block // Test is slow, block time is 12 seconds #[rstest] #[test(tokio::test)] #[timeout(std::time::Duration::from_secs(600))] #[serial] async fn test_subnet_multiple_send_token_in_a_block( #[with(8546, STANDALONE_SUBNET_WITH_LONG_BLOCKS_BLOCK_TIME)] #[future] context_running_subnet_node: Context, ) -> Result<(), Box> { let context = context_running_subnet_node.await; let test_private_key = hex::decode(TEST_SECRET_ETHEREUM_KEY).unwrap(); let subnet_jsonrpc_http = context.jsonrpc(); let subnet_smart_contract_address = "0x".to_string() + &hex::encode(context.i_topos_core.address()); let number_of_send_token_transactions: usize = 4; warn!("Block time is intentionally long, this is slow test..."); // Create runtime proxy worker info!("Creating subnet runtime proxy"); let mut runtime_proxy_worker = SubnetRuntimeProxyWorker::new( SubnetRuntimeProxyConfig { subnet_id: SOURCE_SUBNET_ID_1, http_endpoint: context.jsonrpc(), ws_endpoint: context.jsonrpc_ws(), subnet_contract_address: subnet_smart_contract_address.clone(), verifier: 0, source_head_certificate_id: None, start_block: None, }, test_private_key.clone(), ) .await?; tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; info!("Set source head certificate to 0"); if let Err(e) = runtime_proxy_worker .set_source_head_certificate_id(Some((CERTIFICATE_ID_1, 0))) .await { panic!("Unable to set source head certificate id: {e}"); } // Deploy token contract let i_erc20 = deploy_test_token( &hex::encode(&test_private_key), &subnet_jsonrpc_http, context.i_topos_messaging.address(), ) .await?; info!("Reading 
balance of the main account..."); match i_erc20.balance_of(TEST_ETHEREUM_ACCOUNT.parse()?).await { Ok(balance) => { info!("Balance of admin account is {:?}", balance); } Err(e) => { error!("Unable to read balance {e}"); } } // Send token to other addresses let test_accounts: Vec<_> = vec![ TEST_ACCOUNT_ALITH_ACCOUNT.parse()?, TEST_ACCOUNT_BALATHAR_ACCOUNT.parse()?, TEST_ACCOUNT_CEZAR_ACCOUNT.parse()?, ]; info!("Transferring tokens to {} accounts", test_accounts.len()); for test_account in test_accounts { info!( "Transferring token to address {}", "0x".to_string() + &hex::encode(test_account) ); if let Err(e) = i_erc20 .transfer(test_account, U256::from(10)) .legacy() .gas(DEFAULT_GAS) .send() .await? .await { panic!("Unable to perform token transfer {e}"); } } info!("Tokens transferred"); let mut erc20_clients = vec![ create_new_erc20_client( TEST_SECRET_ETHEREUM_KEY, &subnet_jsonrpc_http, i_erc20.address(), ) .await .expect("Valid erc20 client"), create_new_erc20_client( TEST_ACCOUNT_ALITH_KEY, &subnet_jsonrpc_http, i_erc20.address(), ) .await .expect("Valid erc20 client"), create_new_erc20_client( TEST_ACCOUNT_BALATHAR_KEY, &subnet_jsonrpc_http, i_erc20.address(), ) .await .expect("Valid erc20 client"), create_new_erc20_client( TEST_ACCOUNT_CEZAR_KEY, &subnet_jsonrpc_http, i_erc20.address(), ) .await .expect("Valid erc20 client"), ]; info!("Approve token spending"); for erc20_client in &mut erc20_clients { if let Err(e) = erc20_client .approve(context.i_topos_messaging.address(), U256::from(10)) .legacy() .gas(DEFAULT_GAS) .send() .await? 
.await { panic!("Unable to perform token approval {e}"); } else { info!("Token spending approved for {}", erc20_client.address()); } } info!("All token spending approved"); info!("Initializing multiple i_erc20_messaging subnet clients"); let mut target_subnets = vec![ ( TARGET_SUBNET_ID_5, create_new_erc20msg_client( TEST_SECRET_ETHEREUM_KEY, &subnet_jsonrpc_http, context.i_erc20_messaging.address(), ) .await .expect("Valid client"), ), ( TARGET_SUBNET_ID_4, create_new_erc20msg_client( TEST_ACCOUNT_ALITH_KEY, &subnet_jsonrpc_http, context.i_erc20_messaging.address(), ) .await .expect("Valid client"), ), ( TARGET_SUBNET_ID_3, create_new_erc20msg_client( TEST_ACCOUNT_BALATHAR_KEY, &subnet_jsonrpc_http, context.i_erc20_messaging.address(), ) .await .expect("Valid client"), ), ( TARGET_SUBNET_ID_2, create_new_erc20msg_client( TEST_ACCOUNT_CEZAR_KEY, &subnet_jsonrpc_http, context.i_erc20_messaging.address(), ) .await .expect("Valid client"), ), ]; // Perform multiple send token actions info!("Sending multiple transactions in parallel"); let mut handles = Vec::new(); for i in 1..=number_of_send_token_transactions { let (target_subnet, i_erc20_messaging) = target_subnets.pop().unwrap(); let i_erc20_messaging_address = i_erc20_messaging.address(); let handle = tokio::spawn(async move { info!( "Sending transaction {} to target subnet {} erc20 messaging account {}", i, &target_subnet, "0x".to_string() + &hex::encode(i_erc20_messaging_address) ); if let Err(e) = i_erc20_messaging .send_token( target_subnet.into(), TOKEN_SYMBOL.into(), "00000000000000000000000000000000000000AA".parse().unwrap(), U256::from(i), ) .legacy() .gas(DEFAULT_GAS) .send() .await .map_err(|e| { error!("Unable to send token, contract error: {e}"); }) .unwrap() .await { error!("Unable to send token {e}"); panic!("Unable to send token: {e}"); }; info!("Transaction {} sent", i); }); handles.push(handle); } for handle in handles { handle.await.expect("Send token task correctly finished"); } info!("All token 
transactions sent!"); info!("Waiting for certificate with send token transaction..."); let mut received_certificates = Vec::new(); let assertion = async move { while let Ok(event) = runtime_proxy_worker.next_event().await { if let SubnetRuntimeProxyEvent::NewCertificate { cert, block_number, ctx: _, } = event { info!( "New certificate event received, block number: {} cert id: {} target subnets: \ {:?}", block_number, cert.id, cert.target_subnets ); if !cert.target_subnets.is_empty() { received_certificates.push(cert); let target_subnets = received_certificates .iter() .flat_map(|c| c.target_subnets.iter()) .collect::>(); if target_subnets.len() == number_of_send_token_transactions { info!("Received all expected target subnets {:?}", target_subnets); return Ok::<(), Box>(()); } } } else { info!("Received subnet event: {:?}", event); } } panic!("Expected event not received"); }; // Set big timeout to prevent flaky failures. Instead fail/panic early in the test to indicate actual error if tokio::time::timeout(std::time::Duration::from_secs(120), assertion) .await .is_err() { panic!("Timeout waiting for command"); } info!("Shutting down context..."); context.shutdown().await?; Ok(()) } ================================================ FILE: crates/topos-tce/Cargo.toml ================================================ [package] name = "topos-tce" description = "TCE Node Service" version = "0.1.0" edition = "2021" rust-version = "1.65" [lints] workspace = true [dependencies] libp2p.workspace = true async-trait.workspace = true bincode.workspace = true clap.workspace = true hex.workspace = true futures.workspace = true opentelemetry.workspace = true prometheus-client.workspace = true prometheus.workspace = true serde.workspace = true thiserror.workspace = true tokio.workspace = true tokio-util.workspace = true tokio-stream.workspace = true topos-core.workspace = true tracing-attributes.workspace = true tracing-opentelemetry.workspace = true tracing-subscriber = { workspace 
= true, default-features = false, features = ["std", "env-filter", "fmt", "ansi"] } tracing.workspace = true tonic.workspace = true bytes.workspace = true prost.workspace = true topos-config = { path = "../topos-config" } topos-p2p = { path = "../topos-p2p" } topos-metrics = { path = "../topos-metrics" } topos-tce-api = { path = "../topos-tce-api"} topos-crypto = { path = "../topos-crypto" } topos-tce-broadcast = { path = "../topos-tce-broadcast" } topos-tce-gatekeeper = { path = "../topos-tce-gatekeeper" } topos-tce-storage = { package = "topos-tce-storage", path = "../topos-tce-storage" } topos-tce-synchronizer = { path = "../topos-tce-synchronizer" } topos-telemetry = { path = "../topos-telemetry" } axum = "0.7.4" axum-prometheus = "0.6" [dev-dependencies] topos-test-sdk = { path = "../topos-test-sdk/" } async-stream.workspace = true async-trait.workspace = true hyper.workspace = true libp2p.workspace = true rand.workspace = true rand_core.workspace = true rand_distr.workspace = true rstest.workspace = true tonic.workspace = true tracing-subscriber = { workspace = true, features = ["env-filter", "fmt"] } tracing.workspace = true test-log.workspace = true cucumber = "0.13.0" env_logger.workspace = true [features] default = [] log-json = ["tracing-subscriber/json"] ================================================ FILE: crates/topos-tce/src/app_context/api.rs ================================================ use crate::AppContext; use std::collections::HashMap; use topos_core::uci::{Certificate, SubnetId}; use topos_metrics::CERTIFICATE_DELIVERY_LATENCY; use topos_tce_api::RuntimeError; use topos_tce_api::RuntimeEvent as ApiEvent; use topos_tce_broadcast::DoubleEchoCommand; use topos_tce_storage::errors::{InternalStorageError, StorageError}; use topos_tce_storage::types::PendingResult; use tracing::debug; use tracing::{error, warn}; impl AppContext { pub async fn on_api_event(&mut self, event: ApiEvent) { match event { ApiEvent::CertificateSubmitted { certificate, 
sender, } => { self.delivery_latency .insert(certificate.id, CERTIFICATE_DELIVERY_LATENCY.start_timer()); _ = match self .validator_store .insert_pending_certificate(&certificate) .await { Ok(Some(pending_id)) => { let certificate_id = certificate.id; debug!( "Certificate {} from subnet {} has been inserted into pending pool", certificate_id, certificate.source_subnet_id ); if self .tce_cli .get_double_echo_channel() .send(DoubleEchoCommand::Broadcast { need_gossip: true, cert: *certificate, pending_id, }) .await .is_err() { error!( "Unable to send DoubleEchoCommand::Broadcast command to double \ echo for {}", certificate_id ); sender.send(Err(RuntimeError::CommunicationError( "Unable to send DoubleEchoCommand::Broadcast command to double \ echo" .to_string(), ))) } else { sender.send(Ok(PendingResult::InPending(pending_id))) } } Ok(None) => { debug!( "Certificate {} from subnet {} has been inserted into precedence pool \ waiting for {}", certificate.id, certificate.source_subnet_id, certificate.prev_id ); sender.send(Ok(PendingResult::AwaitPrecedence)) } Err(StorageError::InternalStorage( InternalStorageError::CertificateAlreadyPending, )) => { debug!( "Certificate {} has already been added to the pending pool, skipping", certificate.id ); sender.send(Ok(PendingResult::AlreadyPending)) } Err(StorageError::InternalStorage( InternalStorageError::CertificateAlreadyExists, )) => { debug!( "Certificate {} has already been delivered, skipping", certificate.id ); sender.send(Ok(PendingResult::AlreadyDelivered)) } Err(error) => { error!( "Unable to insert pending certificate {}: {}", certificate.id, error ); sender.send(Err(error.into())) } }; } ApiEvent::GetSourceHead { subnet_id, sender } => { // Get source head certificate let mut result = self .pending_storage .get_source_head(subnet_id) .await .and_then(|result| match result { None => Err(StorageError::InternalStorage( InternalStorageError::MissingHeadForSubnet(subnet_id), )), value => Ok(value), }) .map_err(|e| 
match e { StorageError::InternalStorage(internal) => { if let InternalStorageError::MissingHeadForSubnet(subnet_id) = internal { RuntimeError::UnknownSubnet(subnet_id) } else { RuntimeError::UnableToGetSourceHead(subnet_id, internal.to_string()) } } e => RuntimeError::UnableToGetSourceHead(subnet_id, e.to_string()), }); // TODO: Initial genesis certificate eventually will be fetched from the topos subnet // Currently, for subnet starting from scratch there are no certificates in the database // So for MissingHeadForSubnet error we will return some default dummy certificate if let Err(RuntimeError::UnknownSubnet(subnet_id)) = result { warn!("Returning dummy certificate as head certificate, to be fixed..."); result = Ok(Some(( 0, topos_core::uci::Certificate { prev_id: AppContext::DUMMY_INITIAL_CERTIFICATE_ID, source_subnet_id: subnet_id, state_root: Default::default(), tx_root_hash: Default::default(), receipts_root_hash: Default::default(), target_subnets: vec![], verifier: 0, id: AppContext::DUMMY_INITIAL_CERTIFICATE_ID, proof: Default::default(), signature: Default::default(), }, ))); }; _ = sender.send(result); } ApiEvent::GetLastPendingCertificates { mut subnet_ids, sender, } => { let mut last_pending_certificates: HashMap> = subnet_ids .iter() .map(|subnet_id| (*subnet_id, None)) .collect(); if let Ok(pending_certificates) = self.pending_storage.get_pending_certificates().await { // Count number of pending certificates for every subnet let mut indexes: HashMap = HashMap::new(); for (_pending_certificate_id, cert) in pending_certificates.iter() { *indexes.entry(cert.source_subnet_id).or_insert(0) += 1; } // Iterate through pending certificates and determine last one for every subnet // Last certificate in the subnet should be one with the highest index for (_pending_certificate_id, cert) in pending_certificates.into_iter().rev() { if let Some(subnet_id) = subnet_ids.take(&cert.source_subnet_id) { *last_pending_certificates.entry(subnet_id).or_insert(None) = 
Some((cert, indexes[&subnet_id])); } if subnet_ids.is_empty() { break; } } } // Add None pending certificate for any other requested subnet_id subnet_ids.iter().for_each(|subnet_id| { last_pending_certificates.insert(*subnet_id, None); }); _ = sender.send(Ok(last_pending_certificates)); } } } } ================================================ FILE: crates/topos-tce/src/app_context/network.rs ================================================ use prost::Message; use std::collections::hash_map; use topos_tce_storage::errors::{InternalStorageError, StorageError}; use tokio::spawn; use topos_metrics::CERTIFICATE_DELIVERY_LATENCY; use topos_p2p::Event as NetEvent; use topos_tce_broadcast::DoubleEchoCommand; use tracing::{debug, error, info, trace}; use topos_core::api::grpc::tce::v1::{double_echo_request, DoubleEchoRequest, Echo, Gossip, Ready}; use topos_core::uci; use crate::AppContext; impl AppContext { pub async fn on_net_event(&mut self, evt: NetEvent) { trace!( "on_net_event: peer: {} event {:?}", &self.network_client.local_peer_id, &evt ); if let NetEvent::Gossip { data, from } = evt { if let Ok(DoubleEchoRequest { request: Some(double_echo_request), }) = DoubleEchoRequest::decode(&data[..]) { match double_echo_request { double_echo_request::Request::Gossip(Gossip { certificate: Some(certificate), }) => match uci::Certificate::try_from(certificate) { Ok(cert) => { if let hash_map::Entry::Vacant(entry) = self.delivery_latency.entry(cert.id) { entry.insert(CERTIFICATE_DELIVERY_LATENCY.start_timer()); } info!( "Received certificate {} from GossipSub from {}", cert.id, from ); match self.validator_store.insert_pending_certificate(&cert).await { Ok(Some(pending_id)) => { let certificate_id = cert.id; debug!( "Certificate {} has been inserted into pending pool", certificate_id ); if self .tce_cli .get_double_echo_channel() .send(DoubleEchoCommand::Broadcast { need_gossip: false, cert, pending_id, }) .await .is_err() { error!( "Unable to send DoubleEchoCommand::Broadcast 
command \ to double echo for {}", certificate_id ); } } Ok(None) => { debug!( "Certificate {} from subnet {} has been inserted into \ precedence pool waiting for {}", cert.id, cert.source_subnet_id, cert.prev_id ); } Err(StorageError::InternalStorage( InternalStorageError::CertificateAlreadyPending, )) => { debug!( "Certificate {} has been already added to the pending \ pool, skipping", cert.id ); } Err(StorageError::InternalStorage( InternalStorageError::CertificateAlreadyExists, )) => { debug!( "Certificate {} has been already delivered, skipping", cert.id ); } Err(error) => { error!( "Unable to insert pending certificate {}: {}", cert.id, error ); } } } Err(e) => { error!("Failed to parse the received Certificate: {e}"); } }, double_echo_request::Request::Echo(Echo { certificate_id: Some(certificate_id), signature: Some(signature), validator_id: Some(validator_id), }) => { let channel = self.tce_cli.get_double_echo_channel(); spawn(async move { let certificate_id = certificate_id.clone().try_into().map_err(|e| { error!( "Failed to parse the CertificateId {certificate_id} from \ Echo: {e}" ); e }); let validator_id = validator_id.clone().try_into().map_err(|e| { error!( "Failed to parse the ValidatorId {validator_id} from Echo: {e}" ); e }); if let (Ok(certificate_id), Ok(validator_id)) = (certificate_id, validator_id) { trace!( "Received Echo message, certificate_id: {certificate_id}, \ validator_id: {validator_id} from: {from}", certificate_id = certificate_id, validator_id = validator_id ); if let Err(e) = channel .send(DoubleEchoCommand::Echo { signature: signature.into(), certificate_id, validator_id, }) .await { error!("Unable to pass received Echo message: {:?}", e); } } else { error!("Unable to process Echo message due to invalid data"); } }); } double_echo_request::Request::Ready(Ready { certificate_id: Some(certificate_id), signature: Some(signature), validator_id: Some(validator_id), }) => { let channel = self.tce_cli.get_double_echo_channel(); 
spawn(async move { let certificate_id = certificate_id.clone().try_into().map_err(|e| { error!( "Failed to parse the CertificateId {certificate_id} from \ Ready: {e}" ); e }); let validator_id = validator_id.clone().try_into().map_err(|e| { error!( "Failed to parse the ValidatorId {validator_id} from Ready: \ {e}" ); e }); if let (Ok(certificate_id), Ok(validator_id)) = (certificate_id, validator_id) { trace!( "Received Ready message, certificate_id: {certificate_id}, \ validator_id: {validator_id} from: {from}", certificate_id = certificate_id, validator_id = validator_id ); if let Err(e) = channel .send(DoubleEchoCommand::Ready { signature: signature.into(), certificate_id, validator_id, }) .await { error!("Unable to pass received Ready message: {:?}", e); } } else { error!("Unable to process Ready message due to invalid data"); } }); } _ => {} } } } } } ================================================ FILE: crates/topos-tce/src/app_context/protocol.rs ================================================ use topos_core::api::grpc::tce::v1::{double_echo_request, DoubleEchoRequest, Echo, Gossip, Ready}; use topos_tce_broadcast::event::ProtocolEvents; use tracing::{error, info, warn}; use crate::AppContext; impl AppContext { pub async fn on_protocol_event(&mut self, evt: ProtocolEvents) { match evt { ProtocolEvents::Broadcast { certificate_id } => { info!("Broadcasting certificate {}", certificate_id); } ProtocolEvents::Gossip { cert } => { let cert_id = cert.id; let request = DoubleEchoRequest { request: Some(double_echo_request::Request::Gossip(Gossip { certificate: Some(cert.into()), })), }; info!("Sending Gossip for certificate {}", cert_id); if let Err(e) = self .network_client .publish(topos_p2p::TOPOS_GOSSIP, request) .await { error!("Unable to send Gossip: {e}"); } } ProtocolEvents::Echo { certificate_id, signature, validator_id, } if self.is_validator => { // Send echo message let request = DoubleEchoRequest { request: 
Some(double_echo_request::Request::Echo(Echo {
                        certificate_id: Some(certificate_id.into()),
                        signature: Some(signature.into()),
                        validator_id: Some(validator_id.into()),
                    })),
                };

                // Publish the signed Echo on the dedicated TOPOS_ECHO gossip topic.
                if let Err(e) = self
                    .network_client
                    .publish(topos_p2p::TOPOS_ECHO, request)
                    .await
                {
                    error!("Unable to send Echo: {e}");
                }
            }
            // Only validators (guarded by `self.is_validator`) emit Ready messages.
            ProtocolEvents::Ready {
                certificate_id,
                signature,
                validator_id,
            } if self.is_validator => {
                // Wrap the payload in the protobuf DoubleEchoRequest envelope.
                let request = DoubleEchoRequest {
                    request: Some(double_echo_request::Request::Ready(Ready {
                        certificate_id: Some(certificate_id.into()),
                        signature: Some(signature.into()),
                        validator_id: Some(validator_id.into()),
                    })),
                };

                // Publish the signed Ready on the dedicated TOPOS_READY gossip topic.
                if let Err(e) = self
                    .network_client
                    .publish(topos_p2p::TOPOS_READY, request)
                    .await
                {
                    error!("Unable to send Ready: {e}");
                }
            }
            ProtocolEvents::BroadcastFailed { certificate_id } => {
                warn!("Broadcast failed for certificate {certificate_id}")
            }
            ProtocolEvents::AlreadyDelivered { certificate_id } => {
                info!("Certificate {certificate_id} already delivered")
            }
            // All other protocol events are intentionally ignored here.
            _ => {}
        }
    }
}

================================================
FILE: crates/topos-tce/src/app_context.rs
================================================
//!
//! Application logic glue
//!
use crate::events::Events; use futures::{Stream, StreamExt}; use prometheus::HistogramTimer; use std::collections::HashMap; use std::sync::Arc; use tokio::sync::mpsc; use tokio_util::sync::CancellationToken; use topos_core::uci::CertificateId; use topos_metrics::CERTIFICATE_DELIVERED_TOTAL; use topos_p2p::{Event as NetEvent, NetworkClient}; use topos_tce_api::RuntimeClient as ApiClient; use topos_tce_api::RuntimeContext; use topos_tce_api::RuntimeEvent as ApiEvent; use topos_tce_broadcast::event::ProtocolEvents; use topos_tce_broadcast::ReliableBroadcastClient; use topos_tce_gatekeeper::GatekeeperClient; use topos_tce_storage::store::ReadStore; use topos_tce_storage::types::CertificateDeliveredWithPositions; use topos_tce_storage::validator::ValidatorStore; use topos_tce_storage::StorageClient; use topos_tce_synchronizer::SynchronizerEvent; use tracing::{error, info, warn}; mod api; mod network; pub(crate) mod protocol; /// Top-level transducer main app context & driver (alike) /// /// Implements <...Host> traits for network and Api, listens for protocol events in events /// (store is not active component). 
/// /// In the end we shall come to design where this struct receives /// config+data as input and runs app returning data as output /// pub struct AppContext { pub is_validator: bool, pub events: mpsc::Sender, pub tce_cli: ReliableBroadcastClient, pub network_client: NetworkClient, pub api_client: ApiClient, pub pending_storage: StorageClient, pub gatekeeper: GatekeeperClient, pub delivery_latency: HashMap, pub validator_store: Arc, pub api_context: RuntimeContext, } impl AppContext { // Default previous certificate id for first certificate in the subnet // TODO: Remove, it will be genesis certificate id retrieved from Topos Subnet const DUMMY_INITIAL_CERTIFICATE_ID: CertificateId = CertificateId::from_array([0u8; topos_core::uci::CERTIFICATE_ID_LENGTH]); /// Factory #[allow(clippy::too_many_arguments)] pub fn new( is_validator: bool, pending_storage: StorageClient, tce_cli: ReliableBroadcastClient, network_client: NetworkClient, api_client: ApiClient, gatekeeper: GatekeeperClient, validator_store: Arc, api_context: RuntimeContext, ) -> (Self, mpsc::Receiver) { let (events, receiver) = mpsc::channel(100); ( Self { is_validator, events, tce_cli, network_client, api_client, pending_storage, gatekeeper, delivery_latency: Default::default(), validator_store, api_context, }, receiver, ) } /// Main processing loop #[allow(clippy::too_many_arguments)] pub async fn run( mut self, mut network_stream: impl Stream + Unpin, mut tce_stream: impl Stream + Unpin, mut api_stream: impl Stream + Unpin, mut synchronizer_stream: impl Stream + Unpin, mut broadcast_stream: impl Stream + Unpin, shutdown: (CancellationToken, mpsc::Sender<()>), ) { loop { tokio::select! 
{ Some(delivery) = broadcast_stream.next() => { let certificate_id = delivery.0.certificate.id; CERTIFICATE_DELIVERED_TOTAL.inc(); if let Some(timer) = self.delivery_latency.remove(&certificate_id) { let duration = timer.stop_and_record(); info!("Certificate {} delivered with total latency: {}s", certificate_id, duration); } } // protocol Some(evt) = tce_stream.next() => { self.on_protocol_event(evt).await; }, // network Some(net_evt) = network_stream.next() => { self.on_net_event(net_evt).await; } // api events Some(event) = api_stream.next() => { self.on_api_event(event).await; } // Synchronizer events Some(_event) = synchronizer_stream.next() => { } // Shutdown signal _ = shutdown.0.cancelled() => { info!("Shutting down TCE app context..."); if let Err(e) = self.shutdown().await { error!("Failed to shutdown the TCE app context: {e}"); } // Drop the sender to notify the TCE termination drop(shutdown.1); break; } } } warn!("Exiting main TCE app processing loop") } pub async fn shutdown(&mut self) -> Result<(), Box> { info!("Shutting down the TCE client..."); self.api_client.shutdown().await?; self.tce_cli.shutdown().await?; self.gatekeeper.shutdown().await?; self.network_client.shutdown().await?; let certificates_synced = self .validator_store .count_certificates_delivered() .map_err(|error| format!("Unable to count certificates delivered: {error}")) .unwrap(); let pending_certificates = self .validator_store .pending_pool_size() .map_err(|error| format!("Unable to count pending certificates: {error}")) .unwrap(); let precedence_pool_certificates = self .validator_store .precedence_pool_size() .map_err(|error| format!("Unable to count precedence pool certificates: {error}")) .unwrap(); info!( "Stopping with {} certificates delivered, {} pending certificates and {} certificates \ in the precedence pool", certificates_synced, pending_certificates, precedence_pool_certificates ); Ok(()) } } ================================================ FILE: 
crates/topos-tce/src/events.rs ================================================ #[derive(Debug)] pub enum Events { StableSample, } ================================================ FILE: crates/topos-tce/src/lib.rs ================================================ use futures::{Future, StreamExt}; use opentelemetry::global; use std::process::ExitStatus; use std::{future::IntoFuture, sync::Arc}; use tokio::{ spawn, sync::{broadcast, mpsc}, }; use tokio_stream::wrappers::BroadcastStream; use tokio_util::sync::CancellationToken; use topos_config::tce::TceConfig; use topos_core::api::grpc::tce::v1::synchronizer_service_server::SynchronizerServiceServer; use topos_crypto::{messages::MessageSigner, validator_id::ValidatorId}; use topos_p2p::{ utils::{local_key_pair, local_key_pair_from_slice}, GrpcContext, GrpcRouter, }; use topos_tce_broadcast::{ReliableBroadcastClient, ReliableBroadcastConfig}; use topos_tce_storage::{store::ReadStore, validator::ValidatorStore, StorageClient}; use topos_tce_synchronizer::SynchronizerService; use tracing::{debug, info, warn}; mod app_context; pub mod events; #[cfg(test)] mod tests; pub use app_context::AppContext; use topos_config::tce::{AuthKey, StorageConfiguration}; // TODO: Estimate on the max broadcast throughput, could need to be override by config const BROADCAST_CHANNEL_SIZE: usize = 10_000; pub async fn launch( config: &TceConfig, shutdown: (CancellationToken, mpsc::Sender<()>), ) -> Result> { let cancel = shutdown.0.clone(); let run_fut = run(config, shutdown); let app_context_run = tokio::select! 
{ _ = cancel.cancelled() => { return Err(Box::from("Killed before readiness".to_string())); } result = run_fut => { match result { Ok(app_context_run)=> app_context_run, Err(error) => return Err(error) } } }; app_context_run.await; global::shutdown_tracer_provider(); Ok(ExitStatus::default()) } pub async fn run( config: &TceConfig, shutdown: (CancellationToken, mpsc::Sender<()>), ) -> Result, Box> { // Preboot phase - start topos_metrics::init_metrics(); let key = match config.auth_key.as_ref() { Some(AuthKey::Seed(seed)) => local_key_pair_from_slice(&seed[..]), Some(AuthKey::PrivateKey(pk)) => topos_p2p::utils::keypair_from_protobuf_encoding(&pk[..]), None => local_key_pair(None), }; let message_signer = match &config.signing_key { Some(AuthKey::PrivateKey(pk)) => Arc::new(MessageSigner::new(&pk[..])?), _ => return Err(Box::from("Error, no singing key".to_string())), }; let validator_id: ValidatorId = message_signer.public_address.into(); let public_address = validator_id.to_string(); warn!("Public node address: {public_address}"); let peer_id = key.public().to_peer_id(); warn!("I am {peer_id}"); tracing::Span::current().record("peer_id", &peer_id.to_string()); let mut boot_peers = config.boot_peers.clone(); // Remove myself from the bootnode list boot_peers.retain(|(p, _)| *p != peer_id); let is_validator = config.validators.contains(&validator_id); // Preboot phase - stop // Healthiness phase - start debug!("Starting the Storage"); let path = if let StorageConfiguration::RocksDB(Some(ref path)) = config.storage { path } else { return Err(Box::new(std::io::Error::new( std::io::ErrorKind::Other, format!("Unsupported storage type {:?}", config.storage), ))); }; let validator_store = ValidatorStore::new(path) .map_err(|error| format!("Unable to create validator store: {error}"))?; let fullnode_store = validator_store.fullnode_store(); let storage_client = StorageClient::new(validator_store.clone()); let certificates_synced = fullnode_store 
.count_certificates_delivered() .map_err(|error| format!("Unable to count certificates delivered: {error}"))?; let pending_certificates = validator_store .pending_pool_size() .map_err(|error| format!("Unable to count pending certificates: {error}"))?; let precedence_pool_certificates = validator_store .precedence_pool_size() .map_err(|error| format!("Unable to count precedence pool certificates: {error}"))?; info!( "Storage initialized with {} certificates delivered, {} pending certificates and {} \ certificates in the precedence pool", certificates_synced, pending_certificates, precedence_pool_certificates ); let grpc_context = GrpcContext::default().with_router( GrpcRouter::new(tonic::transport::Server::builder()).add_service( SynchronizerServiceServer::new(SynchronizerService { validator_store: validator_store.clone(), }), ), ); let (network_client, mut event_stream, network_runtime) = topos_p2p::network::builder() .peer_key(key) .listen_addresses(config.p2p.listen_addresses.clone()) .minimum_cluster_size(config.minimum_tce_cluster_size) .public_addresses(config.p2p.public_addresses.clone()) .known_peers(&boot_peers) .grpc_context(grpc_context) .build() .await?; debug!("Starting the p2p network"); let _network_handle = network_runtime.bootstrap(&mut event_stream).await?; debug!("P2P layer bootstrapped"); debug!("Creating the Synchronizer"); let (synchronizer_runtime, synchronizer_stream) = topos_tce_synchronizer::Synchronizer::builder() .with_config(config.synchronization.clone()) .with_shutdown(shutdown.0.child_token()) .with_store(validator_store.clone()) .with_network_client(network_client.clone()) .build()?; debug!("Synchronizer created"); debug!("Starting gRPC api"); let (broadcast_sender, broadcast_receiver) = broadcast::channel(BROADCAST_CHANNEL_SIZE); let (api_client, api_stream, ctx) = topos_tce_api::Runtime::builder() .with_peer_id(peer_id.to_string()) .with_broadcast_stream(broadcast_receiver.resubscribe()) .serve_grpc_addr(config.grpc_api_addr) 
.serve_graphql_addr(config.graphql_api_addr) .serve_metrics_addr(config.metrics_api_addr) .store(validator_store.clone()) .storage(storage_client.clone()) .build_and_launch() .await; debug!("gRPC api started"); // Healthiness phase - stop debug!("Starting the gatekeeper"); let (gatekeeper_client, gatekeeper_runtime) = topos_tce_gatekeeper::Gatekeeper::builder().await?; spawn(gatekeeper_runtime.into_future()); debug!("Gatekeeper started"); debug!("Starting reliable broadcast"); let (tce_cli, tce_stream) = ReliableBroadcastClient::new( ReliableBroadcastConfig { tce_params: config.tce_params.clone(), validator_id, validators: config.validators.clone(), message_signer, }, validator_store.clone(), broadcast_sender, ) .await; debug!("Reliable broadcast started"); spawn(synchronizer_runtime.into_future()); // setup transport-tce-storage-api connector let (app_context, _tce_stream) = AppContext::new( is_validator, storage_client, tce_cli, network_client, api_client, gatekeeper_client, validator_store, ctx, ); Ok(app_context.run( event_stream, tce_stream, api_stream, synchronizer_stream, BroadcastStream::new(broadcast_receiver).filter_map(|v| futures::future::ready(v.ok())), shutdown, )) } ================================================ FILE: crates/topos-tce/src/tests/api.rs ================================================ use std::sync::Arc; use rstest::rstest; use test_log::test; use tokio::sync::{mpsc, oneshot}; use topos_crypto::messages::MessageSigner; use topos_tce_storage::{store::WriteStore, types::PendingResult}; use topos_test_sdk::{ certificates::create_certificate_chain, constants::{SOURCE_SUBNET_ID_1, TARGET_SUBNET_ID_1}, }; use crate::AppContext; use super::setup_test; #[rstest] #[test(tokio::test)] async fn handle_new_certificate( #[future] setup_test: ( AppContext, mpsc::Receiver, Arc, ), ) { let (mut context, _, _) = setup_test.await; let mut certificates = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 1); let certificate = 
certificates.pop().unwrap().certificate; let (sender, receiver) = oneshot::channel(); context .on_api_event(topos_tce_api::RuntimeEvent::CertificateSubmitted { certificate: Box::new(certificate), sender, }) .await; let response = receiver.await; assert!(matches!(response, Ok(Ok(PendingResult::InPending(_))))); } #[rstest] #[test(tokio::test)] async fn handle_certificate_in_precedence_pool( #[future] setup_test: ( AppContext, mpsc::Receiver, Arc, ), ) { let (mut context, _, _) = setup_test.await; let mut certificates = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 2); let certificate = certificates.pop().unwrap().certificate; let (sender, receiver) = oneshot::channel(); context .on_api_event(topos_tce_api::RuntimeEvent::CertificateSubmitted { certificate: Box::new(certificate), sender, }) .await; let response = receiver.await; assert!(matches!(response, Ok(Ok(PendingResult::AwaitPrecedence)))); } #[rstest] #[test(tokio::test)] async fn handle_certificate_already_delivered( #[future] setup_test: ( AppContext, mpsc::Receiver, Arc, ), ) { let (mut context, _, _) = setup_test.await; let mut certificates = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 1); let certificate_delivered = certificates.pop().unwrap(); _ = context .validator_store .insert_certificate_delivered(&certificate_delivered) .await .unwrap(); let certificate = certificate_delivered.certificate; let (sender, receiver) = oneshot::channel(); context .on_api_event(topos_tce_api::RuntimeEvent::CertificateSubmitted { certificate: Box::new(certificate), sender, }) .await; let response = receiver.await; assert!(matches!(response, Ok(Ok(PendingResult::AlreadyDelivered)))); } ================================================ FILE: crates/topos-tce/src/tests/mod.rs ================================================ use libp2p::PeerId; use rstest::{fixture, rstest}; use std::{collections::HashSet, future::IntoFuture, sync::Arc}; use tokio_stream::Stream; use 
topos_tce_api::RuntimeEvent; use topos_tce_broadcast::event::ProtocolEvents; use topos_tce_gatekeeper::Gatekeeper; use tokio::sync::{broadcast, mpsc}; use topos_crypto::messages::MessageSigner; use topos_p2p::{utils::GrpcOverP2P, NetworkClient}; use topos_tce_broadcast::{ReliableBroadcastClient, ReliableBroadcastConfig}; use topos_tce_storage::{validator::ValidatorStore, StorageClient}; use topos_test_sdk::{ certificates::create_certificate_chain, constants::{CERTIFICATE_ID_1, SOURCE_SUBNET_ID_1, TARGET_SUBNET_ID_1}, storage::create_validator_store, tce::public_api::{create_public_api, PublicApiContext}, }; use crate::AppContext; mod api; mod network; #[rstest] #[tokio::test] async fn non_validator_publish_gossip( #[future] setup_test: ( AppContext, mpsc::Receiver, Arc, ), ) { let (mut context, mut p2p_receiver, _) = setup_test.await; let certificates = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 1); context .on_protocol_event(ProtocolEvents::Gossip { cert: certificates[0].certificate.clone(), }) .await; assert!(matches!( p2p_receiver.try_recv(), Ok(topos_p2p::Command::Gossip { topic, .. 
}) if topic == "topos_gossip" )); } #[rstest] #[tokio::test] async fn non_validator_do_not_publish_echo( #[future] setup_test: ( AppContext, mpsc::Receiver, Arc, ), ) { let (mut context, mut p2p_receiver, message_signer) = setup_test.await; context .on_protocol_event(ProtocolEvents::Echo { certificate_id: CERTIFICATE_ID_1, signature: message_signer.sign_message(&[]).ok().unwrap(), validator_id: message_signer.public_address.into(), }) .await; assert!(p2p_receiver.try_recv().is_err(),); } #[rstest] #[tokio::test] async fn non_validator_do_not_publish_ready( #[future] setup_test: ( AppContext, mpsc::Receiver, Arc, ), ) { let (mut context, mut p2p_receiver, message_signer) = setup_test.await; context .on_protocol_event(ProtocolEvents::Ready { certificate_id: CERTIFICATE_ID_1, signature: message_signer.sign_message(&[]).ok().unwrap(), validator_id: message_signer.public_address.into(), }) .await; assert!(p2p_receiver.try_recv().is_err(),); } #[fixture] pub async fn setup_test( #[future] create_validator_store: Arc, #[future] create_public_api: (PublicApiContext, impl Stream), ) -> ( AppContext, mpsc::Receiver, Arc, ) { let validator_store = create_validator_store.await; let is_validator = false; let message_signer = Arc::new(MessageSigner::new(&[5u8; 32]).unwrap()); let validator_id = message_signer.public_address.into(); let (broadcast_sender, _) = broadcast::channel(1); let (tce_cli, _) = ReliableBroadcastClient::new( ReliableBroadcastConfig { tce_params: topos_config::tce::broadcast::ReliableBroadcastParams::default(), validator_id, validators: HashSet::new(), message_signer: message_signer.clone(), }, validator_store.clone(), broadcast_sender, ) .await; let (shutdown_p2p, _) = mpsc::channel(1); let (p2p_sender, p2p_receiver) = mpsc::channel(1); let grpc_over_p2p = GrpcOverP2P::new(p2p_sender.clone()); let network_client = NetworkClient { retry_ttl: 10, local_peer_id: PeerId::random(), sender: p2p_sender, grpc_over_p2p, shutdown_channel: shutdown_p2p, }; let 
(api_context, _api_stream) = create_public_api.await; let api_client = api_context.client; let (gatekeeper_client, _) = Gatekeeper::builder().into_future().await.unwrap(); let (context, _) = AppContext::new( is_validator, StorageClient::new(validator_store.clone()), tce_cli, network_client, api_client, gatekeeper_client, validator_store, api_context.api_context.unwrap(), ); (context, p2p_receiver, message_signer) } ================================================ FILE: crates/topos-tce/src/tests/network.rs ================================================ use std::sync::Arc; use libp2p::PeerId; use prost::Message; use rstest::rstest; use test_log::test; use tokio::sync::mpsc; use topos_core::api::grpc::tce::v1::{double_echo_request, DoubleEchoRequest, Echo, Gossip, Ready}; use topos_crypto::{messages::MessageSigner, validator_id::ValidatorId}; use topos_tce_storage::store::WriteStore; use topos_test_sdk::{ certificates::create_certificate_chain, constants::{SOURCE_SUBNET_ID_1, TARGET_SUBNET_ID_1}, }; use crate::AppContext; use super::setup_test; #[rstest] #[test(tokio::test)] async fn handle_gossip( #[future] setup_test: ( AppContext, mpsc::Receiver, Arc, ), ) { let (mut context, _, _) = setup_test.await; let mut certificates = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 1); let certificate = certificates.pop().unwrap().certificate; let msg = DoubleEchoRequest { request: Some(double_echo_request::Request::Gossip(Gossip { certificate: Some(certificate.into()), })), }; context .on_net_event(topos_p2p::Event::Gossip { from: PeerId::random(), data: msg.encode_to_vec(), }) .await; } #[rstest] #[test(tokio::test)] async fn handle_echo( #[future] setup_test: ( AppContext, mpsc::Receiver, Arc, ), ) { let (mut context, _, message_signer) = setup_test.await; let mut certificates = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 1); let certificate = certificates.pop().unwrap().certificate; let validator_id: ValidatorId = 
message_signer.public_address.into(); let msg = DoubleEchoRequest { request: Some(double_echo_request::Request::Echo(Echo { certificate_id: Some(certificate.id.into()), signature: Some(message_signer.sign_message(&[]).ok().unwrap().into()), validator_id: Some(validator_id.into()), })), }; context .on_net_event(topos_p2p::Event::Gossip { from: PeerId::random(), data: msg.encode_to_vec(), }) .await; } #[rstest] #[test(tokio::test)] async fn handle_ready( #[future] setup_test: ( AppContext, mpsc::Receiver, Arc, ), ) { let (mut context, _, message_signer) = setup_test.await; let mut certificates = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 1); let certificate = certificates.pop().unwrap().certificate; let validator_id: ValidatorId = message_signer.public_address.into(); let msg = DoubleEchoRequest { request: Some(double_echo_request::Request::Ready(Ready { certificate_id: Some(certificate.id.into()), signature: Some(message_signer.sign_message(&[]).ok().unwrap().into()), validator_id: Some(validator_id.into()), })), }; context .on_net_event(topos_p2p::Event::Gossip { from: PeerId::random(), data: msg.encode_to_vec(), }) .await; } #[rstest] #[test(tokio::test)] async fn handle_already_delivered( #[future] setup_test: ( AppContext, mpsc::Receiver, Arc, ), ) { let (mut context, _, _) = setup_test.await; let mut certificates = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 1); let certificate_delivered = certificates.pop().unwrap(); let certificate = certificate_delivered.certificate.clone(); let msg = DoubleEchoRequest { request: Some(double_echo_request::Request::Gossip(Gossip { certificate: Some(certificate.into()), })), }; _ = context .validator_store .insert_certificate_delivered(&certificate_delivered) .await .unwrap(); context .on_net_event(topos_p2p::Event::Gossip { from: PeerId::random(), data: msg.encode_to_vec(), }) .await; } ================================================ FILE: crates/topos-tce-api/Cargo.toml 
================================================ [package] name = "topos-tce-api" version = "0.1.0" edition = "2021" [lints] workspace = true [dependencies] topos-p2p = { path = "../topos-p2p" } topos-core = { workspace = true, features = ["uci", "api"] } topos-metrics = { path = "../topos-metrics" } topos-tce-storage = { path = "../topos-tce-storage" } async-graphql-axum.workspace = true async-graphql.workspace = true async-stream.workspace = true async-trait.workspace = true axum.workspace = true base64ct.workspace = true futures.workspace = true hex.workspace = true http.workspace = true hyper.workspace = true prometheus-client.workspace = true serde.workspace = true thiserror.workspace = true tokio-stream.workspace = true tokio.workspace = true tokio-util.workspace = true tonic.workspace = true tower-http.workspace = true tower.workspace = true tracing.workspace = true uuid.workspace = true tonic-health = "0.11.0" tonic-reflection = "0.11.0" pin-project = "1.0.12" async-recursion = "1.0" [dev-dependencies] bytes.workspace = true prost.workspace = true test-log.workspace = true reqwest.workspace = true serde_json.workspace = true tracing-subscriber = { workspace = true, features = ["env-filter", "fmt"] } env_logger.workspace = true http = "0.2.8" http-body = "0.4.5" rstest = { workspace = true, features = ["async-timeout"] } topos-test-sdk = { path = "../topos-test-sdk/" } ================================================ FILE: crates/topos-tce-api/src/graphql/builder.rs ================================================ use std::{net::SocketAddr, sync::Arc}; use async_graphql::{EmptyMutation, Schema}; use async_graphql_axum::GraphQLSubscription; use axum::{extract::Extension, routing::get, Router, Server}; use http::{header, Method}; use tokio::sync::mpsc; use tower_http::cors::{Any, CorsLayer}; use crate::{ graphql::{ query::{QueryRoot, ServiceSchema}, routes::{graphql_playground, health}, }, runtime::InternalRuntimeCommand, }; use 
topos_tce_storage::validator::ValidatorStore; use super::query::SubscriptionRoot; #[derive(Default)] pub struct ServerBuilder { store: Option>, serve_addr: Option, runtime: Option>, } impl ServerBuilder { /// Sets the runtime command channel /// /// Mostly used to manage Transient streams pub(crate) fn runtime(mut self, runtime: mpsc::Sender) -> Self { self.runtime = Some(runtime); self } pub(crate) fn store(mut self, store: Arc) -> Self { self.store = Some(store); self } pub(crate) fn serve_addr(mut self, addr: Option) -> Self { self.serve_addr = addr; self } pub async fn build( mut self, ) -> Server> { let cors = CorsLayer::new() // allow `GET` and `POST` when accessing the resource .allow_methods([Method::GET, Method::POST]) // allow 'application/json' requests .allow_headers([header::CONTENT_TYPE]) // allow requests from any origin .allow_origin(Any); let store = self .store .take() .expect("Cannot build GraphQL server without a FullNode store"); let fullnode_store = store.fullnode_store(); let runtime = self .runtime .take() .expect("Cannot build GraphQL server without the internal runtime channel"); let schema: ServiceSchema = Schema::build(QueryRoot, EmptyMutation, SubscriptionRoot) .data(store) .data(fullnode_store) .data(runtime) .finish(); let app = Router::new() .route( "/", get(graphql_playground) .post_service(async_graphql_axum::GraphQL::new(schema.clone())), ) .route_service("/ws", GraphQLSubscription::new(schema.clone())) .route("/health", get(health)) .layer(cors) .layer(Extension(schema)); let serve_addr = self.serve_addr.take().expect("Server address is not set"); Server::bind(&serve_addr).serve(app.into_make_service()) } } ================================================ FILE: crates/topos-tce-api/src/graphql/filter.rs ================================================ pub(crate) enum FilterIs { Source, Target, } ================================================ FILE: crates/topos-tce-api/src/graphql/mod.rs 
================================================ pub mod builder; mod filter; mod query; mod routes; #[cfg(test)] mod tests; ================================================ FILE: crates/topos-tce-api/src/graphql/query.rs ================================================ use std::collections::HashMap; use std::sync::Arc; use async_graphql::{Context, EmptyMutation, Object, Schema, Subscription}; use async_trait::async_trait; use futures::{Stream, StreamExt}; use tokio::sync::{mpsc, oneshot}; use topos_core::api::graphql::certificate::UndeliveredCertificate; use topos_core::api::graphql::checkpoint::SourceStreamPosition; use topos_core::api::graphql::errors::GraphQLServerError; use topos_core::api::graphql::filter::SubnetFilter; use topos_core::api::graphql::{ certificate::{Certificate, CertificateId}, checkpoint::SourceCheckpointInput, query::CertificateQuery, }; use topos_core::types::stream::CertificateSourceStreamPosition; use topos_metrics::{STORAGE_PENDING_POOL_COUNT, STORAGE_PRECEDENCE_POOL_COUNT}; use topos_tce_storage::fullnode::FullNodeStore; use topos_tce_storage::store::ReadStore; use topos_tce_storage::validator::ValidatorStore; use tracing::debug; use crate::runtime::InternalRuntimeCommand; use crate::stream::TransientStream; use super::filter::FilterIs; pub struct QueryRoot; pub(crate) type ServiceSchema = Schema; #[async_trait] impl CertificateQuery for QueryRoot { async fn certificates_per_subnet( ctx: &Context<'_>, from_source_checkpoint: SourceCheckpointInput, first: usize, ) -> Result, GraphQLServerError> { let store = ctx.data::>().map_err(|_| { tracing::error!("Failed to get store from context"); GraphQLServerError::ParseDataConnector })?; let mut certificates = Vec::default(); for (index, _) in from_source_checkpoint.source_subnet_ids.iter().enumerate() { let subnet_id: topos_core::uci::SubnetId = (&from_source_checkpoint.positions[index] .source_subnet_id) .try_into() .map_err(|_| GraphQLServerError::ParseSubnetId)?; let position = 
from_source_checkpoint.positions[index].position.into(); let certificates_with_position = store .get_source_stream_certificates_from_position( CertificateSourceStreamPosition { subnet_id, position, }, first, ) .map_err(|_| GraphQLServerError::StorageError)?; debug!("Returned from storage: {certificates_with_position:?}"); certificates.extend( certificates_with_position .into_iter() .map(|(ref c, _)| c.into()), ); } Ok(certificates) } async fn certificate_by_id( ctx: &Context<'_>, certificate_id: CertificateId, ) -> Result { let store = ctx.data::>().map_err(|_| { tracing::error!("Failed to get storage client from context"); GraphQLServerError::ParseDataConnector })?; store .get_certificate( &certificate_id .try_into() .map_err(|_| GraphQLServerError::ParseCertificateId)?, ) .map_err(|_| GraphQLServerError::StorageError) .and_then(|c| { c.map(|ref c| c.into()) .ok_or(GraphQLServerError::StorageError) }) } } #[Object] impl QueryRoot { /// The endpoint for the GraphQL API, calling our trait implementation on the QueryRoot object async fn certificates( &self, ctx: &Context<'_>, from_source_checkpoint: SourceCheckpointInput, first: usize, ) -> Result, GraphQLServerError> { Self::certificates_per_subnet(ctx, from_source_checkpoint, first).await } async fn certificate( &self, ctx: &Context<'_>, certificate_id: CertificateId, ) -> Result { Self::certificate_by_id(ctx, certificate_id).await } /// This endpoint is used to get the current storage pool stats. /// It returns the number of certificates in the pending and precedence pools. /// The values are estimated as having a precise count is costly. 
async fn get_storage_pool_stats( &self, ctx: &Context<'_>, ) -> Result, GraphQLServerError> { let mut stats = HashMap::new(); stats.insert("metrics_pending_pool", STORAGE_PENDING_POOL_COUNT.get()); stats.insert( "metrics_precedence_pool", STORAGE_PRECEDENCE_POOL_COUNT.get(), ); let store = ctx.data::>().map_err(|_| { tracing::error!("Failed to get store from context"); GraphQLServerError::ParseDataConnector })?; stats.insert( "count_pending_certificates", store .iter_pending_pool() .map_err(|_| GraphQLServerError::StorageError)? .count() .try_into() .unwrap_or(i64::MAX), ); stats.insert( "count_precedence_certificates", store .iter_precedence_pool() .map_err(|_| GraphQLServerError::StorageError)? .count() .try_into() .unwrap_or(i64::MAX), ); stats.insert( "pending_pool_size", store .pending_pool_size() .map_err(|_| GraphQLServerError::StorageError)? .try_into() .unwrap_or(i64::MAX), ); stats.insert( "precedence_pool_size", store .precedence_pool_size() .map_err(|_| GraphQLServerError::StorageError)? .try_into() .unwrap_or(i64::MAX), ); Ok(stats) } /// This endpoint is used to get the current checkpoint of the source streams. /// The checkpoint is the position of the last certificate delivered for each source stream. async fn get_checkpoint( &self, ctx: &Context<'_>, ) -> Result, GraphQLServerError> { let store = ctx.data::>().map_err(|_| { tracing::error!("Failed to get store from context"); GraphQLServerError::ParseDataConnector })?; let checkpoint = store .get_checkpoint() .map_err(|_| GraphQLServerError::StorageError)?; Ok(checkpoint .iter() .map(|(subnet_id, head)| SourceStreamPosition { source_subnet_id: subnet_id.into(), position: *head.position, certificate_id: head.certificate_id.into(), }) .collect()) } /// This endpoint is used to get the current pending pool. 
/// It returns [`CertificateId`] and the [`PendingCertificateId`] async fn get_pending_pool( &self, ctx: &Context<'_>, ) -> Result, GraphQLServerError> { let store = ctx.data::>().map_err(|_| { tracing::error!("Failed to get store from context"); GraphQLServerError::ParseDataConnector })?; Ok(store .iter_pending_pool() .map_err(|_| GraphQLServerError::StorageError)? .map(|(id, certificate)| (id, certificate.id.into())) .collect()) } /// This endpoint is used to check if a certificate has any child certificate in the precedence pool. async fn check_precedence( &self, ctx: &Context<'_>, certificate_id: CertificateId, ) -> Result, GraphQLServerError> { let store = ctx.data::>().map_err(|_| { tracing::error!("Failed to get store from context"); GraphQLServerError::ParseDataConnector })?; store .check_precedence( &certificate_id .try_into() .map_err(|_| GraphQLServerError::ParseCertificateId)?, ) .map_err(|_| GraphQLServerError::StorageError) .map(|certificate| certificate.as_ref().map(Into::into)) } } pub struct SubscriptionRoot; impl SubscriptionRoot { /// Try to create a new [`Stream`] of delivered [`Certificate`]s to be used in a GraphQL subscription. pub(crate) async fn new_transient_stream( &self, register: &mpsc::Sender, filter: Option, ) -> Result, GraphQLServerError> { let (sender, receiver) = oneshot::channel(); _ = register .send(InternalRuntimeCommand::NewTransientStream { sender }) .await; let stream: TransientStream = receiver .await .map_err(|_| { GraphQLServerError::InternalError( "Communication error trying to create a new transient stream", ) })? 
.map_err(|e| GraphQLServerError::TransientStream(e.to_string()))?; let filter: Option<(FilterIs, topos_core::uci::SubnetId)> = filter .map(|value| match value { SubnetFilter::Target(ref id) => id.try_into().map(|v| (FilterIs::Target, v)), SubnetFilter::Source(ref id) => id.try_into().map(|v| (FilterIs::Source, v)), }) .map_or(Ok(None), |v| v.map(Some)) .map_err(|_| GraphQLServerError::ParseSubnetId)?; Ok(stream .filter(move |c| { futures::future::ready( filter .as_ref() .map(|v| match v { (FilterIs::Source, id) => id == &c.certificate.source_subnet_id, (FilterIs::Target, id) => c.certificate.target_subnets.contains(id), }) .unwrap_or(true), ) }) .map(|c| c.as_ref().into())) } } #[Subscription] impl SubscriptionRoot { /// This endpoint is used to received delivered certificates. /// It uses a transient stream, which is a stream that is only valid for the current connection. /// /// Closing the connection will close the stream. /// Starting a new connection will start a new stream and the client will not receive /// any certificates that were delivered before the connection was started. 
async fn watch_delivered_certificates( &self, ctx: &Context<'_>, filter: Option, ) -> Result, GraphQLServerError> { let register = ctx .data::>() .map_err(|_| { tracing::error!("Failed to get the transient register client from context"); GraphQLServerError::ParseDataConnector })?; self.new_transient_stream(register, filter).await } } ================================================ FILE: crates/topos-tce-api/src/graphql/routes.rs ================================================ use async_graphql::http::GraphiQLSource; use axum::{ http::StatusCode, response::{Html, IntoResponse}, Json, }; use serde::Serialize; #[derive(Serialize)] struct Health { healthy: bool, } pub(crate) async fn health() -> impl IntoResponse { let health = Health { healthy: true }; (StatusCode::OK, Json(health)) } /// Build a GraphQL playground pub async fn graphql_playground() -> impl IntoResponse { Html( GraphiQLSource::build() .endpoint("/") .subscription_endpoint("/ws") .finish(), ) } ================================================ FILE: crates/topos-tce-api/src/graphql/tests.rs ================================================ use std::{sync::Arc, time::Duration}; use crate::{ graphql::query::{QueryRoot, SubscriptionRoot}, runtime::InternalRuntimeCommand, stream::TransientStream, }; use async_graphql::{http, value, EmptyMutation, Schema}; use futures::{SinkExt, StreamExt}; use rstest::rstest; use test_log::test; use tokio::sync::{mpsc, oneshot}; use topos_core::{ types::stream::Position, uci::{SubnetId, INITIAL_CERTIFICATE_ID}, }; use topos_test_sdk::{ certificates::{create_certificate, create_certificate_at_position}, constants::{SOURCE_SUBNET_ID_2, TARGET_SUBNET_ID_3}, }; use uuid::Uuid; #[rstest] #[test(tokio::test)] #[timeout(Duration::from_secs(2))] async fn requesting_transient_stream_from_graphql() { let (sender, mut receiver) = mpsc::channel(1); tokio::spawn(async move { let mut v = Vec::new(); while let Some(query) = receiver.recv().await { if let 
InternalRuntimeCommand::NewTransientStream { sender } = query { let (notifier, notifier_receiver) = oneshot::channel(); v.push(notifier_receiver); let (_s, inner) = mpsc::channel(10); _ = sender.send(Ok(TransientStream { stream_id: Uuid::new_v4(), notifier: Some(notifier), inner, })); } } }); let root = SubscriptionRoot {}; let result = root.new_transient_stream(&sender, None).await; assert!(result.is_ok()); } #[rstest] #[timeout(Duration::from_secs(4))] #[test(tokio::test)] async fn open_watch_certificate_delivered() { let (mut tx, rx) = futures::channel::mpsc::unbounded(); let (sender, mut receiver): (mpsc::Sender, _) = mpsc::channel(1); tokio::spawn(async move { let mut v = Vec::new(); while let Some(query) = receiver.recv().await { if let InternalRuntimeCommand::NewTransientStream { sender } = query { let (notifier, notifier_receiver) = oneshot::channel(); v.push(notifier_receiver); let (notify, inner) = mpsc::channel(10); _ = sender.send(Ok(TransientStream { stream_id: Uuid::new_v4(), notifier: Some(notifier), inner, })); tokio::time::sleep(Duration::from_millis(10)).await; let certificate = create_certificate_at_position( Position::ZERO, create_certificate( SOURCE_SUBNET_ID_2, &[TARGET_SUBNET_ID_3], Some(INITIAL_CERTIFICATE_ID), ), ); _ = notify.send(Arc::new(certificate)).await; } } }); let subscription = SubscriptionRoot {}; let schema = Schema::build(QueryRoot, EmptyMutation, subscription) .data(sender) .finish(); let mut stream = http::WebSocket::new(schema, rx, http::WebSocketProtocols::GraphQLWS); tx.send( serde_json::to_string(&value!({ "type": "connection_init", })) .unwrap(), ) .await .unwrap(); assert_eq!( serde_json::from_str::(&stream.next().await.unwrap().unwrap_text()) .unwrap(), serde_json::json!({ "type": "connection_ack", }), ); tx.send( serde_json::to_string(&value!({ "type": "start", "id": "1", "payload": { "query": "subscription onCertificates { watchDeliveredCertificates { id prevId proof signature sourceSubnetId stateRoot targetSubnets 
txRootHash receiptsRootHash verifier positions { source { sourceSubnetId position certificateId } } } }" }, })) .unwrap(), ) .await .unwrap(); let certificate = &serde_json::from_str::(&stream.next().await.unwrap().unwrap_text()) .unwrap(); let certificate = serde_json::from_value::( certificate["payload"]["data"]["watchDeliveredCertificates"].clone(), ) .unwrap(); let subnet_id: SubnetId = (&certificate.source_subnet_id).try_into().unwrap(); assert_eq!(subnet_id, SOURCE_SUBNET_ID_2,); assert_eq!( serde_json::from_str::(&stream.next().await.unwrap().unwrap_text()) .unwrap(), serde_json::json!({ "type": "complete", "id": "1", }), ); } ================================================ FILE: crates/topos-tce-api/src/grpc/builder.rs ================================================ use std::{net::SocketAddr, sync::Arc}; use futures::{future::BoxFuture, FutureExt}; use tokio::sync::{mpsc::Sender, RwLock}; use tonic_health::server::HealthReporter; use topos_core::api::grpc::tce::v1::{ api_service_server::ApiServiceServer, console_service_server::ConsoleServiceServer, StatusResponse, }; use topos_tce_storage::validator::ValidatorStore; use crate::runtime::InternalRuntimeCommand; use super::{console::TceConsoleService, TceGrpcService}; #[derive(Default)] pub struct ServerBuilder { store: Option>, local_peer_id: String, command_sender: Option>, serve_addr: Option, } impl ServerBuilder { pub(crate) fn with_store(mut self, store: Arc) -> Self { self.store = Some(store); self } pub(crate) fn with_peer_id(mut self, local_peer_id: String) -> Self { self.local_peer_id = local_peer_id; self } pub(crate) fn command_sender(mut self, sender: Sender) -> Self { self.command_sender = Some(sender); self } pub(crate) fn serve_addr(mut self, addr: Option) -> Self { self.serve_addr = addr; self } pub async fn build( mut self, ) -> ( HealthReporter, Arc>, BoxFuture<'static, Result<(), tonic::transport::Error>>, ) { let command_sender = self .command_sender .take() .expect("Cannot build gRPC 
without an InternalRuntimeCommand sender"); // We don't do active sampling at the start of the node, // but give it a fixed set of validators from the genesis file. // So as soon as the node starts it is ready to send and receive ECHO messages. let status = Arc::new(RwLock::new(StatusResponse { has_active_sample: true, })); let console = ConsoleServiceServer::new(TceConsoleService { command_sender: command_sender.clone(), status: status.clone(), }); let store = self .store .take() .expect("Cannot build GraphQL server without a FullNode store"); let service = ApiServiceServer::new(TceGrpcService { store, command_sender, }); let (mut health_reporter, health_service) = tonic_health::server::health_reporter(); health_reporter .set_serving::>() .await; let reflexion = tonic_reflection::server::Builder::configure() .register_encoded_file_descriptor_set(topos_core::api::grpc::FILE_DESCRIPTOR_SET) .build() .expect("Cannot build gRPC because of FILE_DESCRIPTOR_SET error"); let serve_addr = self .serve_addr .take() .expect("Cannot build gRPC without a valid serve_addr"); let grpc = tonic::transport::Server::builder() .add_service(health_service) .add_service(service) .add_service(console) .add_service(reflexion) .serve(serve_addr) .boxed(); (health_reporter, status, grpc) } } ================================================ FILE: crates/topos-tce-api/src/grpc/console.rs ================================================ use std::sync::Arc; use tokio::sync::mpsc::Sender; use crate::runtime::InternalRuntimeCommand; use tokio::sync::RwLock; use tonic::{Request, Response, Status}; use topos_core::api::grpc::tce::v1::{ console_service_server::ConsoleService, StatusRequest, StatusResponse, }; pub(crate) struct TceConsoleService { // We want to allow this unused command_sender, because we need it in the future again. 
// We keep it so the architecture is already obvious where to put a command_sender // One example will be changing validators during the uptime of the network #[allow(dead_code)] pub(crate) command_sender: Sender, pub(crate) status: Arc>, } #[tonic::async_trait] impl ConsoleService for TceConsoleService { async fn status( &self, _request: Request, ) -> Result, Status> { let status = self.status.read().await; Ok(Response::new(status.clone())) } } ================================================ FILE: crates/topos-tce-api/src/grpc/messaging.rs ================================================ use tonic::Status; use topos_core::api::grpc::checkpoints::{TargetCheckpoint, TargetStreamPosition}; use topos_core::api::grpc::tce::v1::watch_certificates_request::Command; use topos_core::api::grpc::tce::v1::watch_certificates_request::OpenStream as GrpcOpenStream; use topos_core::api::grpc::tce::v1::watch_certificates_response::CertificatePushed as GrpcCertificatePushed; use topos_core::api::grpc::tce::v1::watch_certificates_response::Event; use topos_core::api::grpc::tce::v1::watch_certificates_response::StreamOpened as GrpcStreamOpened; use topos_core::types::CertificateDelivered; use topos_core::uci::SubnetId; pub enum InboundMessage { OpenStream(OpenStream), } pub struct OpenStream { pub(crate) target_checkpoint: TargetCheckpoint, } #[derive(Debug)] pub struct CertificatePushed { pub(crate) certificate: CertificateDelivered, pub(crate) positions: Vec, } #[derive(Debug)] pub enum OutboundMessage { StreamOpened(StreamOpened), CertificatePushed(Box), } #[derive(Debug)] pub struct StreamOpened { pub(crate) subnet_ids: Vec, } impl TryFrom for InboundMessage { type Error = Status; fn try_from(command: Command) -> Result { match command { Command::OpenStream(value) => Ok(OpenStream::try_from(value)?.into()), } } } impl TryFrom for OpenStream { type Error = Status; fn try_from(value: GrpcOpenStream) -> Result { Ok(Self { target_checkpoint: 
value.target_checkpoint.map(TryInto::try_into).map_or( Err(Status::invalid_argument("missing target_checkpoint")), |value| value.map_err(|_| Status::invalid_argument("invalid checkpoint")), )?, }) } } impl From for InboundMessage { fn from(value: OpenStream) -> Self { Self::OpenStream(value) } } impl From for Event { fn from(value: OutboundMessage) -> Self { match value { OutboundMessage::StreamOpened(StreamOpened { subnet_ids }) => { Self::StreamOpened(GrpcStreamOpened { subnet_ids: subnet_ids.into_iter().map(Into::into).collect(), }) } OutboundMessage::CertificatePushed(certificate_pushed) => { Self::CertificatePushed(GrpcCertificatePushed { certificate: Some(certificate_pushed.certificate.certificate.into()), positions: certificate_pushed .positions .into_iter() .map(Into::into) .collect(), }) } } } } ================================================ FILE: crates/topos-tce-api/src/grpc/mod.rs ================================================ use base64ct::{Base64, Encoding}; use futures::{FutureExt, Stream as FutureStream, StreamExt}; use std::pin::Pin; use std::sync::Arc; use tokio::sync::{mpsc, oneshot}; use tokio_stream::wrappers::ReceiverStream; use tonic::{Request, Response, Status, Streaming}; use topos_core::api::grpc::tce::v1::LastPendingCertificate; use topos_core::api::grpc::tce::v1::{ api_service_server::ApiService, GetLastPendingCertificatesRequest, GetLastPendingCertificatesResponse, GetSourceHeadRequest, GetSourceHeadResponse, SubmitCertificateRequest, SubmitCertificateResponse, WatchCertificatesRequest, WatchCertificatesResponse, }; use topos_core::uci::SubnetId; use topos_metrics::API_GRPC_CERTIFICATE_RECEIVED_TOTAL; use topos_tce_storage::validator::ValidatorStore; use tracing::{error, info, Span}; use uuid::Uuid; use crate::{ runtime::InternalRuntimeCommand, stream::{Stream, StreamError, StreamErrorKind}, }; use self::messaging::{InboundMessage, OutboundMessage}; pub(crate) mod console; #[cfg(test)] mod tests; const 
DEFAULT_CHANNEL_STREAM_CAPACITY: usize = 100; pub(crate) mod builder; pub(crate) mod messaging; pub(crate) struct TceGrpcService { store: Arc, command_sender: mpsc::Sender, } impl TceGrpcService { pub fn create_stream( rx: mpsc::Receiver, OutboundMessage), Status>>, ) -> Pin> + Send + 'static>> { Box::pin(ReceiverStream::new(rx).map(|response| match response { Ok((request_id, response)) => Ok(WatchCertificatesResponse { event: Some(response.into()), request_id: request_id.map(Into::into), }), Err(error) => Err(error), })) } pub fn parse_stream( message: Result, stream_id: Uuid, ) -> Result<(Option, InboundMessage), StreamError> { match message { Ok(WatchCertificatesRequest { request_id, command, }) => match command { Some(command) => match command.try_into() { Ok(inner_command) => Ok((request_id.map(Into::into), inner_command)), Err(_) => Err(StreamError::new(stream_id, StreamErrorKind::InvalidCommand)), }, None => Err(StreamError::new(stream_id, StreamErrorKind::InvalidCommand)), }, Err(error) => Err(StreamError::new( stream_id, StreamErrorKind::Transport(error.code()), )), } } } #[tonic::async_trait] impl ApiService for TceGrpcService { async fn submit_certificate( &self, request: Request, ) -> Result, Status> { async { let data = request.into_inner(); if let Some(certificate) = data.certificate { if let Some(ref id) = certificate.id { Span::current().record("certificate_id", id.to_string()); let (sender, receiver) = oneshot::channel(); // FIXME: remove certificate cloning (may be a lot of data) when we // resolve the issue with invalid certificate error let certificate = match certificate.clone().try_into() { Ok(c) => c, Err(e) => { error!( "Invalid certificate error: {e:?}, certificate: {certificate:?}" ); return Err(Status::invalid_argument(format!( "Can't submit invalid certificate: {e}" ))); } }; if self .command_sender .send(InternalRuntimeCommand::CertificateSubmitted { certificate: Box::new(certificate), sender, }) .await .is_err() { return 
Err(Status::internal("Can't submit certificate: sender dropped")); } else { API_GRPC_CERTIFICATE_RECEIVED_TOTAL.inc(); } receiver .map(|value| match value { Ok(Ok(_)) => Ok(Response::new(SubmitCertificateResponse {})), Ok(Err(_)) => Err(Status::internal("Can't submit certificate")), Err(_) => Err(Status::internal("Can't submit certificate")), }) .await } else { error!("No certificate id provided"); Err(Status::invalid_argument("Certificate is malformed")) } } else { Err(Status::invalid_argument("Certificate is malformed")) } } .await } /// This RPC allows a client to get last delivered source certificate /// for particular subnet async fn get_source_head( &self, request: Request, ) -> Result, Status> { let data = request.into_inner(); if let Some(subnet_id) = data.subnet_id { let (sender, receiver) = oneshot::channel(); let subnet_id = match subnet_id.try_into() { Ok(id) => id, Err(e) => { error!("Invalid subnet id: {e:?}"); return Err(Status::invalid_argument("Invalid subnet id")); } }; if self .command_sender .send(InternalRuntimeCommand::GetSourceHead { subnet_id, sender }) .await .is_err() { return Err(Status::internal( "Can't get delivered certificate position by source: sender dropped", )); } receiver .map(|value| { match value { Ok(Ok(response)) => Ok(match response { Some((position, certificate)) => Response::new(GetSourceHeadResponse { certificate: Some(certificate.clone().into()), position: Some( topos_core::api::grpc::shared::v1::positions::SourceStreamPosition { source_subnet_id: Some(certificate.source_subnet_id.into()), certificate_id: Some((*certificate.id.as_array()).into()), position, }, ), }), None => Response::new(GetSourceHeadResponse { certificate: None, position: None }) }), Ok(Err(crate::RuntimeError::UnknownSubnet(subnet_id))) => // Tce does not have Position::Zero certificate associated { Err(Status::internal(format!( "Unknown subnet, no genesis certificate associated with subnet id \ {}", &subnet_id ))) }, Ok(Err(e)) => 
Err(Status::internal(format!( "Can't get source head certificate position: {e}" ))), Err(e) => Err(Status::internal(format!( "Can't get source head certificate position: {e}" ))), } }) .await } else { Err(Status::invalid_argument("Certificate is malformed")) } } async fn get_last_pending_certificates( &self, request: Request, ) -> Result, Status> { let data = request.into_inner(); let subnet_ids = data.subnet_ids; let subnet_ids: Vec = subnet_ids .into_iter() .map(TryInto::try_into) .map(|v| v.map_err(|e| Status::internal(format!("Invalid subnet id: {e}")))) .collect::>()?; let last_pending_certificate = self .store .get_pending_certificates_for_subnets(&subnet_ids) .map_err(|e| Status::internal(format!("Can't get last pending certificates: {e}")))? .into_iter() .map(|(subnet_id, (index, maybe_certificate))| { (Base64::encode_string(subnet_id.as_array()), { maybe_certificate .map(|certificate| LastPendingCertificate { index, value: Some(certificate.into()), }) .unwrap_or(LastPendingCertificate { value: None, index: 0, }) }) }) .collect(); Ok(Response::new(GetLastPendingCertificatesResponse { last_pending_certificate, })) } ///Server streaming response type for the WatchCertificates method. 
type WatchCertificatesStream = Pin< Box> + Send + 'static>, >; /// This RPC allows a client to open a bidirectional stream with a TCE async fn watch_certificates( &self, request: Request>, ) -> Result, Status> { match request.remote_addr() { Some(addr) => info!(client.addr = %addr, "Starting a new stream"), None => info!(client.addr = %"", "Starting a new stream"), } // TODO: Use Cow let stream_id = Uuid::new_v4(); let inbound_stream = request .into_inner() .map(move |message| Self::parse_stream(message, stream_id)) .boxed(); let (command_sender, command_receiver) = mpsc::channel(2048); let (outbound_stream, rx) = mpsc::channel::, OutboundMessage), Status>>( DEFAULT_CHANNEL_STREAM_CAPACITY, ); let stream = Stream::new( stream_id, inbound_stream, outbound_stream, command_receiver, self.command_sender.clone(), ); if self .command_sender .send(InternalRuntimeCommand::NewStream { stream, command_sender, }) .await .is_err() { return Err(Status::internal("Can't submit certificate: sender dropped")); } Ok(Response::new( Self::create_stream(rx) as Self::WatchCertificatesStream )) } } ================================================ FILE: crates/topos-tce-api/src/grpc/tests.rs ================================================ use test_log::test; #[test(tokio::test)] #[ignore = "not yet implemented"] async fn respond_to_valid_certificate_submission() {} #[test(tokio::test)] #[ignore = "not yet implemented"] async fn respond_to_invalid_certificate_submission() {} ================================================ FILE: crates/topos-tce-api/src/lib.rs ================================================ mod graphql; mod grpc; mod metrics; mod runtime; mod stream; #[cfg(test)] mod tests; pub(crate) mod constants { /// Constant size of every channel in the crate pub(crate) const CHANNEL_SIZE: usize = 2048; /// Constant size of every transient stream channel in the crate pub(crate) const TRANSIENT_STREAM_CHANNEL_SIZE: usize = 1024; } pub use runtime::{ error::RuntimeError, Runtime, 
RuntimeClient, RuntimeCommand, RuntimeContext, RuntimeEvent, }; ================================================ FILE: crates/topos-tce-api/src/metrics/builder.rs ================================================ use std::net::SocketAddr; use topos_metrics::gather_metrics; use axum::{routing::get, Router, Server}; use tracing::info; #[derive(Default)] pub struct ServerBuilder { serve_addr: Option, } impl ServerBuilder { pub fn serve_addr(mut self, addr: Option) -> Self { self.serve_addr = addr; self } pub async fn build( mut self, ) -> Server> { let app = Router::new().route( "/metrics", get(|| async { let topos_metrics = gather_metrics(); let mut libp2p_metrics = String::new(); let reg = topos_p2p::constants::METRIC_REGISTRY.lock().await; _ = prometheus_client::encoding::text::encode(&mut libp2p_metrics, ®); format!("{topos_metrics}{libp2p_metrics}") }), ); let serve_addr = self .serve_addr .take() .expect("Metrics server address is not set"); info!("Starting metrics server on {}", serve_addr); Server::bind(&serve_addr).serve(app.into_make_service()) } } ================================================ FILE: crates/topos-tce-api/src/metrics/mod.rs ================================================ pub(crate) mod builder; ================================================ FILE: crates/topos-tce-api/src/runtime/builder.rs ================================================ use futures::Stream; use std::{collections::HashMap, net::SocketAddr, sync::Arc}; use tokio::{ spawn, sync::{broadcast, mpsc, oneshot, RwLock}, }; use tokio_stream::wrappers::ReceiverStream; use topos_core::api::grpc::tce::v1::StatusResponse; use topos_tce_storage::{ types::CertificateDeliveredWithPositions, validator::ValidatorStore, StorageClient, }; use tracing::Instrument; use crate::{ constants::CHANNEL_SIZE, graphql::builder::ServerBuilder as GraphQLBuilder, grpc::builder::ServerBuilder, metrics::builder::ServerBuilder as MetricsBuilder, Runtime, RuntimeClient, RuntimeEvent, }; #[derive(Default)] 
pub struct RuntimeBuilder { storage: Option, store: Option>, broadcast_stream: Option>, local_peer_id: String, grpc_socket_addr: Option, graphql_socket_addr: Option, metrics_socket_addr: Option, status: Option>, } impl RuntimeBuilder { pub fn with_broadcast_stream( mut self, stream: broadcast::Receiver, ) -> Self { self.broadcast_stream = Some(stream); self } pub fn with_peer_id(mut self, local_peer_id: String) -> Self { self.local_peer_id = local_peer_id; self } pub fn serve_grpc_addr(mut self, addr: SocketAddr) -> Self { self.grpc_socket_addr = Some(addr); self } pub fn serve_graphql_addr(mut self, addr: SocketAddr) -> Self { self.graphql_socket_addr = Some(addr); self } pub fn serve_metrics_addr(mut self, addr: SocketAddr) -> Self { self.metrics_socket_addr = Some(addr); self } pub fn tce_status(mut self, status: RwLock) -> Self { self.status = Some(status); self } pub fn store(mut self, store: Arc) -> Self { self.store = Some(store); self } pub fn storage(mut self, storage: StorageClient) -> Self { self.storage = Some(storage); self } pub async fn build_and_launch( mut self, ) -> ( RuntimeClient, impl Stream, RuntimeContext, ) { let (internal_runtime_command_sender, internal_runtime_command_receiver) = mpsc::channel(CHANNEL_SIZE); let (api_event_sender, api_event_receiver) = mpsc::channel(CHANNEL_SIZE); let (health_reporter, tce_status, grpc) = ServerBuilder::default() .with_store( self.store .clone() .take() .expect("Unable to build gRPC Server, Store is missing"), ) .with_peer_id(self.local_peer_id) .command_sender(internal_runtime_command_sender.clone()) .serve_addr(self.grpc_socket_addr) .build() .in_current_span() .await; let (command_sender, runtime_command_receiver) = mpsc::channel(CHANNEL_SIZE); let (shutdown_channel, shutdown_receiver) = mpsc::channel::>(1); let grpc_handler = spawn(grpc.in_current_span()); let graphql_handler = if let Some(graphql_addr) = self.graphql_socket_addr { tracing::info!("Serving GraphQL on {}", graphql_addr); let graphql = 
GraphQLBuilder::default() .store( self.store .take() .expect("Unable to build GraphQL Server, Store is missing"), ) .runtime(internal_runtime_command_sender.clone()) .serve_addr(Some(graphql_addr)) .build() .in_current_span(); spawn(graphql.await) } else { spawn(async move { tracing::info!("Not serving GraphQL"); Ok(()) }) }; let metrics_handler = if let Some(metrics_addr) = self.metrics_socket_addr { tracing::info!("Serving metrics on {}", metrics_addr); let metrics_server = MetricsBuilder::default() .serve_addr(Some(metrics_addr)) .build() .in_current_span(); spawn(metrics_server.await) } else { spawn(async move { tracing::info!("Not serving metrics"); Ok(()) }) }; let runtime = Runtime { sync_tasks: Default::default(), running_sync_tasks: Default::default(), broadcast_stream: self .broadcast_stream .expect("Unable to build Runtime, Broadcast Stream is missing"), storage: self .storage .take() .expect("Unable to build Runtime, Storage is missing"), active_streams: HashMap::new(), pending_streams: HashMap::new(), subnet_subscriptions: HashMap::new(), internal_runtime_command_receiver, runtime_command_receiver, health_reporter, api_event_sender, shutdown: shutdown_receiver, streams: Default::default(), transient_streams: HashMap::new(), }; let runtime_handler = spawn(runtime.launch()); ( RuntimeClient { command_sender, tce_status, shutdown_channel, }, ReceiverStream::new(api_event_receiver), RuntimeContext { grpc_handler, graphql_handler, metrics_handler, runtime_handler, }, ) } pub fn set_grpc_socket_addr(mut self, socket: Option) -> Self { self.grpc_socket_addr = socket; self } } #[derive(Debug)] pub struct RuntimeContext { grpc_handler: tokio::task::JoinHandle>, graphql_handler: tokio::task::JoinHandle>, metrics_handler: tokio::task::JoinHandle>, runtime_handler: tokio::task::JoinHandle<()>, } impl Drop for RuntimeContext { fn drop(&mut self) { tracing::warn!("Dropping RuntimeContext"); self.grpc_handler.abort(); self.graphql_handler.abort(); 
self.metrics_handler.abort(); self.runtime_handler.abort(); } } ================================================ FILE: crates/topos-tce-api/src/runtime/client.rs ================================================ use std::collections::HashMap; use std::sync::Arc; use super::RuntimeCommand; use futures::Future; use tokio::sync::{mpsc, oneshot, RwLock}; use topos_core::api::grpc::checkpoints::TargetStreamPosition; use topos_core::api::grpc::tce::v1::StatusResponse; use topos_core::types::CertificateDelivered; use topos_core::uci::SubnetId; use tracing::error; #[derive(Clone, Debug)] pub struct RuntimeClient { pub(crate) command_sender: mpsc::Sender, pub(crate) tce_status: Arc>, pub(crate) shutdown_channel: mpsc::Sender>, } impl RuntimeClient { pub fn dispatch_certificate( &self, certificate: CertificateDelivered, positions: HashMap, ) -> impl Future + 'static + Send { let sender = self.command_sender.clone(); async move { if let Err(error) = sender .send(RuntimeCommand::DispatchCertificate { certificate, positions, }) .await { error!("Can't dispatch certificate: {error:?}"); } } } pub async fn has_active_sample(&self) -> bool { self.tce_status.read().await.has_active_sample } pub async fn set_active_sample(&self, value: bool) { let mut status = self.tce_status.write().await; status.has_active_sample = value; } pub async fn shutdown(&self) -> Result<(), Box> { let (sender, receiver) = oneshot::channel(); self.shutdown_channel.send(sender).await?; Ok(receiver.await?) 
} } ================================================ FILE: crates/topos-tce-api/src/runtime/commands.rs ================================================ use std::collections::HashMap; use tokio::sync::{mpsc::Sender, oneshot}; use topos_core::api::grpc::checkpoints::TargetStreamPosition; use topos_core::types::CertificateDelivered; use topos_core::uci::{Certificate, SubnetId}; use topos_tce_storage::types::PendingResult; use uuid::Uuid; use crate::stream::{Stream, StreamCommand, TransientStream}; use super::error::RuntimeError; #[derive(Debug)] pub enum RuntimeCommand { /// Dispatch certificate to gRPC API Runtime in order to push it to listening open streams DispatchCertificate { certificate: CertificateDelivered, positions: HashMap, }, } #[derive(Debug)] pub(crate) enum InternalRuntimeCommand { /// When a new stream is open, this command is dispatch to manage the stream NewStream { stream: Stream, command_sender: Sender, }, /// Register a stream as subscriber for the given subnet_streams. /// Commands or certificates pointing to one of the subnet will be forward using the given Sender Register { stream_id: Uuid, #[allow(dead_code)] target_subnet_stream_positions: HashMap>, sender: oneshot::Sender>, }, /// Notify that a Stream has successfully handshake with the server Handshaked { stream_id: Uuid }, /// Dispatch when a certificate has been submitted to the TCE. /// This command will be used to trigger the DoubleEcho process. 
CertificateSubmitted { certificate: Box, sender: oneshot::Sender>, }, /// Get source head certificate by source subnet id GetSourceHead { subnet_id: SubnetId, sender: oneshot::Sender, RuntimeError>>, }, /// Ask for the creation of a new TransientStream NewTransientStream { sender: oneshot::Sender>, }, } ================================================ FILE: crates/topos-tce-api/src/runtime/error.rs ================================================ use thiserror::Error; use topos_core::uci::SubnetId; use topos_tce_storage::errors::StorageError; use uuid::Uuid; #[derive(Error, Debug)] pub enum RuntimeError { #[error("The pending stream {0} was not found")] PendingStreamNotFound(Uuid), #[error("Unable to get source head certificate for subnet id {0}: {1}")] UnableToGetSourceHead(SubnetId, String), #[error("Unknown subnet with subnet id {0}")] UnknownSubnet(SubnetId), #[error("Unexpected store error: {0}")] Store(#[from] StorageError), #[error("Communication error: {0}")] CommunicationError(String), } ================================================ FILE: crates/topos-tce-api/src/runtime/events.rs ================================================ use std::collections::HashMap; use std::collections::HashSet; use tokio::sync::oneshot; use topos_core::uci::{Certificate, SubnetId}; use topos_tce_storage::types::PendingResult; use super::error::RuntimeError; pub enum RuntimeEvent { CertificateSubmitted { certificate: Box, sender: oneshot::Sender>, }, GetSourceHead { subnet_id: SubnetId, sender: oneshot::Sender, RuntimeError>>, }, GetLastPendingCertificates { subnet_ids: HashSet, #[allow(clippy::type_complexity)] sender: oneshot::Sender>, RuntimeError>>, }, } ================================================ FILE: crates/topos-tce-api/src/runtime/mod.rs ================================================ use futures::{stream::FuturesUnordered, FutureExt, StreamExt, TryFutureExt}; use std::future::{Future, IntoFuture}; use std::{ collections::{HashMap, HashSet}, pin::Pin, 
sync::Arc, time::Duration, }; use tokio::{ sync::mpsc::{self, Receiver, Sender}, sync::{broadcast, oneshot}, }; use tokio_util::sync::CancellationToken; use tonic_health::server::HealthReporter; use topos_core::api::grpc::checkpoints::TargetStreamPosition; use topos_core::api::grpc::tce::v1::api_service_server::ApiServiceServer; use topos_core::types::CertificateDelivered; use topos_core::uci::SubnetId; use topos_tce_storage::{types::CertificateDeliveredWithPositions, StorageClient}; use tracing::{debug, error, info}; use uuid::Uuid; use crate::{ constants::TRANSIENT_STREAM_CHANNEL_SIZE, grpc::TceGrpcService, stream::{StreamCommand, StreamError, StreamErrorKind, TransientStream}, }; pub mod builder; pub use builder::RuntimeContext; mod client; mod commands; pub mod error; mod events; mod sync_task; #[cfg(test)] mod tests; pub use client::RuntimeClient; use self::builder::RuntimeBuilder; pub(crate) use self::commands::InternalRuntimeCommand; pub use self::commands::RuntimeCommand; pub use self::events::RuntimeEvent; use crate::runtime::sync_task::{RunningTasks, SyncTask}; pub(crate) type Streams = FuturesUnordered> + Send>>>; pub struct Runtime { /// Map of sync tasks and their stream id, so we can cancel them when a new stream /// with the same id is registered pub(crate) sync_tasks: HashMap, /// Sync tasks that were registered for this node. 
pub(crate) running_sync_tasks: RunningTasks, pub(crate) broadcast_stream: broadcast::Receiver, pub(crate) storage: StorageClient, pub(crate) transient_streams: HashMap>>, /// Streams that are currently active (with a valid handshake) pub(crate) active_streams: HashMap>, /// Streams that are currently in negotiation pub(crate) pending_streams: HashMap>, /// Mapping between a subnet_id and streams that are subscribed to it pub(crate) subnet_subscriptions: HashMap>, /// Receiver for Internal API command pub(crate) internal_runtime_command_receiver: Receiver, /// Receiver for Outside API command pub(crate) runtime_command_receiver: Receiver, /// HealthCheck reporter for gRPC pub(crate) health_reporter: HealthReporter, /// Sender that forward Event to the rest of the system pub(crate) api_event_sender: Sender, /// Shutdown signal receiver pub(crate) shutdown: mpsc::Receiver>, /// Spawned stream that manage a gRPC stream pub(crate) streams: Streams, } impl Runtime { pub fn builder() -> RuntimeBuilder { RuntimeBuilder::default() } pub async fn launch(mut self) { let mut health_update = tokio::time::interval(Duration::from_secs(1)); let shutdowned: Option> = loop { tokio::select! 
{ shutdown = self.shutdown.recv() => { break shutdown; }, _ = health_update.tick() => { self.health_reporter.set_serving::>().await; } Ok(certificate_delivered) = self.broadcast_stream.recv() => { let certificate = certificate_delivered.0; let certificate_id = certificate.certificate.id; let positions = certificate_delivered.1; let cmd = RuntimeCommand::DispatchCertificate { certificate, positions: positions .targets .into_iter() .map(|(subnet_id, certificate_target_stream_position)| { ( subnet_id, TargetStreamPosition { target_subnet_id: certificate_target_stream_position.target_subnet_id, source_subnet_id: certificate_target_stream_position.source_subnet_id, position: *certificate_target_stream_position.position, certificate_id: Some(certificate_id), }, ) }) .collect::>() }; self.handle_runtime_command(cmd).await; } Some(result) = self.streams.next() => { self.handle_stream_termination(result).await; } Some(internal_command) = self.internal_runtime_command_receiver.recv() => { self.handle_internal_command(internal_command).await; } Some(command) = self.runtime_command_receiver.recv() => { self.handle_runtime_command(command).await; } Some(result) = self.running_sync_tasks.next() => { debug!("SyncTask with StreamId: {:?} resulted in {:?}", result.0, result.1); } } }; if let Some(sender) = shutdowned { info!("Shutting down the TCE API service..."); _ = sender.send(()); } } async fn handle_stream_termination(&mut self, stream_result: Result) { match stream_result { Ok(stream_id) => { info!("Stream {stream_id} terminated gracefully"); self.active_streams.remove(&stream_id); self.pending_streams.remove(&stream_id); } Err(StreamError { stream_id, kind }) => match kind { StreamErrorKind::HandshakeFailed(_) | StreamErrorKind::InvalidCommand | StreamErrorKind::MalformedTargetCheckpoint | StreamErrorKind::Transport(_) | StreamErrorKind::PreStartError | StreamErrorKind::StreamClosed | StreamErrorKind::Timeout => { error!("Stream {stream_id} error: {kind:?}"); 
self.active_streams.remove(&stream_id); self.pending_streams.remove(&stream_id); } }, } } async fn handle_runtime_command(&mut self, command: RuntimeCommand) { match command { RuntimeCommand::DispatchCertificate { certificate, mut positions, } => { info!( "Received DispatchCertificate for certificate cert_id: {:?}", certificate.certificate.id ); // Collect target subnets from certificate cross chain transaction list let target_subnets = certificate .certificate .target_subnets .iter() .collect::>(); debug!( "Dispatching certificate cert_id: {:?} to target subnets: {:?}", &certificate.certificate.id, target_subnets ); // Notify all the transient streams that a new certificate is available // To avoid double allocation for each stream, we clone an Arc of the certificate. // Each stream will convert the UCI certificate into a GraphQL one and send it to the transient stream. let shared_certificate = Arc::new(certificate.clone()); for transient in self.transient_streams.values() { let sender = transient.clone(); let shared_certificate = shared_certificate.clone(); tokio::spawn(async move { _ = sender.send(shared_certificate).await; }); } for target_subnet_id in target_subnets { let target_subnet_id = *target_subnet_id; let target_position = positions.remove(&target_subnet_id); if let Some(stream_list) = self.subnet_subscriptions.get(&target_subnet_id) { let uuids: Vec<&Uuid> = stream_list.iter().collect(); for uuid in uuids { if let Some(sender) = self.active_streams.get(uuid) { let sender = sender.clone(); let certificate = certificate.clone(); info!("Sending certificate to {uuid}"); if let Some(target_position) = target_position.clone() { if let Err(error) = sender .send(StreamCommand::PushCertificate { certificate, positions: vec![target_position], }) .await { error!(%error, "Can't push certificate because the receiver is dropped"); } } else { error!( "Invalid target stream position for cert id {}, target \ subnet id {target_subnet_id}, dispatch failed", 
&certificate.certificate.id ); } } } } } } } } async fn handle_internal_command(&mut self, command: InternalRuntimeCommand) { match command { InternalRuntimeCommand::NewTransientStream { sender } => { let stream_id = Uuid::new_v4(); info!("Opening a new transient stream with UUID {stream_id}"); let (stream, receiver) = mpsc::channel(TRANSIENT_STREAM_CHANNEL_SIZE); let (shutdown, shutdown_recv) = oneshot::channel(); self.transient_streams.insert(stream_id, stream); self.streams.push( shutdown_recv .map_err(move |_| StreamError { stream_id, kind: StreamErrorKind::StreamClosed, }) .boxed(), ); if sender .send(Ok(TransientStream { stream_id, inner: receiver, notifier: Some(shutdown), })) .is_err() { error!("Unable to send new TransientStream"); _ = self.transient_streams.remove(&stream_id); } } InternalRuntimeCommand::NewStream { stream, command_sender, } => { let stream_id = stream.stream_id; info!("Opening a new stream with UUID {stream_id}"); self.pending_streams.insert(stream_id, command_sender); self.streams.push(Box::pin(stream.run())); } InternalRuntimeCommand::Handshaked { stream_id } => { if let Some(sender) = self.pending_streams.remove(&stream_id) { self.active_streams.insert(stream_id, sender); info!("Stream {stream_id} has successfully handshake"); } } InternalRuntimeCommand::Register { stream_id, sender, target_subnet_stream_positions, } => { info!("Stream {stream_id} is registered as subscriber"); if let Some(cancel_token) = self.sync_tasks.remove(&stream_id) { // Cancel the previous task cancel_token.cancel(); } let storage = self.storage.clone(); let notifier = self .active_streams .get(&stream_id) .or_else(|| self.pending_streams.get(&stream_id)) .cloned(); if let Err(error) = sender.send(Ok(())) { error!( ?error, "Failed to send response to the Stream, receiver is dropped" ); } if let Some(notifier) = notifier { // TODO: Rework to remove old subscriptions for target_subnet_id in target_subnet_stream_positions.keys() { self.subnet_subscriptions 
.entry(*target_subnet_id) .or_default() .insert(stream_id); } let cancel_token = CancellationToken::new(); let cloned_cancel_token = cancel_token.clone(); let task = SyncTask::new( stream_id, target_subnet_stream_positions, storage, notifier, cancel_token, ); self.running_sync_tasks.push(task.into_future()); self.sync_tasks.insert(stream_id, cloned_cancel_token); } } InternalRuntimeCommand::CertificateSubmitted { certificate, sender, } => { async move { info!( "A certificate has been submitted to the TCE {}", certificate.id ); if let Err(error) = self .api_event_sender .send(RuntimeEvent::CertificateSubmitted { certificate, sender, }) .await { error!( %error, "Can't send certificate submission to runtime, receiver is dropped" ); } } .await } InternalRuntimeCommand::GetSourceHead { subnet_id, sender } => { info!("Source head certificate has been requested for subnet id: {subnet_id}"); if let Err(error) = self .api_event_sender .send(RuntimeEvent::GetSourceHead { subnet_id, sender }) .await { error!( %error, "Can't request source head certificate, receiver is dropped" ); } } } } } ================================================ FILE: crates/topos-tce-api/src/runtime/sync_task.rs ================================================ use crate::stream::StreamCommand; use futures::stream::FuturesUnordered; use std::collections::hash_map::Entry; use std::collections::HashMap; use std::future::{Future, IntoFuture}; use std::pin::Pin; use tokio::sync::mpsc::error::SendError; use tokio::sync::mpsc::Sender; use tokio_util::sync::CancellationToken; use topos_core::api::grpc::checkpoints::TargetStreamPosition; use topos_core::types::stream::CertificateTargetStreamPosition; use topos_core::types::CertificateDelivered; use topos_core::uci::SubnetId; use topos_tce_storage::{FetchCertificatesFilter, FetchCertificatesPosition, StorageClient}; use tracing::{debug, error, info}; use uuid::Uuid; type TargetSubnetStreamPositions = HashMap>; pub(crate) type RunningTasks = FuturesUnordered + 
Send>>>; /// Status of a [`SyncTask`] #[derive(Debug)] pub(crate) enum SyncTaskStatus { /// The sync task is active and started running Running, /// The sync task failed and reported an error #[allow(dead_code)] Error(Box), /// The sync task exited gracefully and is done pushing certificates to the stream Done, /// The sync task was cancelled by a incoming stream with the same Uuid Cancelled, } #[derive(Debug)] pub(crate) enum SyncTaskError { /// The [`SyncTask`] failed to send a certificate to the stream SendingToStream { #[allow(dead_code)] error: Box>, }, /// Invalid certificate position was being fetched InvalidCertificatePosition, } /// When registering a stream, a [`SyncTask`] is started to fetch certificates from the storage /// and push them to the stream. /// /// The [`SyncTask`] is used to fetch certificates from the storage and push them to the stream. /// It is created when a new stream is registered and is cancelled when a stream with the same Uuid /// is being started. It is using the [`StorageClient`] to fetch certificates from the storage and /// a [`Sender`] part of a channel to push certificates to the stream. pub(crate) struct SyncTask { /// The status of the [`SyncTask`]. 
Can be used to check if the task is still running pub(crate) status: SyncTaskStatus, /// The stream with which the [`SyncTask`] is connected and pushes certificates to pub(crate) stream_id: Uuid, /// A map of subnet and the subnet pair (target and source subnet id), its position and the /// last certificate id delivered to the stream pub(crate) target_subnet_stream_positions: TargetSubnetStreamPositions, /// The connection to the database layer through a StorageClient pub(crate) store: StorageClient, /// The notifier is used to send certificates to the stream pub(crate) notifier: Sender, /// If a new stream is registered with the same Uuid, the sync task will be cancelled pub(crate) cancel_token: CancellationToken, } impl SyncTask { /// Creating a new SyncTask which will fetch certificates from the storage and pushes them to the stream pub(crate) fn new( stream_id: Uuid, target_subnet_stream_positions: TargetSubnetStreamPositions, store: StorageClient, notifier: Sender, cancel_token: CancellationToken, ) -> Self { Self { status: SyncTaskStatus::Running, stream_id, target_subnet_stream_positions, store, notifier, cancel_token, } } } impl IntoFuture for SyncTask { type Output = (Uuid, SyncTaskStatus); type IntoFuture = Pin + Send + 'static>>; fn into_future(mut self) -> Self::IntoFuture { Box::pin(async move { debug!("Sync task started for stream {}", self.stream_id); let mut collector: Vec<(CertificateDelivered, FetchCertificatesPosition)> = Vec::new(); for (target_subnet_id, source) in &mut self.target_subnet_stream_positions { if self.cancel_token.is_cancelled() { self.status = SyncTaskStatus::Cancelled; return (self.stream_id, self.status); } let source_subnet_list = self .store .get_target_source_subnet_list(*target_subnet_id) .await; debug!( "Stream sync task detected {:?} as source list", source_subnet_list ); if let Ok(source_subnet_list) = source_subnet_list { for source_subnet_id in source_subnet_list { if let Entry::Vacant(entry) = 
source.entry(source_subnet_id) { entry.insert(TargetStreamPosition { target_subnet_id: *target_subnet_id, source_subnet_id, position: 0, certificate_id: None, }); } } } for TargetStreamPosition { target_subnet_id, source_subnet_id, position, .. } in source.values_mut() { if self.cancel_token.is_cancelled() { self.status = SyncTaskStatus::Cancelled; return (self.stream_id, self.status); } if let Ok(certificates_with_positions) = self .store .fetch_certificates(FetchCertificatesFilter::Target { target_stream_position: CertificateTargetStreamPosition { target_subnet_id: *target_subnet_id, source_subnet_id: *source_subnet_id, position: (*position).into(), }, limit: 100, }) .await { collector.extend(certificates_with_positions) } } } for (certificate, position) in collector { debug!( "Stream sync task for {} is sending {}", self.stream_id, certificate.certificate.id ); if let FetchCertificatesPosition::Target(CertificateTargetStreamPosition { target_subnet_id, source_subnet_id, position, }) = position { if let Err(error) = self .notifier .send(StreamCommand::PushCertificate { positions: vec![TargetStreamPosition { target_subnet_id, source_subnet_id, position: *position, certificate_id: Some(certificate.certificate.id), }], certificate, }) .await { error!("Error sending certificate to stream: {}", error); self.status = SyncTaskStatus::Error(Box::new(SyncTaskError::SendingToStream { error: Box::new(error), })); return (self.stream_id, self.status); } } else { error!("Invalid certificate position fetched"); self.status = SyncTaskStatus::Error(Box::from(SyncTaskError::InvalidCertificatePosition)); return (self.stream_id, self.status); } } info!("The sync task for stream {} is done", self.stream_id); self.status = SyncTaskStatus::Done; (self.stream_id, self.status) }) } } ================================================ FILE: crates/topos-tce-api/src/runtime/tests.rs ================================================ use test_log::test; #[test(tokio::test)] #[ignore = "not yet 
implemented"] async fn handling_new_stream() {} #[test(tokio::test)] #[ignore = "not yet implemented"] async fn stream_collision() {} #[test(tokio::test)] #[ignore = "not yet implemented"] async fn handle_stream_timedout() {} #[test(tokio::test)] #[ignore = "not yet implemented"] async fn handle_stream_handshaked() {} #[test(tokio::test)] #[ignore = "not yet implemented"] async fn handle_stream_registration() {} #[test(tokio::test)] #[ignore = "not yet implemented"] async fn handle_certificate_submission() {} #[test(tokio::test)] #[ignore = "not yet implemented"] async fn handle_stream_error() {} #[test(tokio::test)] #[ignore = "not yet implemented"] async fn handle_stream_closing() {} #[test(tokio::test)] #[ignore = "not yet implemented"] async fn forcing_a_stream_to_close() {} ================================================ FILE: crates/topos-tce-api/src/stream/commands.rs ================================================ use topos_core::api::grpc::checkpoints::TargetStreamPosition; use topos_core::types::CertificateDelivered; #[derive(Debug)] pub enum StreamCommand { PushCertificate { certificate: CertificateDelivered, positions: Vec, }, } ================================================ FILE: crates/topos-tce-api/src/stream/errors.rs ================================================ use crate::runtime::{error::RuntimeError, InternalRuntimeCommand}; use thiserror::Error; use tokio::sync::{mpsc::error::SendError, oneshot::error::RecvError}; use tonic::Code; use uuid::Uuid; #[derive(Error, Debug)] pub(crate) enum StreamErrorKind { #[error(transparent)] HandshakeFailed(#[from] HandshakeError), #[error("Pre-start error")] PreStartError, #[error("Stream is closed")] StreamClosed, #[error("A timeout occurred")] Timeout, #[error("The submitted command is invalid")] InvalidCommand, #[error("Transport error: {0}")] Transport(Code), #[error("The submitted TargetCheckpoint is ill-formed")] MalformedTargetCheckpoint, } #[derive(Debug)] pub struct StreamError { pub(crate) 
stream_id: Uuid, pub(crate) kind: StreamErrorKind, } impl StreamError { pub(crate) fn new(stream_id: Uuid, kind: StreamErrorKind) -> Self { Self { stream_id, kind } } } #[derive(Error, Debug)] pub(crate) enum HandshakeError { #[error(transparent)] Runtime(#[from] RuntimeError), #[error(transparent)] OneshotCommunicationChannel(#[from] RecvError), #[error(transparent)] InternalCommunicationChannel(#[from] Box>), } #[cfg(test)] mod tests { use test_log::test; use tokio::sync::oneshot; use super::*; #[test(tokio::test)] async fn handshake_error_expected() { let uuid = Uuid::new_v4(); let runtime_error = RuntimeError::PendingStreamNotFound(uuid); let handshake_error: HandshakeError = runtime_error.into(); assert_eq!( format!("The pending stream {uuid} was not found"), handshake_error.to_string() ); let (sender, receiver) = oneshot::channel::>(); drop(sender); let handshake_error: HandshakeError = receiver.await.unwrap_err().into(); assert_eq!("channel closed", handshake_error.to_string()); } } ================================================ FILE: crates/topos-tce-api/src/stream/mod.rs ================================================ use futures::{stream::BoxStream, StreamExt, TryStreamExt}; use std::sync::Arc; use std::{collections::HashMap, fmt::Debug, time::Duration}; use tokio::{ sync::{ mpsc::{self, Receiver, Sender}, oneshot, }, time::timeout, }; use tonic::Status; use topos_core::api::grpc::checkpoints::{TargetCheckpoint, TargetStreamPosition}; use topos_core::types::CertificateDelivered; use topos_core::uci::SubnetId; use tracing::{debug, error, info, trace, warn}; use uuid::Uuid; pub mod commands; pub mod errors; #[cfg(test)] mod tests; use crate::{ grpc::messaging::{ CertificatePushed, InboundMessage, OpenStream, OutboundMessage, StreamOpened, }, runtime::InternalRuntimeCommand, RuntimeError, }; pub use self::commands::StreamCommand; pub use self::errors::StreamError; pub(crate) use self::errors::{HandshakeError, StreamErrorKind}; /// [`TransientStream`] is a 
stream that live as long as the connection is open. /// A [`TransientStream`] will not receive any certificates that were delivered /// before the stream was ready to listen. /// /// [`TransientStream`] implements [`futures::Stream`] and use a custom [`Drop`] /// implementation to notify the `runtime` when ended. #[derive(Debug)] pub struct TransientStream { pub(crate) inner: mpsc::Receiver>, pub(crate) stream_id: Uuid, pub(crate) notifier: Option>, } impl futures::Stream for TransientStream { type Item = Arc; fn poll_next( mut self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, ) -> std::task::Poll> { self.inner.poll_recv(cx) } } impl Drop for TransientStream { fn drop(&mut self) { if let Some(notifier) = self.notifier.take() { trace!( "Dropping TransientStream {}, notifying runtime for cleanup", self.stream_id ); _ = notifier.send(self.stream_id); } } } pub struct Stream { pub(crate) stream_id: Uuid, /// Mapping for each target subnet to the set of position per source subnet pub(crate) target_subnet_listeners: HashMap>, pub(crate) command_receiver: Receiver, pub(crate) internal_runtime_command_sender: Sender, /// gRPC outbound stream pub(crate) outbound_stream: Sender, OutboundMessage), Status>>, /// gRPC inbound stream pub(crate) inbound_stream: BoxStream<'static, Result<(Option, InboundMessage), StreamError>>, } impl Debug for Stream { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("Stream") .field("stream_id", &self.stream_id) .field("target_subnet_listeners", &self.target_subnet_listeners) .finish() } } impl Stream { pub(crate) fn new( stream_id: Uuid, inbound_stream: BoxStream<'static, Result<(Option, InboundMessage), StreamError>>, outbound_stream: Sender, OutboundMessage), Status>>, command_receiver: mpsc::Receiver, internal_runtime_command_sender: Sender, ) -> Self { Self { stream_id, target_subnet_listeners: HashMap::new(), command_receiver, outbound_stream, inbound_stream, 
internal_runtime_command_sender, } } pub async fn run(mut self) -> Result { // Prestart is the phase that waits for a particular message to being able to process the // handshake. For now we do not have authentication nor authorization. let (request_id, checkpoint) = self.pre_start().await?; // The handshake is preparing the stream to broadcast certificates to the client. // Notifying the manager about the subscriptions and defining everything related to // the stream management. self.handshake(checkpoint) .await .map_err(|error| StreamError::new(self.stream_id, StreamErrorKind::from(error)))?; if let Err(error) = self .outbound_stream .send(Ok(( request_id, OutboundMessage::StreamOpened(StreamOpened { subnet_ids: self.target_subnet_listeners.keys().copied().collect(), }), ))) .await { error!(%error, "Handshake failed with stream"); return Err(StreamError::new( self.stream_id, StreamErrorKind::StreamClosed, )); } loop { tokio::select! { Some(command) = self.command_receiver.recv() => { if self.handle_command(command).await? { break } } // We currently open the stream, but no other message from the client is getting processed. // We are using this open connection to communicate `delivered_certificates` to the client. Some(stream_packet) = self.inbound_stream.next() => { match stream_packet { Ok((_request_id, _message)) => { trace!("Received message from stream_id: {:?}", self.stream_id); } Err(error) => { match error.kind { StreamErrorKind::StreamClosed => { warn!("Stream {} closed", self.stream_id); return Err(StreamError::new(self.stream_id, StreamErrorKind::StreamClosed)); } _ => { // We are not handling specific errors for now. // If the sequencer is closing the connection, we are receiving a // StreamErrorKind::TransportError. 
error!( "Stream error: {:?}", error); return Err(StreamError::new(self.stream_id, error.kind)); } } } } } else => break, } } Ok(self.stream_id) } } impl Stream { async fn handle_command(&mut self, command: StreamCommand) -> Result { match command { StreamCommand::PushCertificate { certificate, positions, } => { let certificate_id = certificate.certificate.id; if let Err(error) = self .outbound_stream .send(Ok(( None, OutboundMessage::CertificatePushed(Box::new(CertificatePushed { certificate, positions, })), ))) .await { error!(%error, "Can't forward WatchCertificatesResponse to stream, channel seems dropped certificate {certificate_id}"); return Err(StreamError::new( self.stream_id, StreamErrorKind::StreamClosed, )); } else { info!( "Certificate {} sent to gRPC stream {}", certificate_id, self.stream_id ); } } } Ok(false) } async fn pre_start(&mut self) -> Result<(Option, TargetCheckpoint), StreamError> { let waiting_for_open_stream = async { if let Ok(Some(( request_id, InboundMessage::OpenStream(OpenStream { target_checkpoint, .. 
}), ))) = self.inbound_stream.try_next().await { Ok((request_id, target_checkpoint)) } else { Err(()) } }; match timeout(Duration::from_millis(100), waiting_for_open_stream).await { Ok(Ok(checkpoint)) => { info!( "Received an OpenStream command for the stream {}", self.stream_id ); Ok(checkpoint) } Ok(Err(_)) => { if let Err(error) = self .outbound_stream .send(Err(Status::invalid_argument("No OpenStream provided"))) .await { warn!(%error, "Can't notify stream of invalid argument during pre_start"); Err(StreamError::new( self.stream_id, StreamErrorKind::StreamClosed, )) } else { Err(StreamError::new( self.stream_id, StreamErrorKind::PreStartError, )) } } _ => Err(StreamError::new(self.stream_id, StreamErrorKind::Timeout)), } } async fn handshake(&mut self, checkpoint: TargetCheckpoint) -> Result<(), HandshakeError> { _ = self.handle_checkpoint(checkpoint); let (sender, receiver) = oneshot::channel::>(); self.internal_runtime_command_sender .send(InternalRuntimeCommand::Register { stream_id: self.stream_id, target_subnet_stream_positions: self.target_subnet_listeners.clone(), sender, }) .await .map_err(Box::new)?; receiver.await??; self.internal_runtime_command_sender .send(InternalRuntimeCommand::Handshaked { stream_id: self.stream_id, }) .await .map_err(Box::new)?; Ok(()) } fn handle_checkpoint(&mut self, checkpoint: TargetCheckpoint) -> Result<(), StreamError> { self.target_subnet_listeners.clear(); for target in checkpoint.target_subnet_ids { self.target_subnet_listeners .insert(target, Default::default()); } for position in checkpoint.positions { let target = position.target_subnet_id; if let Some(entry) = self.target_subnet_listeners.get_mut(&target) { let source = position.source_subnet_id; if entry.insert(source, position).is_some() { debug!( "Stream {} replaced its position for target {:?} -> {:?}", self.stream_id, target, source ); } } else { return Err(StreamError::new( self.stream_id, StreamErrorKind::MalformedTargetCheckpoint, )); } } Ok(()) } } 
================================================ FILE: crates/topos-tce-api/src/stream/tests/utils.rs ================================================ use std::collections::HashMap; use futures::{stream::BoxStream, StreamExt}; use hyper::body::Sender; use tokio::sync::mpsc; use tonic::{ codec::{Codec, ProstCodec}, transport::Body, Status, Streaming, }; use topos_core::api::grpc::tce::v1::{WatchCertificatesRequest, WatchCertificatesResponse}; use uuid::Uuid; use crate::{ grpc::{ messaging::{InboundMessage, OutboundMessage}, TceGrpcService, }, runtime::InternalRuntimeCommand, stream::{Stream, StreamCommand, StreamError}, }; type CreateStreamResult = ( Sender, BoxStream<'static, Result<(Option, InboundMessage), StreamError>>, ); pub fn create_stream(stream_id: Uuid) -> CreateStreamResult { let (tx, body) = Body::channel(); let mut codec = ProstCodec::::default(); let stream = Streaming::new_request(codec.decoder(), body, None, None) .map(move |message| TceGrpcService::parse_stream(message, stream_id)) .boxed(); (tx, stream) } pub struct StreamBuilder { outbound_stream_channel_size: usize, runtime_channel_size: usize, stream_channel_size: usize, stream_id: Uuid, } impl Default for StreamBuilder { fn default() -> Self { Self { outbound_stream_channel_size: 10, runtime_channel_size: 10, stream_channel_size: 10, stream_id: Uuid::new_v4(), } } } impl StreamBuilder { #[allow(dead_code)] pub fn outbound_stream_channel_size(mut self, value: usize) -> Self { self.outbound_stream_channel_size = value; self } #[allow(dead_code)] pub fn runtime_channel_size(mut self, value: usize) -> Self { self.runtime_channel_size = value; self } #[allow(dead_code)] pub fn stream_channel_size(mut self, value: usize) -> Self { self.stream_channel_size = value; self } #[allow(dead_code)] pub fn stream_id(mut self, value: Uuid) -> Self { self.stream_id = value; self } pub fn build(self) -> (Sender, Stream, StreamContext) { let stream_id = Uuid::new_v4(); let (tx, stream) = 
create_stream(stream_id); let (sender, stream_receiver) = mpsc::channel(self.outbound_stream_channel_size); let (command_sender, command_receiver) = mpsc::channel(self.stream_channel_size); let (internal_runtime_command_sender, runtime_receiver) = mpsc::channel(self.runtime_channel_size); let testable_stream = Stream { stream_id, target_subnet_listeners: HashMap::new(), outbound_stream: sender, inbound_stream: stream, internal_runtime_command_sender, command_receiver, }; ( tx, testable_stream, StreamContext { stream_receiver, command_sender, runtime_receiver, stream_id, }, ) } } pub struct StreamContext { pub(crate) stream_receiver: mpsc::Receiver, OutboundMessage), Status>>, #[allow(dead_code)] pub(crate) command_sender: mpsc::Sender, pub(crate) runtime_receiver: mpsc::Receiver, pub(crate) stream_id: Uuid, } ================================================ FILE: crates/topos-tce-api/src/stream/tests.rs ================================================ use rstest::*; use std::time::Duration; use tokio::sync::{mpsc, oneshot}; use tokio_stream::StreamExt; use topos_core::uci::SUBNET_ID_LENGTH; use topos_test_sdk::certificates::create_certificate_chain; use topos_test_sdk::constants::{SOURCE_SUBNET_ID_2, TARGET_SUBNET_ID_1}; use uuid::Uuid; use self::utils::StreamBuilder; use crate::grpc::messaging::{OutboundMessage, StreamOpened}; use crate::runtime::InternalRuntimeCommand; use crate::stream::{StreamError, StreamErrorKind, TransientStream}; use crate::tests::encode; use crate::wait_for_command; use test_log::test; use tokio::spawn; use topos_core::api::grpc::shared::v1::checkpoints::TargetCheckpoint; use topos_core::api::grpc::shared::v1::positions::TargetStreamPosition; use topos_core::api::grpc::tce::v1::watch_certificates_request::OpenStream as GrpcOpenStream; use topos_core::api::grpc::tce::v1::WatchCertificatesRequest; mod utils; #[rstest] #[timeout(Duration::from_millis(100))] #[test(tokio::test)] pub async fn sending_no_message() -> Result<(), Box> { let (_, 
stream, mut context) = StreamBuilder::default().build(); let join = spawn(stream.run()); wait_for_command!( context.stream_receiver, matches: Err(status) if status.message() == "No OpenStream provided" ); let result = join.await?; assert!( matches!(result, Err(StreamError { stream_id, kind: StreamErrorKind::PreStartError}) if stream_id == context.stream_id), "Doesn't match {result:?}", ); Ok(()) } #[rstest] #[timeout(Duration::from_millis(100))] #[test(tokio::test)] pub async fn sending_open_stream_message() -> Result<(), Box> { let (mut tx, stream, mut context) = StreamBuilder::default().build(); let join = spawn(stream.run()); let msg: WatchCertificatesRequest = GrpcOpenStream { target_checkpoint: Some(TargetCheckpoint { target_subnet_ids: vec![TARGET_SUBNET_ID_1.into()], positions: Vec::new(), }), source_checkpoint: None, } .into(); _ = tx.send_data(encode(&msg)?).await; let expected_stream_id = context.stream_id; wait_for_command!( context.runtime_receiver, matches: InternalRuntimeCommand::Register { stream_id, .. } if stream_id == expected_stream_id ); join.abort(); Ok(()) } #[rstest] #[timeout(Duration::from_millis(100))] #[test(tokio::test)] async fn subscribing_to_one_target_with_position() -> Result<(), Box> { let (mut tx, stream, mut context) = StreamBuilder::default().build(); let join = spawn(stream.run()); let msg: WatchCertificatesRequest = GrpcOpenStream { target_checkpoint: Some(TargetCheckpoint { target_subnet_ids: vec![TARGET_SUBNET_ID_1.into()], positions: vec![TargetStreamPosition { source_subnet_id: Some(SOURCE_SUBNET_ID_2.into()), target_subnet_id: Some(TARGET_SUBNET_ID_1.into()), position: 1, certificate_id: None, }], }), source_checkpoint: None, } .into(); _ = tx.send_data(encode(&msg)?).await; let expected_stream_id = context.stream_id; wait_for_command!( context.runtime_receiver, matches: InternalRuntimeCommand::Register { stream_id, .. 
} if stream_id == expected_stream_id ); join.abort(); Ok(()) } #[rstest] #[timeout(Duration::from_millis(100))] #[test(tokio::test)] async fn receive_expected_certificate_from_zero() -> Result<(), Box> { let (mut tx, stream, mut context) = StreamBuilder::default().build(); let expected_certificates = create_certificate_chain(SOURCE_SUBNET_ID_2, &[TARGET_SUBNET_ID_1], 2); let join = spawn(stream.run()); let msg: WatchCertificatesRequest = GrpcOpenStream { target_checkpoint: Some(TargetCheckpoint { target_subnet_ids: vec![TARGET_SUBNET_ID_1.into()], positions: vec![], }), source_checkpoint: None, } .into(); _ = tx.send_data(encode(&msg)?).await; let expected_stream_id = context.stream_id; wait_for_command!( context.runtime_receiver, matches: InternalRuntimeCommand::Register { stream_id, sender, .. } if stream_id == expected_stream_id => { sender.send(Ok(())) } ); let msg = context.stream_receiver.recv().await; assert!( matches!( msg, Some(Ok((_, OutboundMessage::StreamOpened(StreamOpened { ref subnet_ids })))) if subnet_ids == &[TARGET_SUBNET_ID_1], ), "Expected StreamOpened, received: {msg:?}" ); for (index, expected_certificate) in expected_certificates.iter().enumerate() { context .command_sender .send(crate::stream::StreamCommand::PushCertificate { certificate: expected_certificate.clone(), positions: vec![topos_core::api::grpc::checkpoints::TargetStreamPosition { position: index as u64, certificate_id: Some(expected_certificate.certificate.id), target_subnet_id: [1u8; SUBNET_ID_LENGTH].into(), source_subnet_id: expected_certificate.certificate.source_subnet_id, }], }) .await .expect("Unable to send certificate during test"); } for (expected_position, expected_certificate) in expected_certificates.into_iter().enumerate() { assert!( matches!( context.stream_receiver.recv().await, Some(Ok((_, OutboundMessage::CertificatePushed(certificate_pushed)))) if certificate_pushed.certificate == expected_certificate && certificate_pushed.positions[0].position == 
expected_position as u64, ), "Expected CertificatePushed with {}, received: {:?}", expected_certificate.certificate.id, msg ); } join.abort(); Ok(()) } #[test(tokio::test)] #[ignore = "not yet implemented"] async fn pausing_all_subscription() {} #[test(tokio::test)] #[ignore = "not yet implemented"] async fn pausing_one_subscription() {} #[test(tokio::test)] #[ignore = "not yet implemented"] async fn resuming_one_subscription() {} #[test(tokio::test)] #[ignore = "not yet implemented"] async fn resuming_all_subscription() {} #[rstest] #[test(tokio::test)] async fn closing_client_stream() -> Result<(), Box> { let (mut tx, stream, mut context) = StreamBuilder::default().build(); let join = spawn(stream.run()); let msg: WatchCertificatesRequest = GrpcOpenStream { target_checkpoint: Some(TargetCheckpoint { target_subnet_ids: vec![TARGET_SUBNET_ID_1.into()], positions: vec![], }), source_checkpoint: None, } .into(); _ = tx.send_data(encode(&msg)?).await; let expected_stream_id = context.stream_id; wait_for_command!( context.runtime_receiver, matches: InternalRuntimeCommand::Register { stream_id, sender, .. 
} if stream_id == expected_stream_id => { sender.send(Ok(())) } ); let msg = context.stream_receiver.recv().await; assert!( matches!( msg, Some(Ok((_, OutboundMessage::StreamOpened(StreamOpened { ref subnet_ids })))) if subnet_ids == &[TARGET_SUBNET_ID_1], ), "Expected StreamOpened, received: {msg:?}" ); tx.abort(); let result = join.await?; assert!( matches!(result, Err(StreamError { stream_id, kind: StreamErrorKind::Transport(_)}) if stream_id == context.stream_id), "Doesn't match {result:?}", ); Ok(()) } #[test(tokio::test)] #[ignore = "not yet implemented"] async fn closing_server_stream() {} #[test(tokio::test)] async fn opening_transient_stream() { let (_sender, receiver) = mpsc::channel(1); let (notifier, check) = oneshot::channel(); let id = Uuid::new_v4(); let stream = TransientStream { inner: receiver, stream_id: id, notifier: Some(notifier), }; tokio::spawn(async move { drop(stream); }); let res = check.await; assert_eq!(res.unwrap(), id); } #[test(tokio::test)] async fn opening_transient_stream_drop_sender() { let (sender, receiver) = mpsc::channel(1); let (notifier, check) = oneshot::channel(); let id = Uuid::new_v4(); let mut stream = TransientStream { inner: receiver, stream_id: id, notifier: Some(notifier), }; let handle = tokio::spawn(async move { while stream.next().await.is_some() {} }); tokio::time::sleep(Duration::from_millis(10)).await; drop(sender); let res = check.await; assert_eq!(res.unwrap(), id); assert!(handle.is_finished()); } ================================================ FILE: crates/topos-tce-api/src/tests.rs ================================================ use bytes::{BufMut, Bytes, BytesMut}; use prost::Message; #[macro_export] macro_rules! wait_for_command { ($node:expr, matches: $(|)? $( $pattern:pat_param )|+ $( if $guard: expr )? $(=> $input_block:block)? $(,)?) => { let assertion = async { while let Some(command) = $node.recv().await { match command { $( $pattern )|+ $( if $guard )? 
=> { _ = {$($input_block)?}; break; } _ => {} } } }; if let Err(_) = tokio::time::timeout(std::time::Duration::from_millis(100), assertion).await { panic!("Timeout waiting for command"); } }; } // Utility to encode our proto into GRPC stream format. pub fn encode(proto: &T) -> Result> { let mut buf = BytesMut::new(); // See below comment on spec. use std::mem::size_of; const PREFIX_BYTES: usize = size_of::() + size_of::(); for _ in 0..PREFIX_BYTES { // Advance our buffer first. // We will backfill it once we know the size of the message. buf.put_u8(0); } proto.encode(&mut buf)?; let len = buf.len() - PREFIX_BYTES; { let mut buf = &mut buf[0..PREFIX_BYTES]; // See: https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#:~:text=Compressed-Flag // for more details on spec. // Compressed-Flag -> 0 / 1 # encoded as 1 byte unsigned integer. buf.put_u8(0); // Message-Length -> {length of Message} # encoded as 4 byte unsigned integer (big endian). buf.put_u32(len as u32); // Message -> *{binary octet}. 
} Ok(buf.freeze()) } ================================================ FILE: crates/topos-tce-api/tests/grpc/certificate_precedence.rs ================================================ use base64ct::{Base64, Encoding}; use rstest::rstest; use std::sync::Arc; use test_log::test; use topos_core::api::grpc::tce::v1::{GetLastPendingCertificatesRequest, LastPendingCertificate}; use topos_core::uci::Certificate; use topos_test_sdk::{ certificates::create_certificate_chain, constants::{SOURCE_SUBNET_ID_1, TARGET_SUBNET_ID_1}, storage::{create_fullnode_store, create_validator_store, storage_client}, tce::public_api::{broadcast_stream, create_public_api}, }; use topos_tce_storage::validator::ValidatorStore; #[rstest] #[test(tokio::test)] async fn fetch_latest_pending_certificates() { let fullnode_store = create_fullnode_store(&[]).await; let validator_store: Arc = create_validator_store(&[], futures::future::ready(fullnode_store.clone())).await; let (api_context, _) = create_public_api( storage_client(&[]), broadcast_stream(), futures::future::ready(validator_store.clone()), ) .await; let mut client = api_context.api_client; let certificates = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 2); let expected = certificates[1].certificate.clone(); assert!(validator_store .insert_pending_certificate(&certificates[1].certificate) .await .unwrap() .is_none()); assert!(validator_store .insert_pending_certificate(&certificates[0].certificate) .await .unwrap() .is_some()); let mut res = client .get_last_pending_certificates(GetLastPendingCertificatesRequest { subnet_ids: vec![SOURCE_SUBNET_ID_1.into()], }) .await .unwrap() .into_inner(); let res: LastPendingCertificate = res .last_pending_certificate .remove(&Base64::encode_string(SOURCE_SUBNET_ID_1.as_array())) .unwrap(); let res: Certificate = res.value.unwrap().try_into().unwrap(); assert_eq!(res, expected); } #[rstest] #[test(tokio::test)] async fn fetch_latest_pending_certificates_with_conflicts() { let 
fullnode_store = create_fullnode_store(&[]).await; let validator_store: Arc = create_validator_store(&[], futures::future::ready(fullnode_store.clone())).await; let (api_context, _) = create_public_api( storage_client(&[]), broadcast_stream(), futures::future::ready(validator_store.clone()), ) .await; let mut client = api_context.api_client; let mut certificates = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 3); certificates[2].certificate.prev_id = certificates[1].certificate.prev_id; let expected = certificates[2].certificate.clone(); for certificate in certificates.iter().skip(1) { assert!(validator_store .insert_pending_certificate(&certificate.certificate) .await .unwrap() .is_none()); } assert!(validator_store .insert_pending_certificate(&certificates[0].certificate) .await .unwrap() .is_some()); let mut res = client .get_last_pending_certificates(GetLastPendingCertificatesRequest { subnet_ids: vec![SOURCE_SUBNET_ID_1.into()], }) .await .unwrap() .into_inner(); let res: LastPendingCertificate = res .last_pending_certificate .remove(&Base64::encode_string(SOURCE_SUBNET_ID_1.as_array())) .unwrap(); let res: Certificate = res.value.unwrap().try_into().unwrap(); assert_eq!(res, expected); } ================================================ FILE: crates/topos-tce-api/tests/grpc/mod.rs ================================================ mod certificate_precedence; ================================================ FILE: crates/topos-tce-api/tests/runtime.rs ================================================ use futures::Stream; use rstest::rstest; use serde::Deserialize; use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; use test_log::test; use tokio::sync::{broadcast, mpsc}; use tokio::{spawn, sync::oneshot}; use tokio_stream::StreamExt; use tonic::transport::channel; use tonic::transport::Uri; use topos_core::api::graphql::certificate::Certificate as GraphQLCertificate; use 
topos_core::api::grpc::shared::v1::checkpoints::TargetCheckpoint; use topos_core::api::grpc::shared::v1::positions::TargetStreamPosition; use topos_core::types::stream::Position; use topos_core::types::CertificateDelivered; use topos_core::uci::CertificateId; use topos_core::{ api::grpc::tce::v1::{ api_service_client::ApiServiceClient, watch_certificates_request::OpenStream, watch_certificates_response::{CertificatePushed, Event}, }, uci::Certificate, }; use topos_metrics::{STORAGE_PENDING_POOL_COUNT, STORAGE_PRECEDENCE_POOL_COUNT}; use topos_tce_api::{Runtime, RuntimeEvent}; use topos_tce_storage::types::CertificateDeliveredWithPositions; use topos_tce_storage::validator::ValidatorStore; use topos_tce_storage::StorageClient; use topos_test_sdk::certificates::{ create_certificate, create_certificate_at_position, create_certificate_chain, }; use topos_test_sdk::constants::*; use topos_test_sdk::networking::get_available_addr; use topos_test_sdk::storage::{create_fullnode_store, create_validator_store, storage_client}; use topos_test_sdk::tce::public_api::{broadcast_stream, create_public_api, PublicApiContext}; mod grpc; #[rstest] #[timeout(Duration::from_secs(4))] #[test(tokio::test)] async fn runtime_can_dispatch_a_cert( #[future] create_public_api: (PublicApiContext, impl Stream), ) { let (mut api_context, _) = create_public_api.await; let mut client = api_context.api_client; let (tx, rx) = oneshot::channel::(); // This block represent a subnet A spawn(async move { let in_stream = async_stream::stream! 
{ yield OpenStream { target_checkpoint: Some(TargetCheckpoint { target_subnet_ids: vec![TARGET_SUBNET_ID_1.into()], positions: Vec::new() }), source_checkpoint: None }.into() }; let response = client.watch_certificates(in_stream).await.unwrap(); let mut resp_stream = response.into_inner(); let mut tx = Some(tx); while let Some(received) = resp_stream.next().await { let received = received.unwrap(); if let Some(Event::CertificatePushed(CertificatePushed { certificate: Some(certificate), .. })) = received.event { if let Some(tx) = tx.take() { _ = tx.send(certificate.try_into().unwrap()); } else { panic!("Double certificate sent"); } } } }); // Wait for client to be ready tokio::time::sleep(Duration::from_millis(10)).await; let cert = create_certificate_at_position( Position::ZERO, create_certificate( SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], Some(PREV_CERTIFICATE_ID), ), ); let mut target_positions = std::collections::HashMap::new(); target_positions.insert( TARGET_SUBNET_ID_1, topos_core::api::grpc::checkpoints::TargetStreamPosition { position: 0, source_subnet_id: SOURCE_SUBNET_ID_1, target_subnet_id: TARGET_SUBNET_ID_1, certificate_id: Some(cert.certificate.id), }, ); // Send a dispatch command that will be push to the subnet A api_context .client .dispatch_certificate(cert.clone(), target_positions) .await; let certificate_received = rx.await.unwrap(); assert_eq!(cert.certificate, certificate_received); drop(api_context.api_context.take()); } #[rstest] #[timeout(Duration::from_secs(4))] #[test(tokio::test)] async fn can_catchup_with_old_certs( #[with(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 15)] #[from(create_certificate_chain)] certificates: Vec, ) { let storage_client = storage_client::partial_1(&certificates[..]); let (mut api_context, _) = create_public_api::partial_1(storage_client).await; let mut client = api_context.api_client; let (tx, mut rx) = mpsc::channel::(16); // This block represent a subnet A spawn(async move { let in_stream = 
async_stream::stream! { yield OpenStream { target_checkpoint: Some(TargetCheckpoint { target_subnet_ids: vec![TARGET_SUBNET_ID_1.into()], positions: Vec::new() }), source_checkpoint: None }.into() }; let response = client.watch_certificates(in_stream).await.unwrap(); let mut resp_stream = response.into_inner(); while let Some(received) = resp_stream.next().await { let received = received.unwrap(); if let Some(Event::CertificatePushed(CertificatePushed { certificate: Some(certificate), .. })) = received.event { _ = tx.send(certificate.try_into().unwrap()).await; } } }); // Wait for client to be ready tokio::time::sleep(Duration::from_millis(100)).await; let last = certificates.last().map(|c| c.certificate.id).unwrap(); let cert = create_certificate_at_position( certificates.len().try_into().unwrap(), create_certificate(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], Some(last)), ); let mut target_positions = std::collections::HashMap::new(); target_positions.insert( TARGET_SUBNET_ID_1, topos_core::api::grpc::checkpoints::TargetStreamPosition { position: certificates.len() as u64, source_subnet_id: SOURCE_SUBNET_ID_1, target_subnet_id: TARGET_SUBNET_ID_1, certificate_id: Some(cert.certificate.id), }, ); // Send a dispatch command that will be push to the subnet A api_context .client .dispatch_certificate(cert.clone(), target_positions) .await; for (index, certificate) in certificates.iter().enumerate() { let certificate_received = rx .recv() .await .unwrap_or_else(|| panic!("Didn't received index {index}")); assert_eq!( certificate.certificate, certificate_received, "Certificate at index {index} not received" ); } let certificate_received = rx.recv().await.unwrap(); assert_eq!(cert.certificate, certificate_received); drop(api_context.api_context.take()); } #[rstest] #[timeout(Duration::from_secs(4))] #[test(tokio::test)] async fn can_catchup_with_old_certs_with_position( broadcast_stream: broadcast::Receiver, ) { let (tx, mut rx) = mpsc::channel::(16); let addr = 
get_available_addr(); let graphql_addr = get_available_addr(); let metrics_addr = get_available_addr(); // launch data store let certificates = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 15); let fullnode_store = create_fullnode_store::default().await; let store = create_validator_store( &certificates[..], futures::future::ready(fullnode_store.clone()), ) .await; let storage_client = StorageClient::new(store.clone()); let (runtime_client, _launcher, _ctx) = Runtime::builder() .with_broadcast_stream(broadcast_stream) .storage(storage_client) .store(store) .serve_grpc_addr(addr) .serve_graphql_addr(graphql_addr) .serve_metrics_addr(metrics_addr) .build_and_launch() .await; // Wait for server to boot tokio::time::sleep(Duration::from_millis(100)).await; let uri = Uri::builder() .path_and_query("/") .authority(addr.to_string()) .scheme("http") .build() .unwrap(); // This block represent a subnet A spawn(async move { let channel = channel::Channel::builder(uri).connect_lazy(); let mut client = ApiServiceClient::new(channel); let in_stream = async_stream::stream! { yield OpenStream { target_checkpoint: Some(TargetCheckpoint { target_subnet_ids: vec![TARGET_SUBNET_ID_1.into()], positions: vec![ TargetStreamPosition { certificate_id: None, position: 5, source_subnet_id: Some(SOURCE_SUBNET_ID_1.into()), target_subnet_id: Some(TARGET_SUBNET_ID_1.into()) } ] }), source_checkpoint: None }.into() }; let response = client.watch_certificates(in_stream).await.unwrap(); let mut resp_stream = response.into_inner(); while let Some(received) = resp_stream.next().await { let received = received.unwrap(); if let Some(Event::CertificatePushed(CertificatePushed { certificate: Some(certificate), .. 
})) = received.event { _ = tx.send(certificate.try_into().unwrap()).await; } } }); // Wait for client to be ready tokio::time::sleep(Duration::from_millis(100)).await; let last = certificates.last().map(|c| c.certificate.id).unwrap(); let cert = create_certificate_at_position( certificates.len().try_into().unwrap(), create_certificate(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], Some(last)), ); let mut target_positions = std::collections::HashMap::new(); target_positions.insert( TARGET_SUBNET_ID_1, topos_core::api::grpc::checkpoints::TargetStreamPosition { position: certificates.len() as u64, source_subnet_id: SOURCE_SUBNET_ID_1, target_subnet_id: TARGET_SUBNET_ID_1, certificate_id: Some(cert.certificate.id), }, ); // Send a dispatch command that will be push to the subnet A runtime_client .dispatch_certificate(cert.clone(), target_positions) .await; for (index, certificate) in certificates.iter().skip(5).enumerate() { let certificate_received = rx .recv() .await .unwrap_or_else(|| panic!("Didn't received index {index}")); assert_eq!( certificate.certificate, certificate_received, "Certificate at index {index} not received" ); } let certificate_received = rx.recv().await.unwrap(); assert_eq!(cert.certificate, certificate_received); } #[test(tokio::test)] #[ignore = "not yet implemented"] async fn can_listen_for_multiple_subnet_id() {} #[rstest] #[timeout(Duration::from_secs(4))] #[test(tokio::test)] async fn boots_healthy_graphql_server( broadcast_stream: broadcast::Receiver, ) { let addr = get_available_addr(); let graphql_addr = get_available_addr(); let metrics_addr = get_available_addr(); // launch data store let certificates = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 15); let fullnode_store = create_fullnode_store::default().await; let store = create_validator_store( &certificates[..], futures::future::ready(fullnode_store.clone()), ) .await; let storage_client = StorageClient::new(store.clone()); let (_runtime_client, _launcher, _ctx) 
= Runtime::builder() .with_broadcast_stream(broadcast_stream) .storage(storage_client) .store(store) .serve_grpc_addr(addr) .serve_graphql_addr(graphql_addr) .serve_metrics_addr(metrics_addr) .build_and_launch() .await; // Wait for server to boot tokio::time::sleep(Duration::from_millis(100)).await; let res = reqwest::get(format!("http://{}/health", graphql_addr)) .await .unwrap() .text() .await .unwrap(); assert_eq!(res, "{\"healthy\":true}"); } #[rstest] #[timeout(Duration::from_secs(4))] #[test(tokio::test)] async fn graphql_server_enables_cors( broadcast_stream: broadcast::Receiver, ) { let addr = get_available_addr(); let graphql_addr = get_available_addr(); let metrics_addr = get_available_addr(); // launch data store let certificates = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 15); let fullnode_store = create_fullnode_store::default().await; let store = create_validator_store( &certificates[..], futures::future::ready(fullnode_store.clone()), ) .await; let storage_client = StorageClient::new(store.clone()); let (_runtime_client, _launcher, _ctx) = Runtime::builder() .with_broadcast_stream(broadcast_stream) .storage(storage_client) .store(store) .serve_grpc_addr(addr) .serve_graphql_addr(graphql_addr) .serve_metrics_addr(metrics_addr) .build_and_launch() .await; // Wait for server to boot tokio::time::sleep(Duration::from_millis(100)).await; let mut headers = reqwest::header::HeaderMap::new(); headers.insert("Origin", "http://example.com".parse().unwrap()); headers.insert("Access-Control-Request-Method", "POST".parse().unwrap()); headers.insert( "Access-Control-Request-Headers", "X-Requested-With".parse().unwrap(), ); let client = reqwest::Client::new(); let res = client .request( "OPTIONS".parse().unwrap(), format!("http://{}/health", graphql_addr), ) .headers(headers) .send() .await .unwrap(); let headers = res.headers(); let ac_allow_origin = headers.get("Access-Control-Allow-Origin"); 
assert_eq!(ac_allow_origin.unwrap().to_str().unwrap(), "*"); let ac_allow_methods = headers.get("Access-Control-Allow-Methods"); assert_eq!(ac_allow_methods.unwrap().to_str().unwrap(), "GET,POST"); let ac_allow_headers = headers.get("Access-Control-Allow-Headers"); assert_eq!(ac_allow_headers.unwrap().to_str().unwrap(), "content-type"); } #[rstest] #[timeout(Duration::from_secs(4))] #[test(tokio::test)] async fn can_query_graphql_endpoint_for_certificates( broadcast_stream: broadcast::Receiver, ) { let (tx, mut rx) = mpsc::channel::(16); let addr = get_available_addr(); let graphql_addr = get_available_addr(); let metrics_addr = get_available_addr(); // launch data store let certificates = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 15); let fullnode_store = create_fullnode_store::default().await; let store = create_validator_store( &certificates[..], futures::future::ready(fullnode_store.clone()), ) .await; let storage_client = StorageClient::new(store.clone()); let (runtime_client, _launcher, _ctx) = Runtime::builder() .with_broadcast_stream(broadcast_stream) .storage(storage_client) .store(store) .serve_grpc_addr(addr) .serve_graphql_addr(graphql_addr) .serve_metrics_addr(metrics_addr) .build_and_launch() .await; // Wait for server to boot tokio::time::sleep(Duration::from_millis(100)).await; let uri = Uri::builder() .path_and_query("/") .authority(addr.to_string()) .scheme("http") .build() .unwrap(); // This block represent a subnet A spawn(async move { let channel = channel::Channel::builder(uri).connect_lazy(); let mut client = ApiServiceClient::new(channel); let in_stream = async_stream::stream! 
{ yield OpenStream { target_checkpoint: Some(TargetCheckpoint { target_subnet_ids: vec![TARGET_SUBNET_ID_1.into()], positions: vec![ TargetStreamPosition { certificate_id: None, position: 5, source_subnet_id: Some(SOURCE_SUBNET_ID_1.into()), target_subnet_id: Some(TARGET_SUBNET_ID_1.into()) } ] }), source_checkpoint: None }.into() }; let response = client.watch_certificates(in_stream).await.unwrap(); let mut resp_stream = response.into_inner(); while let Some(received) = resp_stream.next().await { let received = received.unwrap(); if let Some(Event::CertificatePushed(CertificatePushed { certificate: Some(certificate), .. })) = received.event { _ = tx.send(certificate.try_into().unwrap()).await; } } }); // Wait for client to be ready tokio::time::sleep(Duration::from_millis(100)).await; let last = certificates.last().map(|c| c.certificate.id).unwrap(); let cert = create_certificate_at_position( certificates.len().try_into().unwrap(), create_certificate(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], Some(last)), ); let mut target_positions = std::collections::HashMap::new(); target_positions.insert( TARGET_SUBNET_ID_1, topos_core::api::grpc::checkpoints::TargetStreamPosition { position: certificates.len() as u64, source_subnet_id: SOURCE_SUBNET_ID_1, target_subnet_id: TARGET_SUBNET_ID_1, certificate_id: Some(cert.certificate.id), }, ); // Send a dispatch command that will be push to the subnet A runtime_client .dispatch_certificate(cert.clone(), target_positions) .await; for (index, certificate) in certificates.iter().skip(5).enumerate() { let certificate_received = rx .recv() .await .unwrap_or_else(|| panic!("Didn't received index {index}")); assert_eq!( certificate.certificate, certificate_received, "Certificate at index {index} not received" ); } let _ = rx.recv().await.unwrap(); let query = format!( r#" query {{ certificates( fromSourceCheckpoint: {{ sourceSubnetIds: [ "{SOURCE_SUBNET_ID_1}" ], positions: [ {{ sourceSubnetId:"{SOURCE_SUBNET_ID_1}", position: 0, }} ] 
}}, first: 10 ) {{ id prevId proof signature sourceSubnetId stateRoot targetSubnets txRootHash receiptsRootHash verifier positions {{ source {{ sourceSubnetId position certificateId }} }} }} }} "# ); #[derive(Deserialize)] struct Response { data: CertificatesResponse, } #[derive(Deserialize, Debug)] struct CertificatesResponse { certificates: Vec, } let client = reqwest::Client::new(); let response = client .post(format!("http://{}", graphql_addr)) .json(&serde_json::json!({ "query": query, })) .send() .await .unwrap() .json::() .await .unwrap(); let graphql_certificate: GraphQLCertificate = cert.as_ref().into(); assert_eq!(response.data.certificates.len(), 10); assert_eq!( response.data.certificates[0].source_subnet_id, graphql_certificate.source_subnet_id ); } #[rstest] #[timeout(Duration::from_secs(4))] #[test(tokio::test)] async fn check_storage_pool_stats( broadcast_stream: broadcast::Receiver, ) { let addr = get_available_addr(); let graphql_addr = get_available_addr(); let metrics_addr = get_available_addr(); let fullnode_store = create_fullnode_store::default().await; let store = create_validator_store(&[], futures::future::ready(fullnode_store.clone())).await; STORAGE_PENDING_POOL_COUNT.set(10); STORAGE_PRECEDENCE_POOL_COUNT.set(200); let storage_client = StorageClient::new(store.clone()); let (_runtime_client, _launcher, _ctx) = Runtime::builder() .with_broadcast_stream(broadcast_stream) .storage(storage_client) .store(store) .serve_grpc_addr(addr) .serve_graphql_addr(graphql_addr) .serve_metrics_addr(metrics_addr) .build_and_launch() .await; // Wait for server to boot tokio::time::sleep(Duration::from_millis(100)).await; let query = "query {getStoragePoolStats}"; #[derive(Debug, Deserialize)] struct Response { // data: HashMap, data: Stats, } #[derive(Debug, Deserialize)] #[serde(rename_all = "camelCase")] struct Stats { get_storage_pool_stats: PoolStats, } #[derive(Debug, Deserialize)] struct PoolStats { metrics_pending_pool: u64, 
metrics_precedence_pool: u64, } let client = reqwest::Client::new(); let response = client .post(format!("http://{}", graphql_addr)) .json(&serde_json::json!({ "query": query, })) .send() .await .unwrap() .json::() .await .unwrap(); assert_eq!( response.data.get_storage_pool_stats.metrics_pending_pool, 10 ); assert_eq!( response.data.get_storage_pool_stats.metrics_precedence_pool, 200 ); } #[rstest] #[timeout(Duration::from_secs(4))] #[test(tokio::test)] async fn get_pending_pool( broadcast_stream: broadcast::Receiver, ) { let addr = get_available_addr(); let graphql_addr = get_available_addr(); let metrics_addr = get_available_addr(); // launch data store let certificates = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 15); let fullnode_store = create_fullnode_store::default().await; let store: Arc = create_validator_store(&[], futures::future::ready(fullnode_store.clone())).await; for certificate in &certificates { _ = store .insert_pending_certificate(&certificate.certificate) .await; } let storage_client = StorageClient::new(store.clone()); let (_runtime_client, _launcher, _ctx) = Runtime::builder() .with_broadcast_stream(broadcast_stream) .storage(storage_client) .store(store) .serve_grpc_addr(addr) .serve_graphql_addr(graphql_addr) .serve_metrics_addr(metrics_addr) .build_and_launch() .await; // Wait for server to boot tokio::time::sleep(Duration::from_millis(100)).await; let query = "query { getPendingPool }".to_string(); #[derive(Debug, Deserialize)] struct Response { data: PendingPool, } #[derive(Debug, Deserialize)] #[serde(rename_all = "camelCase")] struct PendingPool { #[serde(rename = "getPendingPool")] pool: HashMap, } let client = reqwest::Client::new(); let mut response = client .post(format!("http://{}", graphql_addr)) .json(&serde_json::json!({ "query": query, })) .send() .await .unwrap() .json::() .await .unwrap(); assert_eq!(response.data.pool.len(), 1); let first: CertificateId = response .data .pool .remove(&1) .unwrap() 
.as_bytes() .try_into() .unwrap(); assert_eq!(first, certificates[0].certificate.id); } #[rstest] #[timeout(Duration::from_secs(4))] #[test(tokio::test)] async fn check_precedence( broadcast_stream: broadcast::Receiver, ) { let addr = get_available_addr(); let graphql_addr = get_available_addr(); let metrics_addr = get_available_addr(); // launch data store let certificates = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 15); let fullnode_store = create_fullnode_store::default().await; let store: Arc = create_validator_store(&[], futures::future::ready(fullnode_store.clone())).await; for certificate in &certificates { _ = store .insert_pending_certificate(&certificate.certificate) .await; } let storage_client = StorageClient::new(store.clone()); let (_runtime_client, _launcher, _ctx) = Runtime::builder() .with_broadcast_stream(broadcast_stream) .storage(storage_client) .store(store) .serve_grpc_addr(addr) .serve_graphql_addr(graphql_addr) .serve_metrics_addr(metrics_addr) .build_and_launch() .await; // Wait for server to boot tokio::time::sleep(Duration::from_millis(100)).await; let certificate_one = certificates[0].certificate.id; let query = format!( r#" query {{ checkPrecedence(certificateId: "{}") {{ id }} }} "#, certificate_one ); #[derive(Debug, Deserialize)] struct Response { data: CheckPrecedenceResponse, } #[derive(Debug, Deserialize)] #[serde(rename_all = "camelCase")] struct CheckPrecedenceResponse { check_precedence: CheckPrecedence, } #[derive(Debug, Deserialize)] #[serde(rename_all = "camelCase")] struct CheckPrecedence { id: String, } let client = reqwest::Client::new(); let response = client .post(format!("http://{}", graphql_addr)) .json(&serde_json::json!({ "query": query, })) .send() .await .unwrap() .json::() .await .unwrap(); assert_eq!( TryInto::::try_into(response.data.check_precedence.id.as_bytes()).unwrap(), certificates[1].certificate.id ); } ================================================ FILE: 
crates/topos-tce-broadcast/Cargo.toml ================================================ [package] name = "topos-tce-broadcast" version = "0.1.0" edition = "2021" [lints] workspace = true [dependencies] byteorder.workspace = true futures.workspace = true lazy_static.workspace = true rand.workspace = true rand_core.workspace = true serde.workspace = true thiserror.workspace = true tokio = { workspace = true, features = ["full"] } tokio-stream = { workspace = true, features = ["sync"] } tokio-util.workspace = true tracing-subscriber = { workspace = true, features = ["env-filter", "fmt"] } tracing.workspace = true topos-core = { workspace = true, features = ["uci"] } topos-config = { path = "../topos-config/" } topos-metrics = { path = "../topos-metrics/" } topos-tce-storage = { path = "../topos-tce-storage/" } topos-crypto = { path = "../topos-crypto" } [dev-dependencies] criterion = { version = "0.5.1", features = ["async_futures", "async_tokio"] } rstest = { workspace = true, features = ["async-timeout"] } test-log.workspace = true env_logger.workspace = true rand.workspace = true hex.workspace = true topos-test-sdk = { path = "../topos-test-sdk/" } [[bench]] name = "double_echo" path = "benches/double_echo.rs" harness = false ================================================ FILE: crates/topos-tce-broadcast/README.md ================================================ # topos-tce-broadcast Implementation of Topos Reliable Broadcast to be used in the Transmission Control Engine (TCE) This crate is designed to be used as a library in the TCE implementation. It covers the Reliable Broadcast part of the TCE, which is the core of the TCE. It doesn't handle how messages are sent or received, nor how the certificates are stored. It is designed to be used with any transport and storage implementation, relying on the `ProtocolEvents` and `DoubleEchoCommand` to communicate with the transport and storage. 
The reliable broadcast allows a set of validators to agree on a set of messages in order to reach agreement about the delivery of a certificate. Each certificate needs to be broadcast to the network, and each validator needs to receive a threshold of messages from the other validators. The thresholds are defined by the `ReliableBroadcastParams` and correspond to the minimum number of validators who need to agree on one certificate in order to consider it delivered. This crate is responsible for validating and driving the broadcast of every certificate. ### Input The input of the broadcast is a certificate to be broadcast. It can be received from the transport layer, or from the storage layer (from the pending tables). The transport layer can be anything from a p2p network to API calls. Other inputs are the messages received from the transport layer, coming from other validators. They're `Echo` and `Ready` signed messages. ### Output The outcome of the broadcast is either a certificate delivered or a failure on the delivery. 
The implementation is based on the paper: [Topos: A Secure, Trustless, and Decentralized Interoperability Protocol](https://arxiv.org/pdf/2206.03481.pdf) ================================================ FILE: crates/topos-tce-broadcast/benches/double_echo.rs ================================================ use criterion::async_executor::FuturesExecutor; use criterion::{criterion_group, criterion_main, Criterion}; use topos_test_sdk::storage::create_validator_store; mod task_manager; pub fn criterion_benchmark(c: &mut Criterion) { let certificates = 10_000; let runtime = tokio::runtime::Builder::new_current_thread() .build() .unwrap(); let store = runtime.block_on(async { create_validator_store::partial_1(&[]).await }); c.bench_function("double_echo", |b| { b.to_async(FuturesExecutor).iter(|| async { runtime.block_on(async { task_manager::processing_double_echo(certificates, store.clone()).await }) }) }); } criterion_group!(benches, criterion_benchmark); criterion_main!(benches); ================================================ FILE: crates/topos-tce-broadcast/benches/task_manager.rs ================================================ use std::collections::HashSet; use std::str::FromStr; use std::sync::Arc; use tokio::sync::{broadcast, mpsc, oneshot}; use topos_config::tce::broadcast::ReliableBroadcastParams; use topos_core::types::ValidatorId; use topos_crypto::messages::MessageSigner; use topos_tce_broadcast::double_echo::DoubleEcho; use topos_tce_storage::validator::ValidatorStore; use topos_test_sdk::certificates::create_certificate_chain; use topos_test_sdk::constants::{SOURCE_SUBNET_ID_1, TARGET_SUBNET_ID_1}; const CHANNEL_SIZE: usize = 256_000; const PRIVATE_KEY: &str = "d6f8d1fe6d0f3606ccb15ef383910f10d83ca77bf3d73007f12fef023dabaab9"; struct TceParams { nb_peers: usize, broadcast_params: ReliableBroadcastParams, } pub async fn processing_double_echo(n: u64, validator_store: Arc) { let (_cmd_sender, cmd_receiver) = mpsc::channel(CHANNEL_SIZE); let 
(event_sender, _event_receiver) = mpsc::channel(CHANNEL_SIZE); let (broadcast_sender, mut broadcast_receiver) = broadcast::channel(CHANNEL_SIZE); let (_double_echo_shutdown_sender, double_echo_shutdown_receiver) = mpsc::channel::>(1); let (task_manager_message_sender, task_manager_message_receiver) = mpsc::channel(CHANNEL_SIZE); let params = TceParams { nb_peers: 10, broadcast_params: ReliableBroadcastParams { echo_threshold: 8, ready_threshold: 5, delivery_threshold: 8, }, }; let message_signer: Arc = Arc::new(MessageSigner::from_str(PRIVATE_KEY).unwrap()); let mut validators = HashSet::new(); let validator_id = ValidatorId::from(message_signer.public_address); validators.insert(validator_id); for i in 1..params.nb_peers { validators.insert(ValidatorId::from( MessageSigner::new(&[i as u8; 32]).unwrap().public_address, )); } let mut double_echo = DoubleEcho::new( params.broadcast_params, validator_id, message_signer.clone(), validators.clone(), task_manager_message_sender.clone(), cmd_receiver, event_sender, double_echo_shutdown_receiver, validator_store.clone(), broadcast_sender, ); double_echo.spawn_task_manager(task_manager_message_receiver); let certificates = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], n as usize); let double_echo_selected_echo = double_echo .subscriptions .echo .iter() .take(double_echo.params.echo_threshold) .cloned() .collect::>(); let double_echo_selected_ready = double_echo .subscriptions .ready .iter() .take(double_echo.params.delivery_threshold) .cloned() .collect::>(); for cert in &certificates { _ = validator_store .insert_pending_certificate(&cert.certificate) .await .unwrap(); } for cert in &certificates { let mut payload = Vec::new(); payload.extend_from_slice(cert.certificate.id.as_array()); payload.extend_from_slice(validator_id.as_bytes()); for _ in &double_echo_selected_echo { let signature = message_signer.sign_message(&payload).unwrap(); double_echo .handle_echo(cert.certificate.id, validator_id, 
signature) .await; } for _ in &double_echo_selected_ready { let signature = message_signer.sign_message(&payload).unwrap(); double_echo .handle_ready(cert.certificate.id, validator_id, signature) .await; } } let mut count = 0; while let Ok(_event) = broadcast_receiver.recv().await { count += 1; if count == n { break; } } } ================================================ FILE: crates/topos-tce-broadcast/src/constant.rs ================================================ use lazy_static::lazy_static; lazy_static! { /// Size of the double echo command channel pub static ref COMMAND_CHANNEL_SIZE: usize = std::env::var("TOPOS_DOUBLE_ECHO_COMMAND_CHANNEL_SIZE") .ok() .and_then(|s| s.parse().ok()) .unwrap_or(2048); /// Size of the channel between double echo and the task manager pub static ref BROADCAST_TASK_MANAGER_CHANNEL_SIZE: usize = std::env::var("TOPOS_BROADCAST_TASK_MANAGER_CHANNEL_SIZE") .ok() .and_then(|s| s.parse().ok()) .unwrap_or(20_480); /// Size of the channel to send protocol events from the double echo pub static ref PROTOCOL_CHANNEL_SIZE: usize = std::env::var("TOPOS_PROTOCOL_CHANNEL_SIZE") .ok() .and_then(|s| s.parse().ok()) .unwrap_or(2048); /// Capacity alert threshold for the double echo command channel pub static ref COMMAND_CHANNEL_CAPACITY: usize = COMMAND_CHANNEL_SIZE .checked_mul(10) .map(|v| { let r: usize = v.checked_div(100).unwrap_or(*COMMAND_CHANNEL_SIZE); r }) .unwrap_or(*COMMAND_CHANNEL_SIZE); /// pub static ref PENDING_LIMIT_PER_REQUEST_TO_STORAGE: usize = std::env::var("TOPOS_PENDING_LIMIT_PER_REQUEST_TO_STORAGE") .ok() .and_then(|s| s.parse().ok()) .unwrap_or(1000); } ================================================ FILE: crates/topos-tce-broadcast/src/double_echo/broadcast_state/status.rs ================================================ use std::fmt::Display; #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum Status { Pending, EchoSent, ReadySent, DeliveredWithReadySent, Delivered, } impl Display for Status { fn fmt(&self, f: &mut 
std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Self::Pending => write!(f, "Pending"), Self::EchoSent => write!(f, "EchoSent"), Self::ReadySent => write!(f, "ReadySent"), Self::DeliveredWithReadySent => write!(f, "DeliveredWithReadySent"), Self::Delivered => write!(f, "Delivered"), } } } impl Status { pub(crate) fn is_ready_sent(&self) -> bool { matches!(self, Self::ReadySent) || matches!(self, Self::DeliveredWithReadySent) } pub(crate) fn is_delivered(&self) -> bool { matches!(self, Self::Delivered) || matches!(self, Self::DeliveredWithReadySent) } pub(crate) fn ready_sent(self) -> Self { match self { Self::EchoSent => Self::ReadySent, Self::Delivered => Self::DeliveredWithReadySent, _ => self, } } pub(crate) fn delivered(self) -> Self { match self { Self::ReadySent => Self::DeliveredWithReadySent, _ => Self::Delivered, } } } ================================================ FILE: crates/topos-tce-broadcast/src/double_echo/broadcast_state.rs ================================================ use crate::event::ProtocolEvents; use crate::sampler::SubscriptionsView; use std::sync::Arc; use std::{collections::HashSet, time}; use tokio::sync::mpsc; use topos_core::{ types::{ stream::{CertificateSourceStreamPosition, Position}, CertificateDelivered, ProofOfDelivery, Ready, ValidatorId, }, uci::Certificate, }; use topos_crypto::messages::MessageSigner; use topos_metrics::DOUBLE_ECHO_BROADCAST_FINISHED_TOTAL; use tracing::{debug, error, info, trace}; mod status; pub use status::Status; #[derive(Debug)] pub struct BroadcastState { subscriptions_view: SubscriptionsView, status: Status, pub(crate) certificate: Certificate, validator_id: ValidatorId, echo_threshold: usize, ready_threshold: usize, delivery_threshold: usize, message_signer: Arc, event_sender: mpsc::Sender, delivery_time: time::Instant, readies: HashSet, pub(crate) expected_position: Option, } impl BroadcastState { #[allow(clippy::too_many_arguments)] pub fn new( certificate: Certificate, validator_id: 
ValidatorId, echo_threshold: usize, ready_threshold: usize, delivery_threshold: usize, event_sender: mpsc::Sender, subscriptions_view: SubscriptionsView, need_gossip: bool, message_signer: Arc, ) -> Self { let mut state = Self { subscriptions_view, status: Status::Pending, certificate, validator_id, echo_threshold, ready_threshold, delivery_threshold, message_signer, event_sender, delivery_time: time::Instant::now(), readies: HashSet::new(), expected_position: None, }; _ = state.event_sender.try_send(ProtocolEvents::Broadcast { certificate_id: state.certificate.id, }); if need_gossip { debug!( "📣 Gossiping the Certificate {} from the source subnet {}", &state.certificate.id, &state.certificate.source_subnet_id ); let _ = state.event_sender.try_send(ProtocolEvents::Gossip { cert: state.certificate.clone(), }); } state.update_status(); state } pub fn into_delivered(&self) -> CertificateDelivered { CertificateDelivered { certificate: self.certificate.clone(), proof_of_delivery: ProofOfDelivery { certificate_id: self.certificate.id, delivery_position: CertificateSourceStreamPosition { subnet_id: self.certificate.source_subnet_id, // FIXME: Should never fails but need to find how to remove the unwrap position: self .expected_position .expect("Expected position is not set, this is a bug"), }, readies: self .readies .iter() .cloned() .map(|r| (r, "signature".to_string())) .collect(), threshold: self.delivery_threshold as u64, }, } } pub fn apply_echo(&mut self, validator_id: ValidatorId) -> Option { if self.subscriptions_view.echo.remove(&validator_id) { self.update_status() } else { None } } pub fn apply_ready(&mut self, validator_id: ValidatorId) -> Option { if self.subscriptions_view.ready.remove(&validator_id) { self.readies.insert(validator_id.to_string()); self.update_status() } else { None } } fn update_status(&mut self) -> Option { // Nothing happened yet, we're in the initial state and didn't process // any Echo or Ready messages // Sending our Echo message if 
let Status::Pending = self.status { let mut payload = Vec::new(); payload.extend_from_slice(self.certificate.id.as_array()); payload.extend_from_slice(self.validator_id.as_bytes()); let _ = self.event_sender.try_send(ProtocolEvents::Echo { certificate_id: self.certificate.id, signature: self.message_signer.sign_message(&payload).ok()?, validator_id: self.validator_id, }); self.status = Status::EchoSent; trace!( "Certificate {} is now {}", &self.certificate.id, self.status ); return Some(self.status); } // Upon reaching the Echo or Ready threshold, if the status is either // EchoSent or Delivered (without ReadySent), we send the Ready message // and update the status accordingly. // If the status was EchoSent, we update it to ReadySent // If the status was Delivered, we update it to DeliveredWithReadySent if !self.status.is_ready_sent() && self.reached_ready_threshold() { let mut payload = Vec::new(); payload.extend_from_slice(self.certificate.id.as_array()); payload.extend_from_slice(self.validator_id.as_bytes()); let event = ProtocolEvents::Ready { certificate_id: self.certificate.id, signature: self.message_signer.sign_message(&payload).ok()?, validator_id: self.validator_id, }; if let Err(e) = self.event_sender.try_send(event) { error!("Failed to send the Ready message: {}", e); } self.status = self.status.ready_sent(); trace!( "Certificate {} is now {}", &self.certificate.id, self.status ); return Some(self.status); } // Upon reaching the Delivery threshold, if the status is not Delivered, // we update the status to Delivered and change the status if !self.status.is_delivered() && self.reached_delivery_threshold() { self.status = self.status.delivered(); trace!( "Certificate {} is now {}", &self.certificate.id, self.status ); // Calculate delivery time let from = self.delivery_time; let duration = from.elapsed(); let d = duration; info!( "📝 Certificate delivered {} with broadcast duration: {:?}", self.certificate.id, d ); 
DOUBLE_ECHO_BROADCAST_FINISHED_TOTAL.inc(); return Some(self.status); } None } fn reached_ready_threshold(&self) -> bool { // Compute the threshold let reached_echo_threshold = match self .subscriptions_view .network_size .checked_sub(self.subscriptions_view.echo.len()) { Some(consumed) => consumed >= self.echo_threshold, None => false, }; let reached_ready_threshold = match self .subscriptions_view .network_size .checked_sub(self.subscriptions_view.ready.len()) { Some(consumed) => consumed >= self.ready_threshold, None => false, }; trace!( "Certificate {} reached Echo threshold: {} and Ready threshold: {}", &self.certificate.id, reached_echo_threshold, reached_ready_threshold ); // If reached any of the Echo or Ready thresholds, I send the Ready reached_echo_threshold || reached_ready_threshold } fn reached_delivery_threshold(&self) -> bool { // If reached the delivery threshold, I can deliver let delivery_threshold = match self .subscriptions_view .network_size .checked_sub(self.subscriptions_view.ready.len()) { Some(consumed) => consumed >= self.delivery_threshold, None => false, }; trace!( "Certificate {} reached Delivery threshold: {}", &self.certificate.id, delivery_threshold ); delivery_threshold } } ================================================ FILE: crates/topos-tce-broadcast/src/double_echo/mod.rs ================================================ //! Everything related to the double_echo implementation //! //! ## Messages and roles //! //! In order to prevent many non validator's messages to be published on the //! gossip topics, messages are filtered when the [`DoubleEcho`] is producing events. //! //! For `validator` nothing changed, for `fullnode` and `sentry` node, their `Echo` and //! `Ready` messages are filtered, they still produce `Gossip` messages tho. //! //! It doesn't mean that a `fullnode` will stop propagate messages from //! `validators`, it only prevents a non validator to publish messages that will //! be ignored by others. 
`fullnode` still consumes Echo and Ready coming from //! validators and use those messages to build their state. use crate::event::ProtocolEvents; use crate::{DoubleEchoCommand, SubscriptionsView}; use std::collections::HashSet; use std::sync::Arc; use tokio::sync::{broadcast, mpsc, oneshot}; use tokio_util::sync::CancellationToken; use topos_config::tce::broadcast::ReliableBroadcastParams; use topos_core::{types::ValidatorId, uci::CertificateId}; use topos_crypto::messages::{MessageSigner, Signature}; use topos_tce_storage::store::ReadStore; use topos_tce_storage::types::CertificateDeliveredWithPositions; use topos_tce_storage::validator::ValidatorStore; use tracing::{debug, error, info, warn}; pub mod broadcast_state; pub struct DoubleEcho { /// Channel to receive commands command_receiver: mpsc::Receiver, /// Channel to send events event_sender: mpsc::Sender, /// Channel to receive shutdown signal pub(crate) shutdown: mpsc::Receiver>, /// The threshold parameters for the double echo pub params: ReliableBroadcastParams, /// The connection to the TaskManager to forward DoubleEchoCommand messages task_manager_message_sender: mpsc::Sender, /// The overview of the network, which holds echo and ready subscriptions and the network size pub subscriptions: SubscriptionsView, /// Local node ValidatorId pub validator_id: ValidatorId, /// Keypair to sign and verify ECHO and READY messages pub message_signer: Arc, /// List of approved validators through smart contract and/or genesis pub validators: HashSet, pub validator_store: Arc, pub broadcast_sender: broadcast::Sender, pub task_manager_cancellation: CancellationToken, } impl DoubleEcho { pub const MAX_BUFFER_SIZE: usize = 2048; #[allow(clippy::too_many_arguments)] pub fn new( params: ReliableBroadcastParams, validator_id: ValidatorId, message_signer: Arc, validators: HashSet, task_manager_message_sender: mpsc::Sender, command_receiver: mpsc::Receiver, event_sender: mpsc::Sender, shutdown: mpsc::Receiver>, 
validator_store: Arc, broadcast_sender: broadcast::Sender, ) -> Self { Self { params, validator_id, message_signer, validators: validators.clone(), task_manager_message_sender, command_receiver, event_sender, subscriptions: SubscriptionsView { echo: validators.clone(), ready: validators.clone(), network_size: validators.len(), }, shutdown, validator_store, broadcast_sender, task_manager_cancellation: CancellationToken::new(), } } pub fn spawn_task_manager( &mut self, task_manager_message_receiver: mpsc::Receiver, ) { let task_manager = crate::task_manager::TaskManager::new( task_manager_message_receiver, self.subscriptions.clone(), self.event_sender.clone(), self.validator_id, self.params.clone(), self.message_signer.clone(), self.validator_store.clone(), self.broadcast_sender.clone(), ); tokio::spawn(task_manager.run(self.task_manager_cancellation.child_token())); } /// DoubleEcho main loop /// - Listen for shutdown signal /// - Read new messages from command_receiver /// - If a new certificate is received, add it to the buffer /// - If a new subscription view is received, update the subscriptions /// - If a new Echo/Ready is received, update the state of the certificate or buffer /// the message pub(crate) async fn run( mut self, task_manager_message_receiver: mpsc::Receiver, ) { self.spawn_task_manager(task_manager_message_receiver); info!("DoubleEcho started"); let shutdowned: Option> = loop { tokio::select! 
{ biased; shutdown = self.shutdown.recv() => { warn!("Double echo shutdown signal received {:?}", shutdown); self.task_manager_cancellation.cancel(); break shutdown; }, Some(command) = self.command_receiver.recv() => { match command { command if self.subscriptions.is_some() => { match command { DoubleEchoCommand::Broadcast { cert, need_gossip, pending_id } => { _ = self .task_manager_message_sender .send(DoubleEchoCommand::Broadcast { need_gossip, cert, pending_id }) .await; } DoubleEchoCommand::Echo { certificate_id, validator_id, signature } => { // Check if source is part of known_validators if !self.validators.contains(&validator_id) { debug!("ECHO message comes from non-validator: {}", validator_id); continue; } let mut payload = Vec::new(); payload.extend_from_slice(certificate_id.as_array()); payload.extend_from_slice(validator_id.as_bytes()); if let Err(e) = self.message_signer.verify_signature(signature, &payload, validator_id.address()) { debug!("ECHO message signature cannot be verified from: {}", e); continue; } self.handle_echo(certificate_id, validator_id, signature).await }, DoubleEchoCommand::Ready { certificate_id, validator_id, signature } => { // Check if source is part of known_validators if !self.validators.contains(&validator_id) { debug!("READY message comes from non-validator: {}", validator_id); continue; } let mut payload = Vec::new(); payload.extend_from_slice(certificate_id.as_array()); payload.extend_from_slice(validator_id.as_bytes()); if let Err(e) = self.message_signer.verify_signature(signature, &payload, validator_id.address()) { debug!("READY message signature cannot be verified from: {}", e); continue; } self.handle_ready(certificate_id, validator_id, signature).await }, } }, command => { warn!("Received a command {command:?} while not having a complete sampling"); } } } else => { debug!("Break the tokio loop for the double echo"); break None; } } }; if let Some(sender) = shutdowned { info!("Shutting down p2p double echo..."); _ 
= sender.send(());
        } else {
            debug!("Shutting down p2p double echo due to error...");
        }
    }
}

impl DoubleEcho {
    /// Handle a verified ECHO message.
    ///
    /// If the certificate is already present in the validator store it has been
    /// delivered, so the message is ignored; otherwise the message is forwarded
    /// to the task manager which routes it to the broadcast task for this
    /// certificate.
    pub async fn handle_echo(
        &mut self,
        certificate_id: CertificateId,
        validator_id: ValidatorId,
        signature: Signature,
    ) {
        match self.validator_store.get_certificate(&certificate_id) {
            Err(storage_error) => error!(
                "Unable to get the Certificate {} due to {:?}",
                &certificate_id, storage_error
            ),
            Ok(Some(_)) => debug!(
                "Certificate {} already delivered, ignoring echo",
                &certificate_id
            ),
            Ok(None) => {
                let _ = self
                    .task_manager_message_sender
                    .send(DoubleEchoCommand::Echo {
                        validator_id,
                        certificate_id,
                        signature,
                    })
                    .await;
            }
        }
    }

    /// Handle a verified READY message.
    ///
    /// Same flow as [`Self::handle_echo`] but forwards a `Ready` command.
    pub async fn handle_ready(
        &mut self,
        certificate_id: CertificateId,
        validator_id: ValidatorId,
        signature: Signature,
    ) {
        match self.validator_store.get_certificate(&certificate_id) {
            Err(storage_error) => error!(
                "Unable to get the Certificate {} due to {:?}",
                &certificate_id, storage_error
            ),
            // Fix: this branch previously logged "ignoring echo" although it
            // handles READY messages (copy-paste from handle_echo).
            Ok(Some(_)) => debug!(
                "Certificate {} already delivered, ignoring ready",
                &certificate_id
            ),
            Ok(None) => {
                let _ = self
                    .task_manager_message_sender
                    .send(DoubleEchoCommand::Ready {
                        validator_id,
                        certificate_id,
                        signature,
                    })
                    .await;
            }
        }
    }
}

================================================ FILE: crates/topos-tce-broadcast/src/event.rs ================================================

use topos_core::{
    types::ValidatorId,
    uci::{Certificate, CertificateId},
};
use topos_crypto::messages::Signature;

/// Protocol events
#[derive(Clone, Debug)]
pub enum ProtocolEvents {
    BroadcastFailed {
        certificate_id: CertificateId,
    },
    AlreadyDelivered {
        certificate_id: CertificateId,
    },
    /// (pb.Broadcast)
    Broadcast {
        certificate_id: CertificateId,
    },
    /// Indicates that 'gossip' message broadcasting is required
    Gossip {
        cert: Certificate,
    },
    /// Indicates that 'echo' message broadcasting is required
    Echo {
        certificate_id: CertificateId,
        signature: Signature,
        validator_id: ValidatorId,
    },
    /// Indicates that 'ready' message broadcasting is required
    Ready {
        certificate_id: CertificateId,
        signature: Signature,
        validator_id: ValidatorId,
    },
}

================================================ FILE: crates/topos-tce-broadcast/src/lib.rs ================================================

//! Implementation of Topos Reliable Broadcast to be used in the Transmission Control Engine (TCE)
//!
//! This crate is designed to be used as a library in the TCE implementation.
//! It covers the Reliable Broadcast part of the TCE, which is the core of the TCE.
//! It doesn't handle how messages are sent or received, nor how the certificates are stored.
//! It is designed to be used with any transport and storage implementation, relying on the
//! `ProtocolEvents` and `DoubleEchoCommand` to communicate with the transport and storage.
//!
//! The reliable broadcast allows a set of validators to agree on a set of messages in order to
//! reach agreement about the delivery of a certificate.
//!
//! Each certificate needs to be broadcast to the network, and each validator needs to
//! receive a threshold of messages from the other validators.
//! The thresholds are defined by the `ReliableBroadcastParams` and correspond to the minimum number of
//! validators who need to agree on one certificate in order to consider it delivered.
//!
//! This crate is responsible for validating and driving the broadcast of every certificates.
//!
//! ## Input
//!
//! The input of the broadcast is a certificate to be broadcast. It can be received from
//! the transport layer, or from the storage layer (from the pending tables).
//!
//! The transport layer can be anything from p2p network to API calls.
//!
//! Other inputs are the messages received from the transport layer, coming from other validators.
//! They're `Echo` and `Ready` signed messages.
//!
//! ## Output
//!
//! The outcome of the broadcast is either a certificate delivered or a failure on the delivery.
//!
//!
The implementation is based on the paper: [Topos: A Secure, Trustless, and Decentralized Interoperability Protocol](https://arxiv.org/pdf/2206.03481.pdf) //! use crate::event::ProtocolEvents; use double_echo::DoubleEcho; use futures::Stream; use std::collections::HashSet; use std::sync::Arc; use thiserror::Error; use tokio::spawn; use tokio::sync::mpsc::Sender; use tokio::sync::{broadcast, mpsc, oneshot}; use tokio_stream::wrappers::ReceiverStream; use topos_config::tce::broadcast::ReliableBroadcastParams; use topos_core::types::ValidatorId; use topos_core::uci::{Certificate, CertificateId}; use topos_crypto::messages::{MessageSigner, Signature}; use topos_tce_storage::types::CertificateDeliveredWithPositions; use topos_tce_storage::validator::ValidatorStore; use tracing::{debug, error, Instrument}; pub use topos_core::uci; pub type Peer = String; mod constant; pub mod double_echo; pub mod event; pub mod sampler; pub mod task_manager; #[cfg(test)] mod tests; use crate::sampler::SubscriptionsView; #[derive(Debug)] pub enum TaskStatus { /// The task finished successfully and broadcast the certificate + received ready Success, /// The task did not finish successfully and stopped. 
Failure, } /// Configuration of TCE implementation pub struct ReliableBroadcastConfig { pub tce_params: ReliableBroadcastParams, pub validator_id: ValidatorId, pub validators: HashSet, pub message_signer: Arc, } #[derive(Debug, Clone)] pub enum DoubleEchoCommand { /// Entry point for new certificate to submit as initial sender Broadcast { need_gossip: bool, cert: Certificate, pending_id: u64, }, /// When echo reply received Echo { validator_id: ValidatorId, certificate_id: CertificateId, signature: Signature, }, /// When ready reply received Ready { validator_id: ValidatorId, certificate_id: CertificateId, signature: Signature, }, } /// Thread safe client to the protocol aggregate #[derive(Clone, Debug)] pub struct ReliableBroadcastClient { command_sender: Sender, pub(crate) double_echo_shutdown_channel: Sender>, } impl ReliableBroadcastClient { /// Creates new instance of the aggregate and returns proxy to it. /// /// New client instances to the same aggregate can be cloned from the returned one. /// Aggregate is spawned as new task. 
pub async fn new( config: ReliableBroadcastConfig, validator_store: Arc, broadcast_sender: broadcast::Sender, ) -> (Self, impl Stream) { let (event_sender, event_receiver) = mpsc::channel(*constant::PROTOCOL_CHANNEL_SIZE); let (command_sender, command_receiver) = mpsc::channel(*constant::COMMAND_CHANNEL_SIZE); let (double_echo_shutdown_channel, double_echo_shutdown_receiver) = mpsc::channel::>(1); let (task_manager_message_sender, task_manager_message_receiver) = mpsc::channel(*constant::BROADCAST_TASK_MANAGER_CHANNEL_SIZE); let double_echo = DoubleEcho::new( config.tce_params, config.validator_id, config.message_signer, config.validators, task_manager_message_sender, command_receiver, event_sender, double_echo_shutdown_receiver, validator_store, broadcast_sender, ); spawn( double_echo .run(task_manager_message_receiver) .in_current_span(), ); ( Self { command_sender, double_echo_shutdown_channel, }, ReceiverStream::new(event_receiver), ) } pub fn get_double_echo_channel(&self) -> Sender { self.command_sender.clone() } pub async fn shutdown(&self) -> Result<(), Errors> { debug!("Shutting down reliable broadcast client"); let (double_echo_sender, double_echo_receiver) = oneshot::channel(); self.double_echo_shutdown_channel .send(double_echo_sender) .await .map_err(Errors::ShutdownCommunication)?; double_echo_receiver.await?; Ok(()) } } /// Protocol and technical errors #[derive(Error, Debug)] pub enum Errors { #[error("Error while sending a DoubleEchoCommand to DoubleEcho: {0:?}")] DoubleEchoSend(#[from] Box>), #[error("Error while waiting for a DoubleEchoCommand response: {0:?}")] DoubleEchoRecv(#[from] oneshot::error::RecvError), #[error("Requested certificate not found")] CertificateNotFound, #[error("Requested digest not found for certificate {0:?}")] DigestNotFound(CertificateId), #[error("Cannot create public address from private key")] ProducePublicAddress, #[error("Unable to execute shutdown for the reliable broadcast: {0}")] 
ShutdownCommunication(mpsc::error::SendError<oneshot::Sender<()>>),
}

================================================ FILE: crates/topos-tce-broadcast/src/sampler/mod.rs ================================================

use std::collections::HashSet;

use topos_core::types::ValidatorId;

/// Stateful network view with whom we broadcast the Certificate
/// The Echo and the Ready sets are initially equal to the whole network
#[derive(Debug, Clone, Eq, PartialEq, Default)]
pub struct SubscriptionsView {
    /// Set of Peer from which we listen for ECHO messages
    pub echo: HashSet<ValidatorId>,
    /// Set of Peer from which we listen for READY messages
    pub ready: HashSet<ValidatorId>,
    /// Size of the network
    pub network_size: usize,
}

impl SubscriptionsView {
    /// True when we have a view of the network, i.e. `is_none` does not hold.
    pub fn is_some(&self) -> bool {
        !self.is_none()
    }

    /// True when both the echo and the ready subscription sets are empty.
    pub fn is_none(&self) -> bool {
        self.echo.is_empty() && self.ready.is_empty()
    }
}

================================================ FILE: crates/topos-tce-broadcast/src/task_manager/mod.rs ================================================

use crate::event::ProtocolEvents;
use futures::stream::FuturesUnordered;
use futures::Future;
use futures::StreamExt;
use std::collections::HashMap;
use std::future::IntoFuture;
use std::pin::Pin;
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::broadcast;
use tokio::{spawn, sync::mpsc};
use tokio_util::sync::CancellationToken;
use topos_config::tce::broadcast::ReliableBroadcastParams;
use topos_core::types::ValidatorId;
use topos_core::uci::Certificate;
use topos_core::uci::CertificateId;
use topos_metrics::CERTIFICATE_PROCESSING_FROM_API_TOTAL;
use topos_metrics::CERTIFICATE_PROCESSING_FROM_GOSSIP_TOTAL;
use topos_metrics::CERTIFICATE_PROCESSING_TOTAL;
use topos_metrics::DOUBLE_ECHO_ACTIVE_TASKS_COUNT;
use topos_tce_storage::store::ReadStore;
use topos_tce_storage::types::CertificateDeliveredWithPositions;
use topos_tce_storage::validator::ValidatorStore;
use topos_tce_storage::PendingCertificateId;
use tracing::{debug, error, info, trace, warn};

pub mod task;

use
crate::constant::PENDING_LIMIT_PER_REQUEST_TO_STORAGE; use crate::double_echo::broadcast_state::BroadcastState; use crate::sampler::SubscriptionsView; use crate::DoubleEchoCommand; use crate::TaskStatus; use task::{Task, TaskContext}; use topos_crypto::messages::MessageSigner; type RunningTasks = FuturesUnordered + Send + 'static>>>; /// The TaskManager is responsible for receiving messages from the network and distributing them /// among tasks. These tasks are either created if none for a certain CertificateID exists yet, /// or existing tasks will receive the messages. pub struct TaskManager { pub message_receiver: mpsc::Receiver, pub subscriptions: SubscriptionsView, pub event_sender: mpsc::Sender, pub tasks: HashMap, pub message_signer: Arc, #[allow(clippy::type_complexity)] pub running_tasks: RunningTasks, pub buffered_messages: HashMap>, pub thresholds: ReliableBroadcastParams, pub validator_id: ValidatorId, pub validator_store: Arc, pub broadcast_sender: broadcast::Sender, pub latest_pending_id: PendingCertificateId, } impl TaskManager { #[allow(clippy::too_many_arguments)] pub fn new( message_receiver: mpsc::Receiver, subscriptions: SubscriptionsView, event_sender: mpsc::Sender, validator_id: ValidatorId, thresholds: ReliableBroadcastParams, message_signer: Arc, validator_store: Arc, broadcast_sender: broadcast::Sender, ) -> Self { Self { message_receiver, subscriptions, event_sender, tasks: HashMap::new(), running_tasks: FuturesUnordered::new(), buffered_messages: Default::default(), validator_id, message_signer, thresholds, validator_store, broadcast_sender, latest_pending_id: 0, } } /// Fetch the next pending certificates from the storage and create tasks for them. /// This method is called periodically to check for new pending certificates and when /// a task has finished. 
fn next_pending_certificate(&mut self) { debug!("Checking for next pending_certificates"); match self.validator_store.get_next_pending_certificates( &self.latest_pending_id, *PENDING_LIMIT_PER_REQUEST_TO_STORAGE, ) { Ok(pendings) => { debug!("Received {} pending certificates", pendings.len()); for (pending_id, certificate) in pendings { self.create_task(&certificate, true, pending_id); self.latest_pending_id = pending_id; } } Err(error) => { error!("Failed to fetch the pending certificates: {:?}", error); } } } pub async fn run(mut self, shutdown_receiver: CancellationToken) { let mut interval = tokio::time::interval(Duration::from_secs(1)); loop { tokio::select! { biased; _ = interval.tick() => { self.next_pending_certificate(); } Some(msg) = self.message_receiver.recv() => { match msg { DoubleEchoCommand::Echo { certificate_id, .. } | DoubleEchoCommand::Ready { certificate_id, .. } => { if let Some(task_context) = self.tasks.get(&certificate_id) { _ = task_context.sink.send(msg).await; } else { self.buffered_messages .entry(certificate_id) .or_default() .push(msg); }; } DoubleEchoCommand::Broadcast { ref cert, need_gossip, pending_id } => { trace!("Received broadcast message for certificate {} ", cert.id); self.create_task(cert, need_gossip, pending_id) } } } Some((certificate_id, status)) = self.running_tasks.next() => { if let TaskStatus::Success = status { trace!("Task for certificate {} finished successfully", certificate_id); self.tasks.remove(&certificate_id); DOUBLE_ECHO_ACTIVE_TASKS_COUNT.dec(); } else { error!("Task for certificate {} finished unsuccessfully", certificate_id); } self.next_pending_certificate(); } _ = shutdown_receiver.cancelled() => { info!("Task Manager shutting down"); debug!("Remaining active tasks: {:?}", self.tasks.len()); if !self.tasks.is_empty() { debug!("Certificates still in broadcast: {:?}", self.tasks.keys()); } warn!("Remaining buffered messages: {}", self.buffered_messages.len()); for task in self.tasks.iter() { 
task.1.shutdown_sender.send(()).await.unwrap(); } break; } } } } fn start_task( running_tasks: &RunningTasks, task: Task, sink: mpsc::Sender, messages: Option>, need_gossip: bool, ) { running_tasks.push(task.into_future()); if let Some(messages) = messages { spawn(async move { for msg in messages { _ = sink.send(msg).await; } }); } DOUBLE_ECHO_ACTIVE_TASKS_COUNT.inc(); CERTIFICATE_PROCESSING_TOTAL.inc(); if need_gossip { CERTIFICATE_PROCESSING_FROM_API_TOTAL.inc(); } else { CERTIFICATE_PROCESSING_FROM_GOSSIP_TOTAL.inc(); } } /// Create a new task for the given certificate and add it to the running tasks. /// If the previous certificate is not available yet, the task will be created but not started. /// This method is called when a pending certificate is fetched from the storage. fn create_task(&mut self, cert: &Certificate, need_gossip: bool, pending_id: u64) { match self.tasks.entry(cert.id) { std::collections::hash_map::Entry::Vacant(entry) => { let broadcast_state = BroadcastState::new( cert.clone(), self.validator_id, self.thresholds.echo_threshold, self.thresholds.ready_threshold, self.thresholds.delivery_threshold, self.event_sender.clone(), self.subscriptions.clone(), need_gossip, self.message_signer.clone(), ); let (task, task_context) = Task::new( cert.id, broadcast_state, self.validator_store.clone(), self.broadcast_sender.clone(), ); let prev = self.validator_store.get_certificate(&cert.prev_id); if matches!(prev, Ok(Some(_))) || cert.prev_id == topos_core::uci::INITIAL_CERTIFICATE_ID { Self::start_task( &self.running_tasks, task, task_context.sink.clone(), self.buffered_messages.remove(&cert.id), need_gossip, ); } else { debug!( "Received broadcast message for certificate {} but the previous \ certificate {} is not available yet", cert.id, cert.prev_id ); } debug!( "Creating task for pending certificate {} at position {} if needed", cert.id, pending_id ); entry.insert(task_context); } std::collections::hash_map::Entry::Occupied(_) => { trace!( "Received 
broadcast message for certificate {} but it is already being \ processed", cert.id ); } } } } ================================================ FILE: crates/topos-tce-broadcast/src/task_manager/task.rs ================================================ use std::future::{Future, IntoFuture}; use std::pin::Pin; use std::sync::Arc; use tokio::sync::{broadcast, mpsc}; use topos_core::types::stream::Position; use topos_core::uci::CertificateId; use topos_tce_storage::errors::StorageError; use topos_tce_storage::store::{ReadStore, WriteStore}; use topos_tce_storage::types::CertificateDeliveredWithPositions; use topos_tce_storage::validator::ValidatorStore; use tracing::{debug, error}; use crate::double_echo::broadcast_state::{BroadcastState, Status}; use crate::{DoubleEchoCommand, TaskStatus}; #[derive(Debug)] pub struct TaskContext { pub sink: mpsc::Sender, pub shutdown_sender: mpsc::Sender<()>, } pub struct Task { pub validator_store: Arc, pub message_receiver: mpsc::Receiver, pub certificate_id: CertificateId, pub broadcast_state: BroadcastState, pub shutdown_receiver: mpsc::Receiver<()>, broadcast_sender: broadcast::Sender, } impl Task { pub fn new( certificate_id: CertificateId, broadcast_state: BroadcastState, validator_store: Arc, broadcast_sender: broadcast::Sender, ) -> (Task, TaskContext) { let (message_sender, message_receiver) = mpsc::channel(10_024); let (shutdown_sender, shutdown_receiver) = mpsc::channel(1); let task_context = TaskContext { sink: message_sender, shutdown_sender, }; let task = Task { validator_store, message_receiver, certificate_id, broadcast_state, shutdown_receiver, broadcast_sender, }; (task, task_context) } pub async fn persist(&self) -> Result { let certificate_delivered = self.broadcast_state.into_delivered(); let positions = self .validator_store .insert_certificate_delivered(&certificate_delivered) .await?; Ok(CertificateDeliveredWithPositions( certificate_delivered, positions, )) } } impl IntoFuture for Task { type Output = 
(CertificateId, TaskStatus); type IntoFuture = Pin + Send + 'static>>; fn into_future(mut self) -> Self::IntoFuture { Box::pin(async move { // When the task starts, we need to gather information such as current stream position // for the source subnet in order to expect its position let expected_position = match self.validator_store.last_delivered_position_for_subnet( &self.broadcast_state.certificate.source_subnet_id, ) { Ok(Some(stream_position)) => stream_position.position.increment().unwrap(), Ok(None) => Position::ZERO, Err(_) => return (self.certificate_id, TaskStatus::Failure), }; debug!( "Expected position for Certificate {} is {:?} for the subnet {}", self.certificate_id, expected_position, self.broadcast_state.certificate.source_subnet_id ); self.broadcast_state.expected_position = Some(expected_position); loop { tokio::select! { Some(msg) = self.message_receiver.recv() => { match msg { DoubleEchoCommand::Echo { validator_id, .. } => { if let Some(Status::DeliveredWithReadySent) = self.broadcast_state.apply_echo(validator_id) { match self.persist().await { Ok(delivered) => { _ = self.broadcast_sender.send(delivered); return (self.certificate_id, TaskStatus::Success); } Err(error) => { error!("Unable to persist one delivered certificate: {:?}", error); return (self.certificate_id, TaskStatus::Failure); } } } } DoubleEchoCommand::Ready { validator_id, .. 
} => { if let Some(Status::DeliveredWithReadySent) = self.broadcast_state.apply_ready(validator_id) { match self.persist().await { Ok(delivered) => { _ = self.broadcast_sender.send(delivered); return (self.certificate_id, TaskStatus::Success); } Err(error) => { error!("Unable to persist one delivered certificate: {:?}", error); return (self.certificate_id, TaskStatus::Failure); } } } } _ => {} } } _ = self.shutdown_receiver.recv() => { debug!("Received shutdown, shutting down task {:?}", self.certificate_id); return (self.certificate_id, TaskStatus::Failure) } } } }) } } ================================================ FILE: crates/topos-tce-broadcast/src/tests/mod.rs ================================================ use crate::double_echo::*; use crate::event::ProtocolEvents; use rstest::*; use std::collections::HashSet; use std::str::FromStr; use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc::Receiver; use tokio::sync::{broadcast, mpsc, oneshot}; use topos_config::tce::broadcast::ReliableBroadcastParams; use topos_core::uci::Certificate; use topos_crypto::messages::MessageSigner; use topos_crypto::validator_id::ValidatorId; use topos_tce_storage::types::CertificateDeliveredWithPositions; use topos_tce_storage::validator::ValidatorStore; use topos_test_sdk::constants::*; use topos_test_sdk::storage::create_validator_store; mod task; mod task_manager; const CHANNEL_SIZE: usize = 10; const PRIVATE_KEY: &str = "d6f8d1fe6d0f3606ccb15ef383910f10d83ca77bf3d73007f12fef023dabaab9"; #[fixture] fn small_config() -> TceParams { TceParams { nb_peers: 10, broadcast_params: ReliableBroadcastParams { echo_threshold: 8, ready_threshold: 5, delivery_threshold: 8, }, } } #[fixture] fn medium_config() -> TceParams { TceParams { nb_peers: 50, broadcast_params: ReliableBroadcastParams { echo_threshold: 33, ready_threshold: 16, delivery_threshold: 32, }, } } #[derive(Debug)] struct TceParams { nb_peers: usize, broadcast_params: ReliableBroadcastParams, } struct Context 
{
    event_receiver: Receiver<ProtocolEvents>,
    broadcast_receiver: broadcast::Receiver<CertificateDeliveredWithPositions>,
    validator_store: Arc<ValidatorStore>,
}

/// Build a DoubleEcho wired to fresh channels plus a test Context exposing the
/// receivers and the validator store.
async fn create_context(params: TceParams) -> (DoubleEcho, Context) {
    let validator_store = create_validator_store::default().await;
    let (_cmd_sender, cmd_receiver) = mpsc::channel(CHANNEL_SIZE);
    let (event_sender, event_receiver) = mpsc::channel(CHANNEL_SIZE);
    let (_double_echo_shutdown_sender, double_echo_shutdown_receiver) =
        mpsc::channel::<oneshot::Sender<()>>(1);
    let (task_manager_message_sender, task_manager_message_receiver) = mpsc::channel(CHANNEL_SIZE);

    // First validator is ourselves, derived from the fixed test private key.
    let message_signer = Arc::new(MessageSigner::from_str(PRIVATE_KEY).unwrap());
    let mut validators = HashSet::new();
    let validator_id = ValidatorId::from(message_signer.public_address);
    validators.insert(validator_id);

    // The remaining peers get deterministic keys derived from their index.
    for i in 1..params.nb_peers {
        let peer_signer = Arc::new(MessageSigner::new(&[i as u8; 32]).unwrap());
        validators.insert(ValidatorId::from(peer_signer.public_address));
    }

    let (broadcast_sender, broadcast_receiver) = broadcast::channel(CHANNEL_SIZE);

    let mut double_echo = DoubleEcho::new(
        params.broadcast_params,
        validator_id,
        message_signer,
        validators.clone(),
        task_manager_message_sender.clone(),
        cmd_receiver,
        event_sender,
        double_echo_shutdown_receiver,
        validator_store.clone(),
        broadcast_sender,
    );

    double_echo.spawn_task_manager(task_manager_message_receiver);

    let ctx = Context {
        event_receiver,
        broadcast_receiver,
        validator_store,
    };

    (double_echo, ctx)
}

/// Feed `echo_threshold` ECHO messages into the DoubleEcho for `cert`.
async fn reach_echo_threshold(double_echo: &mut DoubleEcho, cert: &Certificate) {
    let selected: Vec<_> = double_echo
        .subscriptions
        .echo
        .iter()
        .take(double_echo.params.echo_threshold)
        .cloned()
        .collect();

    let message_signer = Arc::new(MessageSigner::from_str(PRIVATE_KEY).unwrap());
    let validator_id = ValidatorId::from(message_signer.public_address);

    let mut payload = Vec::new();
    payload.extend_from_slice(cert.id.as_array());
    payload.extend_from_slice(validator_id.as_bytes());

    let signature = message_signer.sign_message(&payload).unwrap();

    for val_id in selected {
        double_echo.handle_echo(cert.id, val_id, signature).await;
    }
}

/// Feed `ready_threshold` READY messages into the DoubleEcho for `cert`.
async fn reach_ready_threshold(double_echo: &mut DoubleEcho, cert: &Certificate) {
    let selected: Vec<_> = double_echo
        .subscriptions
        .ready
        .iter()
        .take(double_echo.params.ready_threshold)
        .cloned()
        .collect();

    let message_signer = Arc::new(MessageSigner::from_str(PRIVATE_KEY).unwrap());
    let validator_id = ValidatorId::from(message_signer.public_address);

    let mut payload = Vec::new();
    payload.extend_from_slice(cert.id.as_array());
    payload.extend_from_slice(validator_id.as_bytes());

    let signature = message_signer.sign_message(&payload).unwrap();

    for val_id in selected {
        double_echo.handle_ready(cert.id, val_id, signature).await;
    }
}

/// Feed `delivery_threshold` READY messages into the DoubleEcho for `cert`.
async fn reach_delivery_threshold(double_echo: &mut DoubleEcho, cert: &Certificate) {
    let selected: Vec<_> = double_echo
        .subscriptions
        .ready
        .iter()
        .take(double_echo.params.delivery_threshold)
        .cloned()
        .collect();

    let message_signer = Arc::new(MessageSigner::from_str(PRIVATE_KEY).unwrap());
    let validator_id = ValidatorId::from(message_signer.public_address);

    let mut payload = Vec::new();
    payload.extend_from_slice(cert.id.as_array());
    payload.extend_from_slice(validator_id.as_bytes());

    let signature = message_signer.sign_message(&payload).unwrap();

    for val_id in selected {
        double_echo.handle_ready(cert.id, val_id, signature).await;
    }
}

#[rstest]
#[case::small_config(small_config())]
#[case(medium_config())]
#[test_log::test(tokio::test)]
#[trace]
#[timeout(Duration::from_secs(10))]
async fn trigger_success_path_upon_reaching_threshold(#[case] params: TceParams) {
    let (mut double_echo, mut ctx) = create_context(params).await;

    let dummy_cert =
        Certificate::new_with_default_fields(PREV_CERTIFICATE_ID, SOURCE_SUBNET_ID_1, &[])
            .expect("Dummy certificate");

    _ = ctx
        .validator_store
        .insert_pending_certificate(&dummy_cert)
        .await
        .unwrap();

    assert!(matches!(
        ctx.event_receiver.recv().await,
        Some(ProtocolEvents::Broadcast { certificate_id }) if certificate_id == dummy_cert.id
    ));

    assert!(matches!(
        ctx.event_receiver.try_recv(),
        Ok(ProtocolEvents::Gossip { .. })
    ));

    assert!(matches!(
        ctx.event_receiver.try_recv(),
        Ok(ProtocolEvents::Echo { .. })
    ));

    assert!(matches!(
        ctx.event_receiver.try_recv(),
        Err(mpsc::error::TryRecvError::Empty)
    ));

    // Trigger Ready upon reaching the Echo threshold
    reach_echo_threshold(&mut double_echo, &dummy_cert).await;

    assert!(matches!(
        ctx.event_receiver.recv().await,
        Some(ProtocolEvents::Ready { .. })
    ));

    // Trigger Delivery upon reaching the Delivery threshold
    reach_delivery_threshold(&mut double_echo, &dummy_cert).await;

    let x = ctx.broadcast_receiver.recv().await;
    assert!(matches!(
        x,
        Ok(CertificateDeliveredWithPositions(topos_core::types::CertificateDelivered { certificate, .. }, _)) if certificate == dummy_cert
    ));
}

#[rstest]
#[case::small_config(small_config())]
#[case(medium_config())]
#[test_log::test(tokio::test)]
#[trace]
#[timeout(Duration::from_secs(4))]
async fn trigger_ready_when_reached_enough_ready(#[case] params: TceParams) {
    let (mut double_echo, mut ctx) = create_context(params).await;

    let dummy_cert =
        Certificate::new_with_default_fields(PREV_CERTIFICATE_ID, SOURCE_SUBNET_ID_1, &[])
            .expect("Dummy certificate");

    _ = ctx
        .validator_store
        .insert_pending_certificate(&dummy_cert)
        .await
        .unwrap();

    assert!(matches!(
        ctx.event_receiver.recv().await,
        Some(ProtocolEvents::Broadcast { certificate_id }) if certificate_id == dummy_cert.id
    ));

    assert!(matches!(
        ctx.event_receiver.try_recv(),
        Ok(ProtocolEvents::Gossip { .. })
    ));

    assert!(matches!(
        ctx.event_receiver.try_recv(),
        Ok(ProtocolEvents::Echo { .. })
    ));

    // Trigger Ready upon reaching the Ready threshold
    reach_ready_threshold(&mut double_echo, &dummy_cert).await;

    assert!(matches!(
        ctx.event_receiver.recv().await,
        Some(ProtocolEvents::Ready { ..
})
    ));
}

================================================ FILE: crates/topos-tce-broadcast/src/tests/task.rs ================================================

use std::{future::IntoFuture, sync::Arc, time::Duration};

use rstest::rstest;
use tokio::{
    spawn,
    sync::{broadcast, mpsc},
};
use topos_core::uci::Certificate;
use topos_crypto::{messages::MessageSigner, validator_id::ValidatorId};
use topos_tce_storage::validator::ValidatorStore;
use topos_test_sdk::{
    certificates::create_certificate_chain,
    constants::{SOURCE_SUBNET_ID_1, TARGET_SUBNET_ID_1},
    crypto::message_signer,
    storage::create_validator_store,
};

use crate::{
    double_echo::broadcast_state::BroadcastState, event::ProtocolEvents,
    sampler::SubscriptionsView, task_manager::task::Task,
};

/// A freshly created task for an ungossiped certificate must first emit a
/// Broadcast event and then a Gossip event for that certificate.
#[rstest]
#[test_log::test(tokio::test)]
#[timeout(Duration::from_secs(1))]
async fn start_with_ungossiped_cert(
    #[future(awt)]
    #[from(create_validator_store)]
    validator_store: Arc<ValidatorStore>,
    message_signer: Arc<MessageSigner>,
) {
    let certificate = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 1)
        .pop()
        .unwrap()
        .certificate;
    let certificate_id = certificate.id;
    let validator_id = ValidatorId::default();
    let thresholds = topos_config::tce::broadcast::ReliableBroadcastParams {
        echo_threshold: 1,
        ready_threshold: 1,
        delivery_threshold: 1,
    };

    let (event_sender, mut event_receiver) = mpsc::channel(2);
    let (broadcast_sender, _) = broadcast::channel(1);
    let need_gossip = true;
    let subscriptions = SubscriptionsView::default();

    let broadcast_state = BroadcastState::new(
        certificate,
        validator_id,
        thresholds.echo_threshold,
        thresholds.ready_threshold,
        thresholds.delivery_threshold,
        event_sender,
        subscriptions,
        need_gossip,
        message_signer,
    );

    let (task, _ctx) = Task::new(
        certificate_id,
        broadcast_state,
        validator_store,
        broadcast_sender,
    );

    let _handle = spawn(task.into_future());

    let event = event_receiver.recv().await;
    assert!(matches!(
        event,
        Some(ProtocolEvents::Broadcast { certificate_id: id }) if id == certificate_id
    ));

    let event = event_receiver.recv().await;
    assert!(matches!(
        event,
        Some(ProtocolEvents::Gossip { cert: Certificate { id, .. } }) if id == certificate_id
    ));
}

================================================ FILE: crates/topos-tce-broadcast/src/tests/task_manager.rs ================================================

use std::sync::Arc;

use rstest::rstest;
use tokio::{
    spawn,
    sync::{broadcast, mpsc},
};
use tokio_util::sync::CancellationToken;
use topos_crypto::{messages::MessageSigner, validator_id::ValidatorId};
use topos_metrics::DOUBLE_ECHO_ACTIVE_TASKS_COUNT;
use topos_tce_storage::validator::ValidatorStore;
use topos_test_sdk::{
    certificates::create_certificate_chain,
    constants::{SOURCE_SUBNET_ID_1, TARGET_SUBNET_ID_1},
    crypto::message_signer,
    storage::create_validator_store,
};

use crate::{sampler::SubscriptionsView, task_manager::TaskManager};

/// The TaskManager should start exactly one active task even when the same
/// certificate is broadcast multiple times and a child arrives before its parent.
#[rstest]
#[tokio::test]
async fn can_start(
    #[future(awt)]
    #[from(create_validator_store)]
    validator_store: Arc<ValidatorStore>,
    message_signer: Arc<MessageSigner>,
) {
    let (message_sender, message_receiver) = mpsc::channel(1);
    let (event_sender, _) = mpsc::channel(1);
    let (broadcast_sender, _) = broadcast::channel(1);
    let shutdown = CancellationToken::new();
    let validator_id = ValidatorId::default();
    let thresholds = topos_config::tce::broadcast::ReliableBroadcastParams {
        echo_threshold: 1,
        ready_threshold: 1,
        delivery_threshold: 1,
    };

    let manager = TaskManager::new(
        message_receiver,
        SubscriptionsView::default(),
        event_sender,
        validator_id,
        thresholds,
        message_signer,
        validator_store,
        broadcast_sender,
    );

    spawn(manager.run(shutdown));

    let certificates = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 2);
    let parent = certificates
        .first()
        .take()
        .expect("Failed to create certificate");
    let child = certificates
        .last()
        .take()
        .expect("Failed to create certificate");

    let _ = message_sender
        .send(crate::DoubleEchoCommand::Broadcast {
            need_gossip: false,
            cert: child.certificate.clone(),
            pending_id: 0,
        })
        .await;

    let _ = message_sender
        .send(crate::DoubleEchoCommand::Broadcast {
            need_gossip: false,
            cert: parent.certificate.clone(),
            pending_id: 0,
        })
        .await;

    let _ = message_sender
        .send(crate::DoubleEchoCommand::Broadcast {
            need_gossip: false,
            cert: parent.certificate.clone(),
            pending_id: 0,
        })
        .await;

    assert_eq!(DOUBLE_ECHO_ACTIVE_TASKS_COUNT.get(), 1);
}

================================================ FILE: crates/topos-tce-gatekeeper/Cargo.toml ================================================

[package]
name = "topos-tce-gatekeeper"
version = "0.1.0"
edition = "2021"

[lints]
workspace = true

[dependencies]
async-trait.workspace = true
futures.workspace = true
rand.workspace = true
thiserror.workspace = true
tracing.workspace = true
tokio = { workspace = true, features = ["full"] }
topos-core = { workspace = true, features = ["uci"] }
topos-p2p = { path = "../topos-p2p" }

[dev-dependencies]
rstest.workspace = true
tracing-subscriber = { workspace = true, features = ["env-filter", "fmt"] }
test-log.workspace = true
env_logger.workspace = true

================================================ FILE: crates/topos-tce-gatekeeper/src/builder.rs ================================================

use std::future::IntoFuture;

use futures::{future::BoxFuture, FutureExt};
use tokio::sync::mpsc;

use crate::{client::GatekeeperClient, Gatekeeper, GatekeeperError};

#[derive(Default)]
pub struct GatekeeperBuilder {}

impl IntoFuture for GatekeeperBuilder {
    type Output = Result<(GatekeeperClient, Gatekeeper), GatekeeperError>;
    type IntoFuture = BoxFuture<'static, Self::Output>;

    fn into_future(self) -> Self::IntoFuture {
        let (shutdown_channel, shutdown) = mpsc::channel(1);

        futures::future::ok((
            GatekeeperClient { shutdown_channel },
            Gatekeeper {
                shutdown,
                ..Gatekeeper::default()
            },
        ))
        .boxed()
    }
}

================================================ FILE: crates/topos-tce-gatekeeper/src/client.rs ================================================

use crate::GatekeeperError;
use tokio::sync::{mpsc, oneshot};
#[derive(Clone)]
pub struct GatekeeperClient {
    pub(crate) shutdown_channel: mpsc::Sender<oneshot::Sender<()>>,
}

impl GatekeeperClient {
    /// Request a graceful shutdown of the Gatekeeper task and wait for its ack.
    pub async fn shutdown(&self) -> Result<(), GatekeeperError> {
        let (sender, receiver) = oneshot::channel();
        self.shutdown_channel
            .send(sender)
            .await
            .map_err(GatekeeperError::ShutdownCommunication)?;

        Ok(receiver.await?)
    }
}

================================================ FILE: crates/topos-tce-gatekeeper/src/lib.rs ================================================

use std::{future::IntoFuture, time::Duration};

use builder::GatekeeperBuilder;
use futures::{future::BoxFuture, FutureExt};
use thiserror::Error;
use tokio::{
    sync::{mpsc, oneshot},
    time,
};
use tracing::error;

mod builder;
mod client;

#[cfg(test)]
mod tests;

pub use client::GatekeeperClient;
use tracing::{info, warn};

pub struct Gatekeeper {
    pub(crate) shutdown: mpsc::Receiver<oneshot::Sender<()>>,
    pub(crate) tick_duration: Duration,
}

impl Default for Gatekeeper {
    fn default() -> Self {
        // The default receiver is immediately "closed" since its sender is
        // dropped here; builder() replaces it with a live channel.
        let (_shutdown_channel, shutdown) = mpsc::channel(1);
        let tick_duration = Duration::from_secs(Self::DEFAULT_TICK_DURATION);

        Self {
            shutdown,
            tick_duration,
        }
    }
}

impl IntoFuture for Gatekeeper {
    type Output = Result<(), GatekeeperError>;
    type IntoFuture = BoxFuture<'static, Self::Output>;

    fn into_future(mut self) -> Self::IntoFuture {
        async move {
            let mut interval = time::interval(self.tick_duration);

            // Loop until a shutdown request (Some) or the channel closes (None).
            let shutdowned: Option<oneshot::Sender<()>> = loop {
                tokio::select! {
                    _ = interval.tick() => {}
                    sender = self.shutdown.recv() => {
                        break sender;
                    }
                }
            };

            if let Some(sender) = shutdowned {
                info!("Shutting down gatekeeper...");
                _ = sender.send(());
            } else {
                warn!("Shutting down gatekeeper due to error...");
            }

            Ok(())
        }
        .boxed()
    }
}

impl Gatekeeper {
    pub(crate) const DEFAULT_TICK_DURATION: u64 = 10;

    pub fn builder() -> GatekeeperBuilder {
        GatekeeperBuilder::default()
    }
}

#[derive(Debug, Error)]
pub enum GatekeeperError {
    #[error("Unable to receive expected response from Gatekeeper: {0}")]
    ResponseChannel(#[from] oneshot::error::RecvError),

    #[error("Unable to execute command on the Gatekeeper: {0}")]
    InvalidCommand(String),

    #[error("Unable to execute shutdown on the Gatekeeper: {0}")]
    ShutdownCommunication(mpsc::error::SendError<oneshot::Sender<()>>),

    #[error("The command produce no update")]
    NoUpdate,
}

================================================ FILE: crates/topos-tce-gatekeeper/src/tests.rs ================================================

use std::future::IntoFuture;

use rstest::fixture;
use test_log::test;
use tokio::spawn;
use topos_p2p::PeerId;

use crate::{client::GatekeeperClient, Gatekeeper};

#[test(tokio::test)]
async fn can_start_and_stop() -> Result<(), Box<dyn std::error::Error>> {
    let (client, server) = Gatekeeper::builder().await?;
    let handler = spawn(server.into_future());

    client.shutdown().await?;

    // Fix: asserting `handler.is_finished()` immediately after `shutdown()` is
    // racy — the server sends its ack before the spawned task is reaped, so the
    // assertion could observe a still-running task. Awaiting the join handle is
    // deterministic and also surfaces any server-side error.
    handler.await??;

    Ok(())
}

#[fixture]
async fn gatekeeper() -> GatekeeperClient {
    let (client, server) = Gatekeeper::builder().await.unwrap();
    spawn(server.into_future());

    client
}

#[fixture]
fn peer_list(#[default(10)] number: usize) -> Vec<PeerId> {
    (0..number)
        .map(|i| {
            topos_p2p::utils::local_key_pair(Some(i as u8))
                .public()
                .to_peer_id()
        })
        .collect()
}

================================================ FILE: crates/topos-tce-proxy/Cargo.toml ================================================

[package]
name = "topos-tce-proxy"
version = "0.1.0"
edition = "2021"

[lints]
workspace = true

[dependencies]
topos-core = { workspace = true, features = ["uci",
"api"] } topos-telemetry = { path = "../topos-telemetry" } async-stream.workspace = true backoff.workspace = true byteorder.workspace = true futures.workspace = true hex.workspace = true hyper.workspace = true serde = { workspace = true, features = ["derive"] } serde_json.workspace = true thiserror.workspace = true tokio = { workspace = true, features = [ "io-util", "io-std", "macros", "rt", "rt-multi-thread", "fs", "time", "sync", ] } tokio-stream.workspace = true tonic = { workspace = true, features = ["transport"] } tracing-subscriber = { workspace = true, features = ["env-filter", "json", "ansi", "fmt"] } tracing.workspace = true uuid.workspace = true tracing-opentelemetry.workspace = true opentelemetry.workspace = true base64ct.workspace = true [dev-dependencies] libp2p.workspace = true topos-tce = { path = "../topos-tce" } rstest = { workspace = true, features = ["async-timeout"] } test-log.workspace = true env_logger.workspace = true serial_test.workspace = true byteorder = "1.4.3" dockertest = "0.3.1" topos-tce-storage = { path = "../topos-tce-storage" } topos-test-sdk = { path = "../topos-test-sdk/" } ================================================ FILE: crates/topos-tce-proxy/src/client.rs ================================================ use crate::{Error, TceProxyEvent}; use base64ct::{Base64, Encoding}; use futures::stream::FuturesOrdered; use opentelemetry::trace::FutureExt; use std::collections::HashMap; use std::time::Duration; use tokio::sync::{mpsc, oneshot}; use tokio_stream::StreamExt; use tonic::IntoRequest; use topos_core::api::grpc::checkpoints::{TargetCheckpoint, TargetStreamPosition}; use topos_core::api::grpc::tce::v1::{ GetLastPendingCertificatesRequest, GetSourceHeadRequest, GetSourceHeadResponse, }; use topos_core::{ api::grpc::tce::v1::{ watch_certificates_request, watch_certificates_response, SubmitCertificateRequest, WatchCertificatesRequest, WatchCertificatesResponse, }, uci::{Certificate, SubnetId}, }; use tracing::{debug, error, 
info, info_span, warn, Instrument, Span}; use tracing_opentelemetry::OpenTelemetrySpanExt; const CERTIFICATE_OUTBOUND_CHANNEL_SIZE: usize = 100; const CERTIFICATE_INBOUND_CHANNEL_SIZE: usize = 100; const TCE_PROXY_COMMAND_CHANNEL_SIZE: usize = 100; // Maximum backoff retry timeout in seconds (1 hour) const TCE_SUBMIT_CERTIFICATE_BACKOFF_TIMEOUT: Duration = Duration::from_secs(3600); pub(crate) enum TceClientCommand { // Get head certificate that was sent to the TCE node for this subnet GetSourceHead { subnet_id: SubnetId, sender: oneshot::Sender>, }, // Get map of subnet id->last pending certificate GetLastPendingCertificates { subnet_ids: Vec, #[allow(clippy::type_complexity)] sender: oneshot::Sender>, Error>>, }, // Open the stream to the TCE node // Mark the position from which TCE node certificates should be retrieved OpenStream { target_checkpoint: TargetCheckpoint, }, // Send generated certificate to the TCE node SendCertificate { cert: Box, span: tracing::Span, }, Shutdown, } /// Create new backoff library error based on error that happened pub(crate) fn new_tce_proxy_backoff_err(err: E) -> backoff::Error { // Retry according to backoff policy backoff::Error::Transient { err, retry_after: None, } } pub struct TceClient { subnet_id: topos_core::uci::SubnetId, tce_endpoint: String, command_sender: mpsc::Sender, } impl TceClient { pub async fn open_stream(&self, positions: Vec) -> Result<(), Error> { self.command_sender .send(TceClientCommand::OpenStream { target_checkpoint: TargetCheckpoint { target_subnet_ids: vec![self.subnet_id], positions, }, }) .await .map_err(|_| Error::InvalidChannelError)?; Ok(()) } pub async fn send_certificate(&mut self, cert: Certificate) -> Result<(), Error> { self.command_sender .send(TceClientCommand::SendCertificate { cert: Box::new(cert), span: tracing::Span::current(), }) .with_current_context() .in_current_span() .await .map_err(|_| Error::InvalidChannelError)?; Ok(()) } pub async fn close(&mut self) -> Result<(), Error> { 
self.command_sender .send(TceClientCommand::Shutdown) .await .map_err(|_| Error::InvalidChannelError)?; Ok(()) } // Return source head and position of the certificate pub async fn get_source_head(&mut self) -> Result<(Certificate, u64), Error> { #[allow(clippy::type_complexity)] let (sender, receiver): ( oneshot::Sender>, oneshot::Receiver>, ) = oneshot::channel(); self.command_sender .send(TceClientCommand::GetSourceHead { subnet_id: self.subnet_id, sender, }) .await .map_err(|_| Error::InvalidChannelError)?; receiver.await.map_err(|_| Error::InvalidChannelError)? } pub async fn get_last_pending_certificates( &mut self, subnet_ids: Vec, ) -> Result>, Error> { #[allow(clippy::type_complexity)] let (sender, receiver) = oneshot::channel(); self.command_sender .send(TceClientCommand::GetLastPendingCertificates { subnet_ids, sender }) .await .map_err(|_| Error::InvalidChannelError)?; receiver.await.map_err(|_| Error::InvalidChannelError)? } pub fn get_subnet_id(&self) -> SubnetId { self.subnet_id } pub fn get_tce_endpoint(&self) -> &str { self.tce_endpoint.as_str() } } #[derive(Default)] pub struct TceClientBuilder { tce_endpoint: Option, subnet_id: Option, tce_proxy_event_sender: Option>, } impl TceClientBuilder { pub fn set_tce_endpoint(mut self, endpoint: T) -> Self { self.tce_endpoint = Some(endpoint.to_string()); self } pub fn set_subnet_id(mut self, subnet_id: SubnetId) -> Self { self.subnet_id = Some(subnet_id); self } pub fn set_proxy_event_sender( mut self, tce_proxy_event_sender: mpsc::Sender, ) -> Self { self.tce_proxy_event_sender = Some(tce_proxy_event_sender); self } pub async fn build_and_launch( self, mut shutdown: mpsc::Receiver>, ) -> Result< ( TceClient, impl futures::stream::Stream, ), Error, > { // Channel used to pass received certificates (certificates pushed TCE node) from the TCE client to the application let (inbound_certificate_sender, inbound_certificate_receiver) = mpsc::channel::<(Certificate, 
TargetStreamPosition)>(CERTIFICATE_INBOUND_CHANNEL_SIZE); let tce_endpoint = self .tce_endpoint .as_ref() .ok_or(Error::InvalidTceEndpoint)? .clone(); // Connect to tce node service using backoff strategy let mut tce_grpc_client = match crate::connect_to_tce_service_with_retry(tce_endpoint.clone()).await { Ok(client) => { info!("Connected to the TCE service at {}", &tce_endpoint); client } Err(e) => { error!("Unable to connect to tce client: {}", e); return Err(e); } }; // Channel used to initiate watch_certificates_request::Command that will be sent to the TCE through stream let (outbound_stream_command_sender, mut outbound_stream_command_receiver) = mpsc::channel::(CERTIFICATE_OUTBOUND_CHANNEL_SIZE); // Outbound stream used to send watch_certificates_request::Command to the TCE node service let outbound_watch_certificates_stream = async_stream::stream! { loop { while let Some(request) = outbound_stream_command_receiver.recv().await { yield request; } } }; // Call TCE service watch certificates, get inbound response stream let mut inbound_watch_certificates_stream: tonic::Streaming = tce_grpc_client .watch_certificates(outbound_watch_certificates_stream) .await .map(|r| r.into_inner())?; // Channel used to shut down task for inbound stream responses processing let (inbound_shutdown_sender, mut inbound_shutdown_receiver) = mpsc::unbounded_channel::<()>(); let subnet_id = *self.subnet_id.as_ref().ok_or(Error::InvalidSubnetId)?; let tce_proxy_event_sender = self.tce_proxy_event_sender.clone(); // Run task and process inbound watch certificate stream responses tokio::spawn(async move { // Listen for feedback from TCE service (WatchCertificatesResponse) info!( "Entering watch certificate response loop for tce node {} for subnet id {}", &tce_endpoint, &subnet_id ); loop { tokio::select! 
{ Some(response) = inbound_watch_certificates_stream.next() => { match response { Ok(watch_certificate_response) => match watch_certificate_response.event { // Received CertificatePushed event from TCE (new certificate has been received from TCE) Some(watch_certificates_response::Event::CertificatePushed( mut certificate_pushed )) => { info!("Certificate {:?} received from the TCE", &certificate_pushed); if let Some(certificate) = certificate_pushed.certificate.take() { let cert: Certificate = match certificate.try_into() { Ok(c) => c, Err(e) => { error!("Invalid Certificate conversion for certificate: {e}"); continue; } }; // Currently only one target stream position is expected let position: TargetStreamPosition = match certificate_pushed.positions.first() { Some(p) => { if let Ok(p) = TryInto::::try_into(p.clone()) { p } else { error!("Invalid target stream position for certificate id {}",cert.id); continue; } }, None => { error!("Invalid target stream position for certificate id {}",cert.id); continue; } }; if let Err(e) = inbound_certificate_sender .send((cert, position)) .await { error!( "Unable to pass received certificate to application: {e}" ) } } } // Confirmation from TCE that stream has been opened Some(watch_certificates_response::Event::StreamOpened(stream_opened)) => { info!( "Successfully opened the Certificate stream with the TCE at {} for the subnet(s): {:?}", &tce_endpoint, stream_opened.subnet_ids ); } None => { warn!( "Watch certificate stream received None object from the TCE node at {}", &tce_endpoint ); } }, Err(e) => { error!( "Failed to open the Certificate stream with the TCE node at {} for the subnet(s): {:?}: {}", &tce_endpoint, &subnet_id, e.to_string() ); // Send warning to restart TCE proxy if let Some(tce_proxy_event_sender) = tce_proxy_event_sender.clone() { if let Err(e) = tce_proxy_event_sender.send(TceProxyEvent::WatchCertificatesChannelFailed).await { error!("Unable to send watch certificates channel failed signal: {e}"); } } } 
} } Some(_) = inbound_shutdown_receiver.recv() => { info!("Finishing watch certificates task..."); // Finish this task listener break; } } } info!( "Finishing watch certificate task for tce node {} subnet_id {:?}", &tce_endpoint, &subnet_id ); }); // Channel used to pass commands from the application to the TCE proxy // To close to chanel worker task, send None as Certificate let (tce_command_sender, mut tce_command_receiver) = mpsc::channel::(TCE_PROXY_COMMAND_CHANNEL_SIZE); // Run task for sending certificates to the TCE stream let tce_endpoint = self .tce_endpoint .as_ref() .ok_or(Error::InvalidTceEndpoint)? .clone(); let tce_proxy_event_sender = self.tce_proxy_event_sender.clone(); tokio::spawn(async move { let mut certificate_to_send = FuturesOrdered::new(); info!( "Entering tce proxy command loop for stream {}", &tce_endpoint ); loop { tokio::select! { Some(result) = certificate_to_send.next() => { match result { Ok(()) => { // All good, after one certificate is submitted carry on continue; } Err(e) => { // Backoff maximum period timeout. We need to restart sequencer. error!("Failed to submit certificate to the tce network, backoff timeout with error: {e}. 
Restarting sequencer..."); if let Some(tce_proxy_event_sender) = tce_proxy_event_sender.clone() { if let Err(e) = tce_proxy_event_sender.send(TceProxyEvent::TceServiceFailure).await { error!("Unable to send tce communication failure signal: {e}"); } } } } } Some(sender) = shutdown.recv() => { info!("Shutdown tce proxy command received..."); if !certificate_to_send.is_empty() { info!("Waiting for all certificates to be sent..."); while certificate_to_send.next().await.is_some() {} } inbound_shutdown_sender.send(()).expect("valid channel for shutting down task"); sender.send(()).expect("valid channel for shutting down task"); break; } command = tce_command_receiver.recv() => { match command { Some(TceClientCommand::SendCertificate {cert, span}) => { // Send new ceritficate to the TCE network let cert_id = cert.id; let previous_cert_id = cert.prev_id; let span = info_span!(parent: &span, "SendCertificate", %cert_id, %previous_cert_id, %tce_endpoint); let context = span.context(); let tce_endpoint = tce_endpoint.clone(); let tce_grpc_client = tce_grpc_client.clone(); let context_backoff = context.clone(); // TODO: Push certificates to the TCE one by one certificate_to_send.push_back(async move { debug!("Submitting certificate {} to the TCE using backoff strategy...", &tce_endpoint); let cert = cert.clone(); let op = || async { let mut tce_grpc_client = tce_grpc_client.clone(); let mut request = SubmitCertificateRequest { certificate: Some(topos_core::api::grpc::uci::v1::Certificate::from(*(cert.clone()))), }.into_request(); let mut span_context = topos_telemetry::TonicMetaInjector(request.metadata_mut()); span_context.inject(&context_backoff); tce_grpc_client .submit_certificate(request) .with_context(context_backoff.clone()) .instrument(Span::current()) .await .map(|_response| { info!("Successfully submitted the Certificate {} (previous: {}) to the TCE at {}", &cert_id, &previous_cert_id, &tce_endpoint); }) .map_err(|e| { error!("Failed to submit the Certificate to 
the TCE at {}, error: {e}", &tce_endpoint); new_tce_proxy_backoff_err(e) }) }; let backoff_configuration = backoff::ExponentialBackoff { max_elapsed_time: Some(TCE_SUBMIT_CERTIFICATE_BACKOFF_TIMEOUT), ..Default::default() }; backoff::future::retry(backoff_configuration, op) .await .map_err(|e| { error!("Failed to submit certificate to the TCE: {e}"); e }) } .with_context(context) .instrument(span)); } Some(TceClientCommand::OpenStream {target_checkpoint}) => { // Send command to TCE to open stream with my subnet id info!( "Sending OpenStream command to the TCE node at {} for the Subnet {}", &tce_endpoint, &subnet_id ); if let Err(e) = outbound_stream_command_sender .send( watch_certificates_request::OpenStream { target_checkpoint: Some(target_checkpoint.into()), source_checkpoint: None }.into(), ) .await { error!( "Unable to send OpenStream command: {e}" ) } } Some(TceClientCommand::Shutdown) => { info!("Shutdown tce proxy command received..."); inbound_shutdown_sender.send(()).expect("valid channel for shutting down task"); break; } Some(TceClientCommand::GetSourceHead {subnet_id, sender}) => { let result: Result<(Certificate, u64), Error> = match tce_grpc_client .get_source_head(GetSourceHeadRequest { subnet_id: Some(subnet_id.into()) }) .await .map(|r| r.into_inner()) { Ok(GetSourceHeadResponse { position: Some(pos), certificate: Some(cert), }) => { info!("Source head certificate acquired from tce, position: {}, certificate: {:?}", pos.position, &cert); Ok((cert.try_into().map_err(|_| Error::InvalidCertificate)?, pos.position)) }, Ok(_) => { Err(Error::SourceHeadEmpty{subnet_id}) }, Err(e) => { Err(Error::UnableToGetSourceHeadCertificate{subnet_id, details: e.to_string()}) } }; if sender.send(result).is_err() { error!("Unable to pass result of the source head, channel failed"); }; } Some(TceClientCommand::GetLastPendingCertificates { subnet_ids, sender }) => { let result = match tce_grpc_client .get_last_pending_certificates(GetLastPendingCertificatesRequest { 
subnet_ids: subnet_ids.into_iter().map(Into::into).collect(), }) .await .map(|r| r.into_inner()) { Ok(response) => { let result = response .last_pending_certificate .into_iter() .map(|(subnet_id, last_pending_certificate)| { let subnet_id: SubnetId = TryInto::::try_into( Base64::decode_vec(subnet_id.as_str()).map_err(|_| Error::InvalidSubnetId)?.as_slice(), ) .map_err(|_| Error::InvalidSubnetId)?; let certificate_and_index: Option<(Certificate, u64)> = match last_pending_certificate.value { Some(certificate) => Some( Certificate::try_from(certificate) .map(|certificate| (certificate, last_pending_certificate.index)) .map_err( |e| Error::UnableToGetLastPendingCertificates { details: e.to_string(), subnet_id, }, )?, ), None => None, }; Ok(( subnet_id, certificate_and_index )) }) .collect::>, Error>>()?; Ok(result) } Err(e) => Err(Error::UnableToGetLastPendingCertificates { subnet_id, details: e.to_string(), }), }; if sender.send(result).is_err() { error!("Unable to pass result for the last pending certificates, channel failed"); }; } None => { error!("Unexpected termination of the TCE proxy service of the Sequencer"); break; } } } } } info!( "Finished submit certificate loop for stream {}", &tce_endpoint ); Result::<(), Error>::Ok(()) }); Ok(( TceClient { subnet_id: self.subnet_id.ok_or(Error::InvalidSubnetId)?, tce_endpoint: self.tce_endpoint.ok_or(Error::InvalidTceEndpoint)?, command_sender: tce_command_sender, }, tokio_stream::wrappers::ReceiverStream::new(inbound_certificate_receiver), )) } } ================================================ FILE: crates/topos-tce-proxy/src/lib.rs ================================================ //! //! Handles incoming events from the friendly TCE node //! 
pub mod client; pub mod worker; use opentelemetry::Context; use std::time::Duration; use tonic::transport::channel; use topos_core::api::grpc::checkpoints::TargetStreamPosition; use topos_core::{ api::grpc::tce::v1::api_service_client::ApiServiceClient, uci::{Certificate, SubnetId}, }; use tracing::{error, info}; // Maximum backoff retry timeout in seconds (12 hours) const TCE_CONNECT_BACKOFF_TIMEOUT: Duration = Duration::from_secs(12 * 3600); #[derive(Debug, thiserror::Error)] pub enum Error { #[error("Tonic transport error")] TonicTransportError { #[from] source: tonic::transport::Error, }, #[error("Tonic error")] TonicStatusError { #[from] source: tonic::Status, }, #[error("Invalid channel error")] InvalidChannelError, #[error("Invalid tce endpoint error")] InvalidTceEndpoint, #[error("Invalid subnet id error")] InvalidSubnetId, #[error("Invalid certificate error")] InvalidCertificate, #[error("Hex conversion error {source}")] HexConversionError { #[from] source: hex::FromHexError, }, #[error("Unable to get source head certificate for subnet id {subnet_id}: {details}")] UnableToGetSourceHeadCertificate { subnet_id: SubnetId, details: String, }, #[error("Certificate source head empty for subnet id {subnet_id}")] SourceHeadEmpty { subnet_id: SubnetId }, #[error("Unable to get last pending certificates for subnet id {subnet_id}: {details}")] UnableToGetLastPendingCertificates { subnet_id: SubnetId, details: String, }, } /// Control the TceProxy #[derive(Debug)] pub enum TceProxyCommand { /// Submit a newly created certificate to the TCE SubmitCertificate { cert: Box, ctx: Context, }, /// Shutdown command Shutdown(tokio::sync::oneshot::Sender<()>), } /// Events related to synchronizing certificates with the TCE network. #[derive(Debug, Clone)] pub enum TceProxyEvent { /// New delivered certificate (and its position) fetched from the TCE network NewDeliveredCerts { certificates: Vec<(Certificate, u64)>, ctx: Context, }, /// Failed watching certificates channel. 
Requires a restart of the sequencer tce proxy to recover. WatchCertificatesChannelFailed, /// Failure in communication with the TCE grpc service. Sequencer needs to be restarted TceServiceFailure, } /// Configuration data for the TCE proxy, used to configure the `TceProxyWorker`. pub struct TceProxyConfig { /// The [`SubnetId`] this config handles certificate proxying for. pub subnet_id: SubnetId, /// The GRPC endpoint where the Sequencer is expecting to find a TCE node. pub tce_endpoint: String, /// The positions in the index of the known Certificates. pub positions: Vec, } async fn connect_to_tce_service_with_retry( endpoint: String, ) -> Result, Error> { info!( "Connecting to the TCE at {} using the exponential backoff strategy...", endpoint ); let op = || async { let channel = channel::Endpoint::from_shared(endpoint.clone())? .connect() .await .map_err(|e| { error!("Failed to connect to the TCE at {}: {e}", &endpoint); e })?; Ok(ApiServiceClient::new(channel)) }; let backoff_configuration = backoff::ExponentialBackoff { max_elapsed_time: Some(TCE_CONNECT_BACKOFF_TIMEOUT), ..Default::default() }; backoff::future::retry(backoff_configuration, op) .await .map_err(|e| { error!("Failed to connect to the TCE at {}: {e}", &endpoint); Error::TonicTransportError { source: e } }) } ================================================ FILE: crates/topos-tce-proxy/src/worker.rs ================================================ use crate::{client::TceClientBuilder, Error, TceProxyCommand, TceProxyConfig, TceProxyEvent}; use opentelemetry::trace::FutureExt; use tokio::sync::{mpsc, oneshot}; use tokio_stream::StreamExt; use topos_core::uci::Certificate; use tracing::{error, info, info_span, Instrument, Span}; use tracing_opentelemetry::OpenTelemetrySpanExt; /// Proxy with the TCE /// /// Performs two tasks: /// 1) Fetch the certificates that were delivered from the TCE /// 2) Submit the new certificates to the TCE pub struct TceProxyWorker { /// The [`TceProxyConfig`] used to 
setup this worker. pub config: TceProxyConfig, commands: mpsc::Sender, events: mpsc::Receiver, } impl TceProxyWorker { /// Construct a new [`TceProxyWorker`] with a 128 items deep channel to send commands to and receive events from a TCE node on the given subnet. /// The worker holds a [`crate::client::TceClient`] pub async fn new(config: TceProxyConfig) -> Result<(Self, Option<(Certificate, u64)>), Error> { let (command_sender, mut command_rcv) = mpsc::channel::(128); let (evt_sender, evt_rcv) = mpsc::channel::(128); let (tce_client_shutdown_channel, shutdown_receiver) = mpsc::channel::>(1); let (mut tce_client, mut receiving_certificate_stream) = TceClientBuilder::default() .set_subnet_id(config.subnet_id) .set_tce_endpoint(&config.tce_endpoint) .set_proxy_event_sender(evt_sender.clone()) .build_and_launch(shutdown_receiver) .await?; tce_client.open_stream(config.positions.clone()).await?; // Get pending certificates from the TCE node. Source head certificate // is latest pending certificate for this subnet let source_last_pending_certificate: Option<(Certificate, u64)> = match tce_client .get_last_pending_certificates(vec![tce_client.get_subnet_id()]) .await { Ok(mut pending_certificates) => pending_certificates .remove(&tce_client.get_subnet_id()) .unwrap_or_default(), Err(e) => { error!("Unable to retrieve latest pending certificate {e}"); return Err(e); } }; info!( "Last pending certificate: {:?}", source_last_pending_certificate ); let source_last_delivered_certificate = match tce_client.get_source_head().await { Ok(certificate) => Some(certificate), Err(Error::SourceHeadEmpty { subnet_id: _ }) => { // This is also OK, TCE node does not have any data about certificates // We should start certificate production from scratch None } Err(e) => { return Err(e); } }; info!( "Last delivered certificate: {:?}", source_last_delivered_certificate ); let source_last_certificate = if source_last_pending_certificate.is_none() { // There are no pending certificates on the 
TCE // Block height to get next from subnet is position +1 source_last_delivered_certificate } else { // Last generated is pending certificate // Block height to get next from subnet is position of the last delivered certificate + index of the pending certificate let delivered_certificate_position = source_last_delivered_certificate .map(|(_cert, position)| position) .unwrap_or_default(); source_last_pending_certificate .map(|(cert, index)| (cert, delivered_certificate_position + index)) }; tokio::spawn(async move { info!( "Starting the TCE proxy connected to the TCE at {}", tce_client.get_tce_endpoint() ); loop { tokio::select! { // process TCE proxy commands received from application Some(cmd) = command_rcv.recv() => { match cmd { TceProxyCommand::SubmitCertificate{cert, ctx} => { let span = info_span!("Sequencer TCE Proxy"); span.set_parent(ctx); async { info!("Submitting new certificate to the TCE network: {}", &cert.id); if let Err(e) = tce_client.send_certificate(*cert).await { error!("Failure on the submission of the Certificate to the TCE client: {e}"); } } .with_context(span.context()) .instrument(span) .await; } TceProxyCommand::Shutdown(sender) => { info!("Received TceProxyCommand::Shutdown command, closing tce client..."); let (killer, waiter) = oneshot::channel::<()>(); tce_client_shutdown_channel.send(killer).await.unwrap(); waiter.await.unwrap(); _ = sender.send(()); break; } } } // Process certificates received from the TCE node Some((cert, target_stream_position)) = receiving_certificate_stream.next() => { let span = info_span!("PushCertificate"); async { info!("Received certificate from TCE {:?}, target stream position {}", cert, target_stream_position.position); if let Err(e) = evt_sender.send(TceProxyEvent::NewDeliveredCerts { certificates: vec![(cert, target_stream_position.position)], ctx: Span::current().context()} ) .await { error!("Unable to send NewDeliveredCerts event {e}"); } } .with_context(span.context()) .instrument(span) .await; } } 
} info!( "Exiting the TCE proxy worker handle loop connected to the TCE at {}", tce_client.get_tce_endpoint() ); }); // Save channels and handles, return latest tce known certificate Ok(( Self { commands: command_sender, events: evt_rcv, config, }, source_last_certificate, )) } /// Send commands to TCE pub async fn send_command(&self, cmd: TceProxyCommand) -> Result<(), String> { match self.commands.send(cmd).await { Ok(_) => Ok(()), Err(e) => Err(e.to_string()), } } /// Pollable (in select!) event listener pub async fn next_event(&mut self) -> Result { let event = self.events.recv().await; Ok(event.unwrap()) } /// Shut down TCE proxy pub async fn shutdown(&self) -> Result<(), String> { info!("Shutting down TCE proxy worker..."); let (sender, receiver) = oneshot::channel(); if let Err(e) = self.commands.send(TceProxyCommand::Shutdown(sender)).await { error!("Error sending shutdown signal to TCE worker {e}"); return Err(e.to_string()); }; receiver.await.map_err(|e| e.to_string()) } } ================================================ FILE: crates/topos-tce-proxy/tests/tce_tests.rs ================================================ use base64ct::{Base64, Encoding}; use futures::StreamExt; use rstest::*; use std::collections::{HashMap, HashSet}; use test_log::test; use tokio::sync::{mpsc, oneshot}; use tokio::time::Duration; use topos_core::api::grpc::shared::v1::positions::SourceStreamPosition; use topos_core::api::grpc::shared::v1::{ checkpoints::TargetCheckpoint, positions::TargetStreamPosition, }; use topos_core::api::grpc::shared::v1::{CertificateId, StarkProof, SubnetId}; use topos_core::api::grpc::tce::v1::LastPendingCertificate; use topos_core::api::grpc::tce::v1::{ watch_certificates_request, watch_certificates_response, watch_certificates_response::CertificatePushed, GetLastPendingCertificatesRequest, GetLastPendingCertificatesResponse, GetSourceHeadRequest, GetSourceHeadResponse, SubmitCertificateRequest, }; use topos_core::api::grpc::uci::v1::Certificate; use 
topos_core::types::CertificateDelivered; use topos_core::uci::SUBNET_ID_LENGTH; use topos_tce_proxy::client::{TceClient, TceClientBuilder}; use topos_tce_proxy::worker::TceProxyWorker; use topos_tce_proxy::{TceProxyCommand, TceProxyConfig, TceProxyEvent}; use topos_test_sdk::tce::{start_node, NodeConfig}; use tracing::{debug, error, info, warn}; use topos_test_sdk::{certificates::create_certificate_chain, constants::*, tce::TceContext}; pub const SOURCE_SUBNET_ID_1_NUMBER_OF_PREFILLED_CERTIFICATES: usize = 15; pub const SOURCE_SUBNET_ID_2_NUMBER_OF_PREFILLED_CERTIFICATES: usize = 10; #[test(tokio::test)] async fn test_tce_submit_certificate() -> Result<(), Box> { let mut context = start_node::partial_2(&[], NodeConfig::standalone()).await; let source_subnet_id: SubnetId = SOURCE_SUBNET_ID_1.into(); let prev_certificate_id: CertificateId = CERTIFICATE_ID_1.into(); let certificate_id: CertificateId = CERTIFICATE_ID_2.into(); match context .api_grpc_client .submit_certificate(SubmitCertificateRequest { certificate: Some(Certificate { source_subnet_id: Some(source_subnet_id.clone()), id: Some(certificate_id), prev_id: Some(prev_certificate_id), target_subnets: vec![], state_root: [0u8; 32].to_vec(), tx_root_hash: [0u8; 32].to_vec(), receipts_root_hash: [0u8; 32].to_vec(), verifier: 0, proof: Some(StarkProof { value: Vec::new() }), signature: Some(Default::default()), }), }) .await .map(|r| r.into_inner()) { Ok(response) => { debug!("Certificate successfully submitted {:?}", response); } Err(e) => { error!("Unable to submit the certificate: {e:?}"); return Err(Box::from(e)); } }; info!("Shutting down TCE node client"); context.shutdown().await?; Ok(()) } #[test(tokio::test)] async fn test_tce_watch_certificates() -> Result<(), Box> { let mut context = start_node::partial_2(&[], NodeConfig::standalone()).await; let source_subnet_id: SubnetId = SubnetId { value: [1u8; SUBNET_ID_LENGTH].to_vec(), }; //Outbound stream let subnet_id_instream = source_subnet_id.clone(); let 
in_stream = async_stream::stream! { yield watch_certificates_request::OpenStream { target_checkpoint: Some(TargetCheckpoint { target_subnet_ids: vec![ subnet_id_instream ], positions: Vec::new() }), source_checkpoint: None }.into() }; let response = context .api_grpc_client .watch_certificates(in_stream) .await .unwrap(); let mut resp_stream = response.into_inner(); info!("TCE client: waiting for watch certificate response"); while let Some(received) = resp_stream.next().await { info!("TCE client received: {:?}", received); let received = received.unwrap(); match received.event { Some(watch_certificates_response::Event::CertificatePushed(CertificatePushed { certificate: Some(certificate), .. })) => { info!("Certificate received {:?}", certificate); } Some(watch_certificates_response::Event::StreamOpened( watch_certificates_response::StreamOpened { subnet_ids }, )) => { debug!("TCE client: stream opened for subnet_ids {:?}", subnet_ids); assert_eq!(subnet_ids[0].value, source_subnet_id.value); // We have opened connection and 2 way stream, finishing test break; } Some(watch_certificates_response::Event::CertificatePushed(CertificatePushed { certificate: None, .. 
})) => { panic!("TCE client: empty certificate received"); } _ => { panic!("TCE client: something unexpected is received"); } } } info!("Shutting down TCE node client"); context.shutdown().await?; Ok(()) } #[test(tokio::test)] async fn test_tce_get_source_head_certificate() -> Result<(), Box> { let mut context = start_node::partial_2(&[], NodeConfig::standalone()).await; let source_subnet_id: SubnetId = SOURCE_SUBNET_ID_1.into(); let default_cert_id: CertificateId = PREV_CERTIFICATE_ID.into(); let certificate_id: CertificateId = CERTIFICATE_ID_2.into(); // Test get source head certificate for empty TCE history // This will be actual genesis certificate let response = context .api_grpc_client .get_source_head(GetSourceHeadRequest { subnet_id: Some(source_subnet_id.clone()), }) .await .map(|r| r.into_inner()) .expect("valid response"); let expected_default_genesis_certificate = Certificate { id: Some(default_cert_id.clone()), prev_id: Some(default_cert_id.clone()), source_subnet_id: Some(source_subnet_id.clone()), target_subnets: vec![], state_root: [0u8; 32].to_vec(), tx_root_hash: [0u8; 32].to_vec(), receipts_root_hash: [0u8; 32].to_vec(), verifier: 0, proof: Some(StarkProof { value: Vec::new() }), signature: Some(Default::default()), }; let expected_response = GetSourceHeadResponse { certificate: Some(expected_default_genesis_certificate.clone()), position: Some(SourceStreamPosition { source_subnet_id: Some(source_subnet_id.clone()), certificate_id: expected_default_genesis_certificate.id.clone(), position: 0, }), }; assert_eq!(response, expected_response); let test_certificate = Certificate { source_subnet_id: Some(source_subnet_id.clone()), id: Some(certificate_id), prev_id: Some(default_cert_id), target_subnets: vec![], state_root: [0u8; 32].to_vec(), tx_root_hash: [0u8; 32].to_vec(), receipts_root_hash: [0u8; 32].to_vec(), verifier: 0, proof: Some(StarkProof { value: Vec::new() }), signature: Some(Default::default()), }; match context .api_grpc_client 
.submit_certificate(SubmitCertificateRequest { certificate: Some(test_certificate.clone()), }) .await .map(|r| r.into_inner()) { Ok(response) => { debug!("Successfully submitted the Certificate {:?}", response); } Err(e) => { error!("Unable to submit the certificate: {e:?}"); return Err(Box::from(e)); } }; // Test get source head certificate for non empty certificate history let response = context .api_grpc_client .get_source_head(GetSourceHeadRequest { subnet_id: Some(source_subnet_id.clone()), }) .await .map(|r| r.into_inner()) .unwrap(); // TODO: currently only delivered certificates are counted as // head source certificate, so default certificate is expected // Should be updated to count also pending certificates let expected_response = GetSourceHeadResponse { certificate: Some(expected_default_genesis_certificate.clone()), position: Some(SourceStreamPosition { source_subnet_id: Some(source_subnet_id.clone()), certificate_id: expected_default_genesis_certificate.id, position: 0, }), }; assert_eq!(response, expected_response); info!("Shutting down TCE node client"); context.shutdown().await?; Ok(()) } #[test(tokio::test)] async fn test_tce_get_last_pending_certificates() -> Result<(), Box> { let mut context = start_node::partial_2(&[], NodeConfig::standalone()).await; let source_subnet_id: SubnetId = SOURCE_SUBNET_ID_1.into(); let certificates = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 10); // Test get last pending certificates for empty TCE history // Reply should be empty let response = context .api_grpc_client .get_last_pending_certificates(GetLastPendingCertificatesRequest { subnet_ids: vec![source_subnet_id.clone()], }) .await .map(|r| r.into_inner()) .expect("valid response"); let last_pending_certificates = vec![( Base64::encode_string(&source_subnet_id.value), LastPendingCertificate { value: None, index: 0, }, )] .into_iter() .collect::>(); let expected_response = GetLastPendingCertificatesResponse { last_pending_certificate: 
last_pending_certificates, }; assert_eq!(response, expected_response); for cert in &certificates { match context .api_grpc_client .submit_certificate(SubmitCertificateRequest { certificate: Some(cert.certificate.clone().into()), }) .await .map(|r| r.into_inner()) { Ok(response) => { debug!("Successfully submitted the Certificate {:?}", response); } Err(e) => { error!("Unable to submit the certificate: {e:?}"); return Err(Box::from(e)); } }; } // Test get last pending certificate let response = context .api_grpc_client .get_last_pending_certificates(GetLastPendingCertificatesRequest { subnet_ids: vec![source_subnet_id.clone()], }) .await .map(|r| r.into_inner()) .expect("valid response"); let expected_last_pending_certificates = vec![( Base64::encode_string(&source_subnet_id.value), LastPendingCertificate { value: Some( certificates .iter() .last() .unwrap() .clone() .certificate .into(), ), index: 10, }, )] .into_iter() .collect::>(); let expected_response = GetLastPendingCertificatesResponse { last_pending_certificate: expected_last_pending_certificates, }; assert_eq!(response, expected_response); info!("Shutting down TCE node client"); context.shutdown().await?; Ok(()) } #[rstest] #[test(tokio::test)] #[timeout(Duration::from_secs(300))] async fn test_tce_open_stream_with_checkpoint( input_certificates: Vec, ) -> Result<(), Box> { let mut context = start_node::partial_2(&input_certificates[..], NodeConfig::standalone()).await; let source_subnet_id_1: SubnetId = SubnetId { value: SOURCE_SUBNET_ID_1.into(), }; let source_subnet_id_1_stream_position = 4; let source_subnet_id_1_prefilled_certificates = &input_certificates[0..SOURCE_SUBNET_ID_1_NUMBER_OF_PREFILLED_CERTIFICATES]; let source_subnet_id_2: SubnetId = SubnetId { value: SOURCE_SUBNET_ID_2.into(), }; let source_subnet_id_2_stream_position = 2; let source_subnet_id_2_prefilled_certificates = &input_certificates[SOURCE_SUBNET_ID_1_NUMBER_OF_PREFILLED_CERTIFICATES..]; let target_subnet_id: SubnetId = SubnetId { 
value: TARGET_SUBNET_ID_1.into(), }; // Ask for target checkpoint for 2 subnets, one from position 4, other from position 2 let target_checkpoint = TargetCheckpoint { target_subnet_ids: vec![target_subnet_id.clone()], positions: vec![ TargetStreamPosition { source_subnet_id: source_subnet_id_1.clone().into(), target_subnet_id: target_subnet_id.clone().into(), position: source_subnet_id_1_stream_position, certificate_id: Some( source_subnet_id_1_prefilled_certificates[3] .certificate .id .into(), ), }, TargetStreamPosition { source_subnet_id: source_subnet_id_2.clone().into(), target_subnet_id: target_subnet_id.clone().into(), position: source_subnet_id_2_stream_position, certificate_id: Some( source_subnet_id_2_prefilled_certificates[1] .certificate .id .into(), ), }, ], }; // Make list of expected certificate, first received certificate for every source subnet and its position let mut expected_certs = HashMap::::new(); expected_certs.insert( input_certificates[4].certificate.source_subnet_id.into(), (input_certificates[4].certificate.clone().into(), 4), ); expected_certs.insert( input_certificates[SOURCE_SUBNET_ID_1_NUMBER_OF_PREFILLED_CERTIFICATES + 2] .certificate .source_subnet_id .into(), ( input_certificates[SOURCE_SUBNET_ID_1_NUMBER_OF_PREFILLED_CERTIFICATES + 2] .certificate .clone() .into(), 2, ), ); info!("Prefilled certificates:"); let mut index = -1; input_certificates .iter() .map(|c| c.certificate.id) .collect::>() .iter() .for_each(|id| { index += 1; info!("{index}: {id}") }); //Outbound stream let in_stream = async_stream::stream! 
{ yield watch_certificates_request::OpenStream { target_checkpoint: Some(target_checkpoint), source_checkpoint: None }.into() }; let response = context .api_grpc_client .watch_certificates(in_stream) .await .unwrap(); let mut resp_stream = response.into_inner(); info!("TCE client: waiting for watch certificate response"); while let Some(received) = resp_stream.next().await { debug!("TCE client received: {:?}", received); let received = received.unwrap(); match received.event { Some(watch_certificates_response::Event::CertificatePushed(CertificatePushed { certificate: Some(received_certificate), positions, })) => { if let Some((expected_first_certificate_from_subnet, expected_position)) = expected_certs.get(received_certificate.source_subnet_id.as_ref().unwrap()) { info!( "\n\nCertificate received: {} source sid {}, target sid {}", received_certificate.id.as_ref().unwrap(), received_certificate.source_subnet_id.as_ref().unwrap(), received_certificate.target_subnets[0] ); assert_eq!( received_certificate, *expected_first_certificate_from_subnet ); let received_position = positions.first().unwrap(); assert_eq!(*expected_position, received_position.position); assert_eq!( received_position.target_subnet_id.as_ref().unwrap(), &received_certificate.target_subnets[0] ); // First certificate received from source subnet, remove it from the expected list expected_certs.remove(received_certificate.source_subnet_id.as_ref().unwrap()); info!( "Received valid first certificate from source subnet {} certificate id {}", received_certificate.source_subnet_id.as_ref().unwrap(), received_certificate.id.as_ref().unwrap(), ); } else { debug!( "\n\nAdditional certificate received from the source subnet: {} source \ sid {}, target sid {}", received_certificate.id.as_ref().unwrap(), received_certificate.source_subnet_id.as_ref().unwrap(), received_certificate.target_subnets[0] ); } if expected_certs.is_empty() { info!("All expected certificates received"); break; } } 
Some(watch_certificates_response::Event::StreamOpened( watch_certificates_response::StreamOpened { subnet_ids }, )) => { debug!("TCE client: stream opened for subnet_ids {:?}", subnet_ids); continue; } Some(watch_certificates_response::Event::CertificatePushed(CertificatePushed { certificate: None, .. })) => { panic!("TCE client: empty certificate received"); } _ => { panic!("TCE client: something unexpected is received"); } } } info!("Shutting down TCE node client"); context.shutdown().await?; Ok(()) } #[fixture] fn input_certificates() -> Vec { let mut certificates = Vec::new(); certificates.append(&mut create_certificate_chain( SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], SOURCE_SUBNET_ID_1_NUMBER_OF_PREFILLED_CERTIFICATES, )); certificates.append(&mut create_certificate_chain( SOURCE_SUBNET_ID_2, &[TARGET_SUBNET_ID_1], SOURCE_SUBNET_ID_2_NUMBER_OF_PREFILLED_CERTIFICATES, )); certificates } #[test(tokio::test)] async fn test_tce_proxy_submit_certificate() -> Result<(), Box> { let mut context = start_node::partial_2(&[], NodeConfig::standalone()).await; let source_subnet_id = SOURCE_SUBNET_ID_1; let target_subnet_stream_positions = Vec::new(); let mut certificates = Vec::new(); certificates.append(&mut create_certificate_chain( SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 5, )); let last_sent_certificate = certificates.last().unwrap().clone().certificate; // Create tce proxy client let (tce_proxy_worker, _source_head_certificate_id) = match TceProxyWorker::new(TceProxyConfig { subnet_id: source_subnet_id, tce_endpoint: context.api_entrypoint.clone(), positions: target_subnet_stream_positions, }) .await { Ok((tce_proxy_worker, mut source_head_certificate)) => { if let Some((cert, _position)) = &mut source_head_certificate { if cert.id == CertificateId::default() { warn!( "Tce has not provided source head certificate, starting from subnet \ genesis block..." 
); source_head_certificate = None; } } info!( "TCE proxy client is starting for the source subnet {:?} from the head {:?}", source_subnet_id, source_head_certificate ); let source_head_certificate_id = source_head_certificate.map(|(cert, position)| (cert.id, position)); (tce_proxy_worker, source_head_certificate_id) } Err(e) => { panic!("Unable to create TCE Proxy: {e}"); } }; for (index, cert) in certificates.into_iter().enumerate() { match tce_proxy_worker .send_command(TceProxyCommand::SubmitCertificate { cert: Box::new(cert.certificate), ctx: Default::default(), }) .await { Ok(_) => { info!("Certificate {} successfully submitted", index); } Err(e) => { panic!("Error submitting certificate: {e}"); } } } // Wait for certificates to be submitted tokio::time::sleep(Duration::from_secs(5)).await; // Get last pending certificate to check that all certificates are submitted let (mut tce_client, _receiving_certificate_stream) = create_tce_client(&context.api_entrypoint, SOURCE_SUBNET_ID_1).await?; match tce_client .get_last_pending_certificates(vec![tce_client.get_subnet_id()]) .await { Ok(mut pending_certificates) => { let pending_certificate = pending_certificates .remove(&tce_client.get_subnet_id()) .unwrap_or_default(); info!("Last pending certificate: {:?}", pending_certificate); assert_eq!(pending_certificate.unwrap().0, last_sent_certificate); } Err(e) => { panic!("Unable to retrieve latest pending certificate {e}"); } }; info!("Shutting down TCE node client"); context.shutdown().await?; Ok(()) } async fn create_tce_client( endpoint: &str, source_subnet_id: topos_core::uci::SubnetId, ) -> Result< ( TceClient, impl futures::stream::Stream< Item = ( topos_core::uci::Certificate, topos_core::api::grpc::checkpoints::TargetStreamPosition, ), >, ), Box, > { let (evt_sender, _evt_rcv) = mpsc::channel::(128); let (_tce_client_shutdown_channel, shutdown_receiver) = mpsc::channel::>(1); let (tce_client, receiving_certificate_stream) = TceClientBuilder::default() 
.set_subnet_id(source_subnet_id) .set_tce_endpoint(endpoint) .set_proxy_event_sender(evt_sender.clone()) .build_and_launch(shutdown_receiver) .await?; tce_client.open_stream(Vec::new()).await?; Ok((tce_client, receiving_certificate_stream)) } #[test(tokio::test)] async fn test_tce_client_submit_and_get_last_pending_certificate( ) -> Result<(), Box> { let mut context = start_node::partial_2(&[], NodeConfig::standalone()).await; let mut certificates = Vec::new(); certificates.append(&mut create_certificate_chain( SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 5, )); let last_sent_certificate = certificates.last().unwrap().clone().certificate; let (mut tce_client, _receiving_certificate_stream) = create_tce_client(&context.api_entrypoint, SOURCE_SUBNET_ID_1).await?; // Create tce proxy client for (index, cert) in certificates.into_iter().enumerate() { match tce_client.send_certificate(cert.certificate).await { Ok(_) => { info!( "Certificate {} successfully submitted by the tce client", index ); } Err(e) => { panic!("Error submitting certificate by the tce client: {e}"); } } } // Wait for certificates to be submitted tokio::time::sleep(Duration::from_secs(5)).await; // Get last pending certificate to check that all certificates are submitted match tce_client .get_last_pending_certificates(vec![tce_client.get_subnet_id()]) .await { Ok(mut pending_certificates) => { let pending_certificate = pending_certificates .remove(&tce_client.get_subnet_id()) .unwrap_or_default(); info!("Last pending certificate: {:?}", pending_certificate); assert_eq!(pending_certificate.unwrap().0, last_sent_certificate); } Err(e) => { panic!("Unable to retrieve latest pending certificate {e}"); } }; info!("Shutting down TCE node client"); context.shutdown().await?; Ok(()) } #[test(tokio::test)] async fn test_tce_client_get_empty_history_source_head() -> Result<(), Box> { let mut context = start_node::partial_2(&[], NodeConfig::standalone()).await; let (mut tce_client, _receiving_certificate_stream) 
= create_tce_client(&context.api_entrypoint, SOURCE_SUBNET_ID_1).await?; // Get source head certificate, check if it is empty match tce_client.get_source_head().await { Ok((source_head_cert, position)) => { info!( "Source head certificate: {:?}, position {}", source_head_cert, position ); assert_eq!(source_head_cert.id, CertificateId::from([0u8; 32])); assert_eq!(position, 0); } Err(e) => { panic!("Unable to retrieve latest pending certificate {e}"); } }; info!("Shutting down TCE node client"); context.shutdown().await?; Ok(()) } #[rstest] #[test(tokio::test)] async fn test_tce_client_get_source_head( input_certificates: Vec, ) -> Result<(), Box> { let mut context = start_node::partial_2(&input_certificates[..], NodeConfig::standalone()).await; // Tce is prefilled with delivered certificates let source_subnet_id_1_prefilled_certificates = &input_certificates[0..SOURCE_SUBNET_ID_1_NUMBER_OF_PREFILLED_CERTIFICATES]; let last_delivered_certificate = &source_subnet_id_1_prefilled_certificates .last() .unwrap() .certificate; let (mut tce_client, _receiving_certificate_stream) = create_tce_client(&context.api_entrypoint, SOURCE_SUBNET_ID_1).await?; // Get source head, check if it matches match tce_client.get_source_head().await { Ok((source_head_cert, position)) => { info!( "Source head certificate: {:?}, position {}", source_head_cert, position ); assert_eq!(source_head_cert, *last_delivered_certificate); assert_eq!( position, SOURCE_SUBNET_ID_1_NUMBER_OF_PREFILLED_CERTIFICATES as u64 - 1 ); } Err(e) => { panic!("Unable to retrieve latest pending certificate {e}"); } }; // Last pending certificate should be empty match tce_client .get_last_pending_certificates(vec![tce_client.get_subnet_id()]) .await { Ok(mut pending_certificates) => { let pending_certificate = pending_certificates .remove(&tce_client.get_subnet_id()) .unwrap_or_default(); info!("Last pending certificates: {:?}", pending_certificates); assert_eq!(pending_certificate, None); } Err(e) => { panic!("Unable 
to retrieve latest pending certificate {e}"); } }; info!("Shutting down TCE node client"); context.shutdown().await?; Ok(()) } #[rstest] #[test(tokio::test)] #[timeout(Duration::from_secs(30))] async fn test_tce_client_submit_and_get_certificate_delivered( ) -> Result<(), Box> { let peers_context = topos_test_sdk::tce::create_network(5, &[]).await; let mut peers = peers_context.into_iter(); let mut sending_tce: TceContext = peers.next().expect("valid peer 1").1; let mut receiving_tce: TceContext = peers.next().expect("valid peer 2").1; let mut certificates = Vec::new(); certificates.append(&mut create_certificate_chain( SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 5, )); let expected_certs: HashSet = certificates .iter() .map(|cert| cert.certificate.id) .collect(); // Create tce proxy client for sending subnet let (mut tce_client_source, _) = create_tce_client(&sending_tce.api_entrypoint, SOURCE_SUBNET_ID_1).await?; // Create tce proxy client for receiving subnet let (_, mut target_receiving_certificate_stream) = create_tce_client(&receiving_tce.api_entrypoint, TARGET_SUBNET_ID_1).await?; // Send certificate from source subnet for (index, cert) in certificates.into_iter().enumerate() { match tce_client_source.send_certificate(cert.certificate).await { Ok(_) => { info!( "Certificate {} successfully submitted by the tce client", index ); } Err(e) => { panic!("Error submitting certificate by the tce client: {e}"); } } } // Wait for certificates to be submitted tokio::time::sleep(Duration::from_secs(5)).await; // Listen for certificates on target subnet info!("Waiting for certificates to be received on the target subnet"); let mut received_certs = HashSet::new(); loop { if let Some((certificate, target_position)) = target_receiving_certificate_stream.next().await { info!( "Delivered certificate cert id {}, position {:?}", &certificate.id, target_position ); received_certs.insert(certificate.id); if received_certs.len() == expected_certs.len() && received_certs == 
expected_certs { info!("All certificates successfully received"); break; } } } info!("Shutting down TCE node client"); sending_tce.shutdown().await?; receiving_tce.shutdown().await?; Ok(()) } ================================================ FILE: crates/topos-tce-storage/Cargo.toml ================================================ [package] name = "topos-tce-storage" version = "0.1.0" edition = "2021" [lints] workspace = true [dependencies] topos-core = { workspace = true, features = ["uci", "api"] } topos-metrics = { workspace = true } async-stream.workspace = true async-trait.workspace = true bincode.workspace = true futures.workspace = true serde.workspace = true thiserror.workspace = true tokio = { workspace = true, features = ["full"] } tokio-stream.workspace = true tracing.workspace = true lazy_static.workspace = true rocksdb = { version = "0.20.1", optional = true } serde_derive = "1.0.145" once_cell = "1.17" arc-swap = "1.6.0" [dev-dependencies] rand = { workspace = true, features = ["default"] } rstest = { workspace = true, features = ["async-timeout"] } uuid = { workspace = true, features = ["v4", "serde"] } tracing-subscriber = { workspace = true, features = ["env-filter", "fmt"] } tracing.workspace = true test-log.workspace = true env_logger.workspace = true topos-test-sdk = { path = "../topos-test-sdk/" } [features] default = ["rocksdb", "inmemory"] inmemory = [] rocksdb = ["dep:rocksdb"] ================================================ FILE: crates/topos-tce-storage/README.md ================================================ # topos-tce-storage The library provides the storage layer for the Topos TCE. It is responsible for storing and retrieving the [certificates](https://docs.topos.technology/content/module-1/4-protocol.html#certificates), managing the pending certificates pool and the certificate status, storing different metadata related to the protocol and the internal state of the TCE. The storage layer is implemented using RocksDB. 
The library exposes multiple stores that are used by the TCE.

### Architecture

The storage layer is composed of multiple stores that are used by the TCE.
Each store is described in detail in its own module.
Those stores are mainly used in `topos-tce-broadcast`, `topos-tce-api` and `topos-tce-synchronizer`.

As an overview, the storage layer is composed of the following stores:

*(Architecture diagram: a theme-dependent image was lost during extraction — see the rendered README in the repository.)*

#### Definitions and Responsibilities

As illustrated above, multiple `stores` are exposed in the library using various `tables`.
The difference between a `store` and a `table` is that the `table` is responsible for storing the data
while the `store` manages the data access and its behavior.

Here's the list of the different stores and their responsibilities:

- The [`EpochValidatorsStore`](struct@epoch::EpochValidatorsStore) is responsible for managing the list of validators for each `epoch`.

- The [`FullNodeStore`](struct@fullnode::FullNodeStore) is responsible for managing all persistent data such as [`Certificate`](struct@topos_core::uci::Certificate) delivered and associated `streams`.

- The [`IndexStore`](struct@index::IndexStore) is responsible for managing indexes and collecting information about the broadcast and the network.

- The [`ValidatorStore`](struct@validator::ValidatorStore) is responsible for managing the pending data that one validator needs to keep track of, such as the certificates pool.

For more information about a `store`, see the related doc.

Next, we have the list of the different tables and their responsibilities:

- The [`EpochValidatorsTables`](struct@epoch::EpochValidatorsTables) is responsible for storing the list of validators for each `epoch`.

- The [`ValidatorPerpetualTables`](struct@validator::ValidatorPerpetualTables) is responsible for storing the delivered [`Certificate`](struct@topos_core::uci::Certificate)s and the persistent data related to the Broadcast.
- The [`ValidatorPendingTables`](struct@validator::ValidatorPendingTables) is responsible for storing the pending data, such as the certificates pool.

- The [`IndexTables`](struct@index::IndexTables) is responsible for storing indexes about the delivery of [`Certificate`](struct@topos_core::uci::Certificate)s such as `target subnet stream`.

### Special Considerations

When using the storage layer, be aware of the following:

- The storage layer uses [rocksdb](https://rocksdb.org/) as the backend, which means you don't need an external service, as `rocksdb` is an embedded key-value store.

- The storage layer uses [`Arc`](struct@std::sync::Arc) to share the stores between threads. It also means that a `store` is only instantiated once.

- Some storage methods are batching multiple writes into a single transaction.

### Design Philosophy

The choice of using [rocksdb](https://rocksdb.org/) as a backend was made because it matches a lot of the conditions that we expected, such as being embedded and having good performance when reading and writing our data.
Splitting storage into multiple `stores` and `tables` allows us to have a strong separation of concerns directly at the storage level.
However, `RocksDB` is not the best fit when it comes to composing or filtering data based on the data itself.

As mentioned above, the different stores are using [`Arc`](struct@std::sync::Arc), allowing a single store to be instantiated once and then shared between threads.
This is very useful when it comes to the [`FullNodeStore`](struct@fullnode::FullNodeStore) as it is used in various places but should provide a single entry point to the data.

It also means that the store is immutable and thus can be shared easily between threads, which is a good thing for concurrency.
However, some stores are implementing the [`WriteStore`](trait@store::WriteStore) trait in order to insert or mutate data, managing locks on resources and preventing any other query from mutating the data currently being processed.
For more information about the locks see [`locking`](module@fullnode::locking) The rest of the mutation on the data are handled by [rocksdb](https://rocksdb.org/) itself. ================================================ FILE: crates/topos-tce-storage/src/client.rs ================================================ use std::sync::Arc; use topos_core::types::stream::CertificateTargetStreamPosition; use topos_core::types::CertificateDelivered; use topos_core::uci::{Certificate, SubnetId}; use crate::store::ReadStore; use crate::validator::ValidatorStore; use crate::{ errors::StorageError, FetchCertificatesFilter, FetchCertificatesPosition, PendingCertificateId, }; #[derive(Clone)] pub struct StorageClient { store: Arc, } impl StorageClient { /// Create a new StorageClient pub fn new(store: Arc) -> Self { Self { store } } /// Return the list of all source subnets that targeted the given target subnet pub async fn get_target_source_subnet_list( &self, target_subnet_id: SubnetId, ) -> Result, StorageError> { self.store.get_target_source_subnet_list(&target_subnet_id) } /// Fetch all pending certificates /// /// Return list of pending certificates pub async fn get_pending_certificates( &self, ) -> Result, StorageError> { Ok(self.store.iter_pending_pool()?.collect()) } pub async fn fetch_certificates( &self, filter: FetchCertificatesFilter, ) -> Result, StorageError> { match filter { FetchCertificatesFilter::Source { .. 
} => unimplemented!(), FetchCertificatesFilter::Target { target_stream_position, limit, } => self .store .get_target_stream_certificates_from_position( CertificateTargetStreamPosition::new( target_stream_position.target_subnet_id, target_stream_position.source_subnet_id, target_stream_position.position, ), limit, ) .map(|values| { values .into_iter() .map(|(certificate, position)| { (certificate, FetchCertificatesPosition::Target(position)) }) .collect() }), } } /// Fetch source head certificate for subnet /// /// Return position of the certificate and certificate itself pub async fn get_source_head( &self, subnet_id: SubnetId, ) -> Result, StorageError> { Ok(self.store.get_source_head(&subnet_id)?.and_then(|head| { self.store .get_certificate(&head.certificate_id) .ok()? .map(|certificate| (*head.position, certificate.certificate)) })) } } ================================================ FILE: crates/topos-tce-storage/src/constant.rs ================================================ pub(crate) mod cfs { pub(crate) const CERTIFICATES: &str = "certificates"; pub(crate) const STREAMS: &str = "streams"; pub(crate) const EPOCH_CHAIN: &str = "epoch_chain"; pub(crate) const UNVERIFIED: &str = "unverified"; pub(crate) const PENDING_POOL: &str = "pending_pool"; pub(crate) const PENDING_POOL_INDEX: &str = "pending_pool_index"; pub(crate) const PRECEDENCE_POOL: &str = "precedence_pool"; pub(crate) const TARGET_STREAMS: &str = "target_streams"; pub(crate) const TARGET_SOURCE_LIST: &str = "target_source_list"; pub(crate) const SOURCE_LIST: &str = "source_list"; pub(crate) const DELIVERED_CERTIFICATES_PER_SOURCE_FOR_TARGET: &str = "delivered_certificates_per_source_for_target"; pub(crate) const VALIDATORS: &str = "validators"; pub(crate) const EPOCH_SUMMARY: &str = "epoch_summary"; pub(crate) const BROADCAST_STATES: &str = "broadcast_states"; } ================================================ FILE: crates/topos-tce-storage/src/epoch/mod.rs 
================================================ use std::path::Path; use std::sync::Arc; use std::{collections::HashMap, sync::RwLock}; use arc_swap::ArcSwap; use crate::errors::StorageError; use crate::types::{EpochId, Validators}; pub use self::tables::EpochValidatorsTables; pub use self::tables::ValidatorPerEpochTables; mod tables; /// Epoch contextualized data - can be purged at some point pub struct ValidatorPerEpochStore { #[allow(unused)] epoch_id: EpochId, #[allow(unused)] validators: RwLock, #[allow(unused)] tables: ValidatorPerEpochTables, } impl ValidatorPerEpochStore { pub fn new(epoch_id: EpochId, path: &Path) -> Result, StorageError> { let tables: ValidatorPerEpochTables = ValidatorPerEpochTables::open(epoch_id, path); let store = ArcSwap::from(Arc::new(Self { epoch_id, validators: RwLock::new(Vec::new()), tables, })); Ok(store) } } pub struct EpochValidatorsStore { #[allow(unused)] tables: EpochValidatorsTables, #[allow(unused)] caches: RwLock>, } impl EpochValidatorsStore { pub fn new(path: &Path) -> Result, StorageError> { let tables = EpochValidatorsTables::open(path); let store = Arc::new(Self { tables, caches: RwLock::new(HashMap::new()), }); Ok(store) } } ================================================ FILE: crates/topos-tce-storage/src/epoch/tables.rs ================================================ use std::{fs::create_dir_all, path::Path}; use rocksdb::ColumnFamilyDescriptor; use topos_core::uci::CertificateId; use tracing::warn; use crate::{ constant::cfs, rocks::{ db::{default_options, init_db, init_with_cfs}, db_column::DBColumn, }, types::{BroadcastState, EpochId, Validators, VerifiedCheckpointSummary}, }; pub struct EpochValidatorsTables { #[allow(unused)] validators_map: DBColumn, } impl EpochValidatorsTables { pub(crate) fn open(path: &Path) -> Self { let path = path.join("validators"); let mut options = rocksdb::Options::default(); options.create_if_missing(true); let db = init_db(&path, options).unwrap_or_else(|_| panic!("Cannot 
open DB at {:?}", path)); Self { validators_map: DBColumn::reopen(&db, cfs::VALIDATORS), } } } /// Epoch contextualized data - can be purged at some point pub struct ValidatorPerEpochTables { #[allow(unused)] epoch_summary: DBColumn, #[allow(unused)] broadcast_states: DBColumn, #[allow(unused)] validators: Vec, } impl ValidatorPerEpochTables { pub(crate) fn open(epoch_id: EpochId, path: &Path) -> Self { let path = path.join("epochs").join(epoch_id.to_string()); if !path.exists() { warn!("Path {:?} does not exist, creating it", path); create_dir_all(&path).expect("Cannot create ValidatorPerEpochTables directory"); } let cfs = vec![ ColumnFamilyDescriptor::new(cfs::EPOCH_SUMMARY, default_options()), ColumnFamilyDescriptor::new(cfs::BROADCAST_STATES, default_options()), ]; let db = init_with_cfs(&path, default_options(), cfs) .unwrap_or_else(|_| panic!("Cannot open DB at {:?}", path)); Self { epoch_summary: DBColumn::reopen(&db, cfs::EPOCH_SUMMARY), broadcast_states: DBColumn::reopen(&db, cfs::BROADCAST_STATES), validators: Vec::new(), } } } #[allow(unused)] enum EpochSummaryKey { EpochId, StartCheckpoint, EndCheckpoint, } #[allow(unused)] enum EpochSummaryValue { EpochId(EpochId), StartCheckpoint(VerifiedCheckpointSummary), EndCheckpoint(VerifiedCheckpointSummary), } ================================================ FILE: crates/topos-tce-storage/src/errors.rs ================================================ use thiserror::Error; use tokio::sync::{mpsc, oneshot}; use topos_core::{ types::stream::PositionError, uci::{CertificateId, SubnetId, SUBNET_ID_LENGTH}, }; #[derive(Error, Debug)] pub enum InternalStorageError { #[error("The certificate already exists")] CertificateAlreadyExists, #[error("The certificate is already in pending")] CertificateAlreadyPending, #[error("Unable to find a certificate: {0:?}")] CertificateNotFound(CertificateId), #[error("Unable to start storage")] UnableToStartStorage, #[cfg(feature = "rocksdb")] #[error("Unable to execute query: {0}")] 
RocksDBError(#[from] rocksdb::Error), #[cfg(feature = "rocksdb")] #[error("Accessing invalid column family: {0}")] InvalidColumnFamily(&'static str), #[error("Unable to deserialize database value")] UnableToDeserializeValue, #[error("Invalid query argument: {0}")] InvalidQueryArgument(&'static str), #[error("Unexpected DB state: {0}")] UnexpectedDBState(&'static str), #[error(transparent)] Bincode(#[from] Box), #[error("A concurrent DBBatch has been detected")] ConcurrentDBBatchDetected, #[error("{0}: {1:?}")] PositionError(#[source] PositionError, [u8; SUBNET_ID_LENGTH]), #[error("InvalidSubnetId")] InvalidSubnetId, #[error("Missing head certificate for source subnet id {0}")] MissingHeadForSubnet(SubnetId), #[error("Certificate already exists at position {0} for subnet {1}")] CertificateAlreadyExistsAtPosition(u64, SubnetId), } #[derive(Debug, Error)] pub enum StorageError { #[error(transparent)] InternalStorage(#[from] InternalStorageError), #[error("Unable to communicate with storage: closed")] CommunicationChannelClosed, #[error("Unable to receive expected response from storage: {0}")] ResponseChannel(#[from] oneshot::error::RecvError), #[error("Unable to execute shutdown on the storage service: {0}")] ShutdownCommunication(mpsc::error::SendError>), } ================================================ FILE: crates/topos-tce-storage/src/fullnode/locking.rs ================================================ use std::{ collections::{hash_map::RandomState, HashMap}, hash::{BuildHasher, Hash}, sync::Arc, }; use tokio::sync::{Mutex, RwLock}; const LOCK_SHARDING: usize = 2048; type LocksVec = Vec>>>>; pub(crate) struct LockGuards { locks: Arc>, random_state: RandomState, } impl LockGuards { pub fn new() -> Self { Self { random_state: RandomState::new(), locks: Arc::new( (0..LOCK_SHARDING) .map(|_| RwLock::new(HashMap::new())) .collect(), ), } } pub async fn get_lock(&self, key: T) -> Arc> { let hash = self.random_state.hash_one(&key) as usize; let lock_shard = hash % 
self.locks.len(); let lock = { let read = self.locks[lock_shard].read().await; read.get(&key).cloned() }; if let Some(lock) = lock { lock } else { let lock = { let mut write = self.locks[lock_shard].write().await; write .entry(key) .or_insert_with(|| Arc::new(Mutex::new(()))) .clone() }; lock } } } ================================================ FILE: crates/topos-tce-storage/src/fullnode/mod.rs ================================================ use std::{collections::HashMap, path::Path, sync::Arc}; use arc_swap::ArcSwap; use async_trait::async_trait; use rocksdb::properties::ESTIMATE_NUM_KEYS; use tokio::sync::OwnedMutexGuard; use topos_core::{ types::{ stream::{CertificateSourceStreamPosition, CertificateTargetStreamPosition, Position}, CertificateDelivered, }, uci::{CertificateId, SubnetId}, }; use tracing::{error, info}; use crate::{ epoch::{EpochValidatorsStore, ValidatorPerEpochStore}, errors::{InternalStorageError, StorageError}, index::IndexTables, rocks::{map::Map, TargetSourceListKey}, store::{ReadStore, WriteStore}, validator::ValidatorPerpetualTables, CertificatePositions, SourceHead, }; use self::locking::LockGuards; pub mod locking; /// Store to manage FullNode data /// /// The [`FullNodeStore`] is responsible for storing and exposing the data that is /// needed by a full node to perform its duties. 
/// /// The responsabilities of the [`FullNodeStore`] are: /// /// - Store and expose the certificates that are delivered /// - Store and expose the state of the certificate streams /// /// To do so, it implements [`ReadStore`] / [`WriteStore`] by using multiple tables and store such /// as [`ValidatorPerpetualTables`], [`EpochValidatorsStore`] and [`IndexTables`] pub struct FullNodeStore { certificate_lock_guards: LockGuards, subnet_lock_guards: LockGuards, #[allow(unused)] epoch_store: ArcSwap, #[allow(unused)] validators_store: Arc, pub(crate) perpetual_tables: Arc, pub(crate) index_tables: Arc, } impl FullNodeStore { /// Try to create a new instance of [`FullNodeStore`] based on the given path pub fn new(path: &Path) -> Result, StorageError> { let perpetual_tables = Arc::new(ValidatorPerpetualTables::open(path)); let index_tables = Arc::new(IndexTables::open(path)); let validators_store = EpochValidatorsStore::new(path)?; let epoch_store = ValidatorPerEpochStore::new(0, path)?; FullNodeStore::open( epoch_store, validators_store, perpetual_tables, index_tables, ) } pub fn open( epoch_store: ArcSwap, validators_store: Arc, perpetual_tables: Arc, index_tables: Arc, ) -> Result, StorageError> { Ok(Arc::new(Self { certificate_lock_guards: LockGuards::new(), subnet_lock_guards: LockGuards::new(), epoch_store, validators_store, perpetual_tables, index_tables, })) } /// Await for a [`LockGuards`] for the given certificate id pub(crate) async fn certificate_lock_guard( &self, certificate_id: CertificateId, ) -> OwnedMutexGuard<()> { self.certificate_lock_guards .get_lock(certificate_id) .await .lock_owned() .await } /// Await for a [`LockGuards`] for the given subnet id pub(crate) async fn subnet_lock_guard(&self, subnet_id: SubnetId) -> OwnedMutexGuard<()> { self.subnet_lock_guards .get_lock(subnet_id) .await .lock_owned() .await } } #[async_trait] impl WriteStore for FullNodeStore { async fn insert_certificate_delivered( &self, certificate: &CertificateDelivered, ) -> 
Result { // Lock resources for concurrency issues let _cert_guard = self .certificate_lock_guard(certificate.certificate.id) .await; let _subnet_guard = self .subnet_lock_guard(certificate.certificate.source_subnet_id) .await; let subnet_id = certificate.certificate.source_subnet_id; let certificate_id = certificate.certificate.id; let expected_position = certificate.proof_of_delivery.delivery_position.clone(); let mut batch = self.perpetual_tables.certificates.batch(); let mut index_batch = self.index_tables.target_streams.batch(); // Check position already taken if let Some(delivered_at_position) = self.perpetual_tables.streams.get(&expected_position)? { if delivered_at_position != certificate_id { error!( "Expected position {} already taken by {}", expected_position, delivered_at_position ); return Err(StorageError::InternalStorage( InternalStorageError::CertificateAlreadyExistsAtPosition( *expected_position.position, expected_position.subnet_id, ), )); } else { return Err(StorageError::InternalStorage( InternalStorageError::CertificateAlreadyExists, )); } } let update_stream_position = self .index_tables .source_list .get(&subnet_id)? .and_then(|(_certificate, pos)| { if expected_position.position > pos { Some((certificate_id, expected_position.position)) } else { None } }) .or(Some((certificate_id, expected_position.position))); batch = batch.insert_batch( &self.perpetual_tables.certificates, [(&certificate_id, certificate)], )?; // Adding the certificate to the stream batch = batch.insert_batch( &self.perpetual_tables.streams, [(&expected_position, certificate_id)], )?; index_batch = if let Some(current_source_position) = update_stream_position { index_batch.insert_batch( &self.index_tables.source_list, [(&subnet_id, ¤t_source_position)], )? 
} else { index_batch }; // Return list of new target stream positions of certificate that will be persisted // Information is needed by sequencer/subnet contract to know from // where to continue with streaming on restart let mut target_subnet_stream_positions: HashMap = HashMap::new(); // Adding certificate to target_streams // TODO: Add expected position instead of calculating on the go let mut targets = Vec::new(); let source_list_per_target: Vec<_> = certificate .certificate .target_subnets .iter() .map(|target_subnet| ((*target_subnet, subnet_id), true)) .collect(); for target_subnet_id in &certificate.certificate.target_subnets { let target = match self .index_tables .target_streams .prefix_iter(&TargetSourceListKey(*target_subnet_id, subnet_id))? .last() { None => CertificateTargetStreamPosition::new( *target_subnet_id, subnet_id, Position::ZERO, ), Some((mut target_stream_position, _)) => { target_stream_position.position = target_stream_position .position .increment() .map_err(|error| { InternalStorageError::PositionError(error, subnet_id.into()) })?; target_stream_position } }; target_subnet_stream_positions.insert(*target_subnet_id, target); index_batch = index_batch.insert_batch( &self.index_tables.target_source_list, [( TargetSourceListKey(*target_subnet_id, subnet_id), target.position, )], )?; targets.push((target, certificate_id)); } index_batch = index_batch.insert_batch(&self.index_tables.target_streams, targets)?; index_batch = index_batch.insert_batch( &self.index_tables.source_list_per_target, source_list_per_target, )?; batch.write()?; index_batch.write()?; info!( "Certificate {} inserted at position {}", certificate.certificate.id, expected_position ); Ok(CertificatePositions { targets: target_subnet_stream_positions, source: expected_position, }) } async fn insert_certificates_delivered( &self, certificates: &[CertificateDelivered], ) -> Result<(), StorageError> { for certificate in certificates { _ = 
self.insert_certificate_delivered(certificate).await?; } Ok(()) } } impl ReadStore for FullNodeStore { fn count_certificates_delivered(&self) -> Result { Ok(self .perpetual_tables .certificates .property_int_value(ESTIMATE_NUM_KEYS)?) } fn get_source_head(&self, subnet_id: &SubnetId) -> Result, StorageError> { Ok(self .index_tables .source_list .get(subnet_id)? .map(|(certificate_id, position)| SourceHead { certificate_id, subnet_id: *subnet_id, position, })) } fn get_certificate( &self, certificate_id: &CertificateId, ) -> Result, StorageError> { Ok(self.perpetual_tables.certificates.get(certificate_id)?) } fn get_certificates( &self, certificate_ids: &[CertificateId], ) -> Result>, StorageError> { Ok(self .perpetual_tables .certificates .multi_get(certificate_ids)?) } fn last_delivered_position_for_subnet( &self, subnet_id: &SubnetId, ) -> Result, StorageError> { Ok(self .perpetual_tables .streams .prefix_iter(subnet_id)? .last() .map(|(k, _)| k)) } fn get_checkpoint(&self) -> Result, StorageError> { Ok(self .index_tables .source_list .iter()? .map(|(subnet_id, (certificate_id, position))| { ( subnet_id, SourceHead { certificate_id, subnet_id, position, }, ) }) .collect()) } fn get_source_stream_certificates_from_position( &self, from: CertificateSourceStreamPosition, limit: usize, ) -> Result, StorageError> { let starting_position = from.position; let x: Vec<(CertificateId, CertificateSourceStreamPosition)> = self .perpetual_tables .streams .prefix_iter(&from.subnet_id)? .skip(starting_position.try_into().map_err(|_| { StorageError::InternalStorage(InternalStorageError::InvalidQueryArgument( "Unable to parse Position", )) })?) 
.take(limit) .map(|(k, v)| (v, k)) .collect(); let certificate_ids: Vec<_> = x.iter().map(|(k, _)| k).cloned().collect(); let certificates = self .perpetual_tables .certificates .multi_get(&certificate_ids[..])?; Ok(x.into_iter() .zip(certificates) .filter_map(|((certificate_id, position), certificate)| { certificate .filter(|c| c.certificate.id == certificate_id) .map(|cert| (cert, position)) }) .collect()) } fn get_target_stream_certificates_from_position( &self, position: CertificateTargetStreamPosition, limit: usize, ) -> Result, StorageError> { let starting_position = position.position; let prefix = TargetSourceListKey(position.target_subnet_id, position.source_subnet_id); let certs_with_positions: Vec<(CertificateId, CertificateTargetStreamPosition)> = self .index_tables .target_streams .prefix_iter(&prefix)? .skip(starting_position.try_into().map_err(|_| { StorageError::InternalStorage(InternalStorageError::InvalidQueryArgument( "Unable to parse Position", )) })?) .take(limit) .map(|(k, v)| (v, k)) .collect(); let certificate_ids: Vec<_> = certs_with_positions .iter() .map(|(k, _)| k) .cloned() .collect(); let certificates = self .perpetual_tables .certificates .multi_get(&certificate_ids[..])?; Ok(certs_with_positions .into_iter() .zip(certificates) .filter_map(|((certificate_id, position), certificate)| { certificate .filter(|c| c.certificate.id == certificate_id) .map(|cert| (cert, position)) }) .collect()) } fn get_target_source_subnet_list( &self, target_subnet_id: &SubnetId, ) -> Result, StorageError> { Ok(self .index_tables .source_list_per_target .prefix_iter(target_subnet_id)? 
.map(|((_, source_subnet_id), _)| source_subnet_id) .collect()) } } ================================================ FILE: crates/topos-tce-storage/src/index/mod.rs ================================================ use std::{fs::create_dir_all, path::Path}; use rocksdb::ColumnFamilyDescriptor; use topos_core::{ types::stream::Position, uci::{CertificateId, SubnetId}, }; use tracing::warn; use crate::{ constant::cfs, rocks::{ constants, db::{default_options, init_with_cfs}, db_column::DBColumn, }, types::{TargetSourceListColumn, TargetStreamsColumn}, }; pub struct IndexStore {} pub struct IndexTables { pub(crate) target_streams: TargetStreamsColumn, pub(crate) target_source_list: TargetSourceListColumn, pub(crate) source_list: DBColumn, pub(crate) source_list_per_target: DBColumn<(SubnetId, SubnetId), bool>, } impl IndexTables { pub fn open(path: &Path) -> Self { let path = path.join("index"); if !path.exists() { warn!("Path {:?} does not exist, creating it", path); create_dir_all(&path).expect("Cannot create IndexTables directory"); } let mut options_stream = default_options(); options_stream.set_prefix_extractor(rocksdb::SliceTransform::create_fixed_prefix( constants::TARGET_STREAMS_PREFIX_SIZE, )); let cfs = vec![ ColumnFamilyDescriptor::new(cfs::TARGET_STREAMS, options_stream), ColumnFamilyDescriptor::new(cfs::TARGET_SOURCE_LIST, default_options()), ColumnFamilyDescriptor::new(cfs::SOURCE_LIST, default_options()), ColumnFamilyDescriptor::new( cfs::DELIVERED_CERTIFICATES_PER_SOURCE_FOR_TARGET, default_options(), ), ]; let db = init_with_cfs(&path, default_options(), cfs) .unwrap_or_else(|_| panic!("Cannot open DB at {:?}", path)); Self { target_streams: DBColumn::reopen(&db, cfs::TARGET_STREAMS), target_source_list: DBColumn::reopen(&db, cfs::TARGET_SOURCE_LIST), source_list: DBColumn::reopen(&db, cfs::SOURCE_LIST), source_list_per_target: DBColumn::reopen( &db, cfs::DELIVERED_CERTIFICATES_PER_SOURCE_FOR_TARGET, ), } } } 
================================================ FILE: crates/topos-tce-storage/src/lib.rs ================================================ //! The library provides the storage layer for the Topos TCE. //! It is responsible for storing and retrieving the [certificates](https://docs.topos.technology/content/module-1/4-protocol.html#certificates), managing the //! pending certificates pool and the certificate status, storing different //! metadata related to the protocol and the internal state of the TCE. //! //! The storage layer is implemented using RocksDB. //! The library exposes multiple stores that are used by the TCE. //! //! //! ## Architecture //! //! The storage layer is composed of multiple stores that are used by the TCE. //! Each store is described in detail in its own module. //! //! Those stores are mainly used in `topos-tce-broadcast`, `topos-tce-api` and //! `topos-tce-synchronizer`. //! //! As an overview, the storage layer is composed of the following stores: //! //! //! //! Text changing depending on mode. Light: 'So light!' Dark: 'So dark!' //! //! //! ### Definitions and Responsibilities //! //! As illustrated above, multiple `stores` are exposed in the library using various `tables`. //! //! The difference between a `store` and a `table` is that the `table` is responsible for storing //! the data while the `store` manages the data access and its behavior. //! //! Here's the list of the different stores and their responsibilities: //! //! - The [`EpochValidatorsStore`](struct@epoch::EpochValidatorsStore) is responsible for managing the list of validators for each `epoch`. //! - The [`FullNodeStore`](struct@fullnode::FullNodeStore) is responsible for managing all persistent data such as [`Certificate`](struct@topos_core::uci::Certificate) delivered and associated `streams`. //! - The [`IndexStore`](struct@index::IndexStore) is responsible for managing indexes and collect information about the broadcast and the network. //! 
- The [`ValidatorStore`](struct@validator::ValidatorStore) is responsible for managing the pending data that one validator needs to keep track, such as the certificates pool. //! //! For more information about a `store`, see the related doc. //! //! Next, we've the list of the different tables and their responsibilities: //! //! - The [`EpochValidatorsTables`](struct@epoch::EpochValidatorsTables) is responsible for storing the list of validators for each `epoch`. //! - The [`ValidatorPerpetualTables`](struct@validator::ValidatorPerpetualTables) is responsible for storing the delivered [`Certificate`](struct@topos_core::uci::Certificate)s and the persistent data related to the Broadcast. //! - The [`ValidatorPendingTables`](struct@validator::ValidatorPendingTables) is responsible for storing the pending data, such as the certificates pool. //! - The [`IndexTables`](struct@index::IndexTables) is responsible for storing indexes about the delivery of [`Certificate`](struct@topos_core::uci::Certificate)s such as `target subnet stream`. //! //! ## Special Considerations //! //! When using the storage layer, be aware of the following: //! - The storage layer uses [rocksdb](https://rocksdb.org/) as the backend, which means don't need an external service, as `rocksdb` is an embedded key-value store. //! - The storage layer uses [`Arc`](struct@std::sync::Arc) to share the stores between threads. It also means that a `store` is only instantiated once. //! - Some storage methods are batching multiple writes into a single transaction. //! //! ## Design Philosophy //! //! The choice of using [rocksdb](https://rocksdb.org/) as a backend was made because it matches a lot of the conditions //! that we were expected, such as being embedded and having good performances when reading and //! writing our data. //! //! Splitting storage into multiple `stores` and `tables` allows us to have a strong separation of concerns directly at the storage level. //! //! 
However, `RocksDB` is not the best fit when it comes to compose or filter data based on the data //! itself. //! //! As mentioned above, the different stores are using [`Arc`](struct@std::sync::Arc), allowing a single store to be instantiated once //! and then shared between threads. This is very useful when it comes to the [`FullNodeStore`](struct@fullnode::FullNodeStore) as it is used //! in various places but should provide single entry point to the data. //! //! It also means that the store is immutable thus can be shared easily between threads, //! which is a good thing for the concurrency. //! However, some stores are implementing the [`WriteStore`](trait@store::WriteStore) trait in order to //! insert or mutate data, managing locks on resources and preventing any other query to mutate the data //! currently in processing. For more information about the locks see [`locking`](module@fullnode::locking) //! //! The rest of the mutation on the data are handled by [rocksdb](https://rocksdb.org/) itself. //! 
use serde::{Deserialize, Serialize}; use std::collections::HashMap; use topos_core::{ types::stream::{CertificateSourceStreamPosition, CertificateTargetStreamPosition, Position}, uci::{CertificateId, SubnetId}, }; // v2 pub mod constant; /// Epoch related store pub mod epoch; /// Fullnode store pub mod fullnode; pub mod index; pub mod types; pub mod validator; // v1 pub mod client; pub mod errors; #[cfg(feature = "rocksdb")] pub(crate) mod rocks; #[cfg(test)] mod tests; pub use client::StorageClient; pub mod store; pub type PendingCertificateId = u64; #[derive(Debug)] pub enum FetchCertificatesFilter { Source { source_stream_position: CertificateSourceStreamPosition, limit: usize, }, Target { target_stream_position: CertificateTargetStreamPosition, limit: usize, }, } #[derive(Debug)] pub enum FetchCertificatesPosition { Source(CertificateSourceStreamPosition), Target(CertificateTargetStreamPosition), } #[derive(Debug, Clone)] pub struct CertificatePositions { pub targets: HashMap, pub source: CertificateSourceStreamPosition, } /// Uniquely identify the source certificate stream head of one subnet. /// The head represent the internal state of the TCE regarding a source subnet stream for /// certificates that it receives from local sequencer #[derive(Serialize, Deserialize, Debug, Clone)] pub struct SourceHead { /// Certificate id of the head pub certificate_id: CertificateId, /// Subnet id of the head pub subnet_id: SubnetId, /// Position of the Certificate pub position: Position, } ================================================ FILE: crates/topos-tce-storage/src/rocks/constants.rs ================================================ //! 
This module is defining constant names for CFs pub(crate) const PENDING_CERTIFICATES: &str = "PENDING_CERTIFICATES"; pub(crate) const CERTIFICATES: &str = "CERTIFICATES"; pub(crate) const SOURCE_STREAMS: &str = "SOURCE_STREAMS"; pub(crate) const TARGET_STREAMS: &str = "TARGET_STREAMS"; pub(crate) const TARGET_SOURCES: &str = "TARGET_SOURCES"; pub(crate) const TARGET_STREAMS_PREFIX_SIZE: usize = 32 * 2; pub(crate) const SOURCE_STREAMS_PREFIX_SIZE: usize = 32; ================================================ FILE: crates/topos-tce-storage/src/rocks/db.rs ================================================ use rocksdb::MultiThreaded; use std::{path::PathBuf, sync::Arc}; use rocksdb::{ColumnFamilyDescriptor, Options}; use crate::errors::InternalStorageError; use super::constants; pub(crate) type RocksDB = Arc>; pub(crate) fn init_with_cfs( path: &PathBuf, mut options: rocksdb::Options, cfs: Vec, ) -> Result { options.create_missing_column_families(true); Ok(Arc::new( rocksdb::DBWithThreadMode::::open_cf_descriptors(&options, path, cfs)?, )) } pub(crate) fn default_options() -> rocksdb::Options { let mut options = Options::default(); options.create_if_missing(true); options } pub(crate) fn init_db( path: &PathBuf, options: rocksdb::Options, ) -> Result { let mut options_source = default_options(); options_source.set_prefix_extractor(rocksdb::SliceTransform::create_fixed_prefix( constants::SOURCE_STREAMS_PREFIX_SIZE, )); let mut options_target = Options::default(); options_target.create_if_missing(true); options_target.set_prefix_extractor(rocksdb::SliceTransform::create_fixed_prefix( constants::TARGET_STREAMS_PREFIX_SIZE, )); let cfs = vec![ ColumnFamilyDescriptor::new(constants::PENDING_CERTIFICATES, default_options()), ColumnFamilyDescriptor::new(constants::CERTIFICATES, rocksdb::Options::default()), ColumnFamilyDescriptor::new(constants::SOURCE_STREAMS, options_source), ColumnFamilyDescriptor::new(constants::TARGET_STREAMS, options_target), 
ColumnFamilyDescriptor::new(constants::TARGET_SOURCES, default_options()), ]; init_with_cfs(path, options, cfs) } ================================================ FILE: crates/topos-tce-storage/src/rocks/db_column.rs ================================================ use std::{borrow::Borrow, marker::PhantomData, sync::Arc}; #[cfg(test)] use std::path::Path; #[cfg(test)] use rocksdb::ColumnFamilyDescriptor; use rocksdb::{ BoundColumnFamily, CStrLike, DBRawIteratorWithThreadMode, DBWithThreadMode, Direction, IteratorMode, MultiThreaded, ReadOptions, WriteBatch, }; use bincode::Options; use serde::{de::DeserializeOwned, Serialize}; use crate::errors::InternalStorageError; use super::{iterator::ColumnIterator, map::Map, RocksDB}; /// A DBColumn represents a CF structure #[derive(Clone, Debug)] pub struct DBColumn { pub(crate) rocksdb: RocksDB, _phantom: PhantomData V>, cf: &'static str, } impl DBColumn { #[cfg(test)] #[allow(dead_code)] pub fn open>( path: P, db_options: Option, column: &'static str, ) -> Result { let mut options = db_options.unwrap_or_default(); let default_rocksdb_options = rocksdb::Options::default(); let primary = path.as_ref().to_path_buf(); let rocksdb = { options.create_if_missing(true); options.create_missing_column_families(true); Arc::new( rocksdb::DBWithThreadMode::::open_cf_descriptors( &options, primary, vec![ColumnFamilyDescriptor::new(column, default_rocksdb_options)], )?, ) }; Ok(Self { rocksdb, _phantom: PhantomData, cf: column, }) } pub fn reopen(db: &RocksDB, column: &'static str) -> Self { Self { rocksdb: db.clone(), _phantom: PhantomData, cf: column, } } /// Returns the CF of the DBColumn, used to build queries. 
pub(crate) fn cf(&self) -> Result>, InternalStorageError> { self.rocksdb .cf_handle(self.cf) .ok_or(InternalStorageError::InvalidColumnFamily(self.cf)) } } impl DBColumn where K: DeserializeOwned + Serialize + std::fmt::Debug, V: DeserializeOwned + Serialize + std::fmt::Debug, { pub(crate) fn property_int_value( &self, property: impl CStrLike, ) -> Result { self.rocksdb .property_int_value_cf(&self.cf()?, property)? .ok_or(InternalStorageError::UnexpectedDBState( "Property not found", )) } /// Insert a record into the storage by passing a Key and a Value. /// /// Key are fixed length bincode serialized. pub(crate) fn insert(&self, key: &K, value: &V) -> Result<(), InternalStorageError> { let cf = self.cf()?; let key_buf = be_fix_int_ser(key)?; let value_buf = bincode::serialize(value)?; self.rocksdb.put_cf(&cf, key_buf, value_buf)?; Ok(()) } /// Delete a record from the storage by passing a Key /// /// Key are fixed length bincode serialized. pub(crate) fn delete(&self, key: &K) -> Result<(), InternalStorageError> { let key_buf = be_fix_int_ser(key)?; self.rocksdb.delete_cf(&self.cf()?, key_buf)?; Ok(()) } /// Get a record from the storage by passing a Key /// /// Key are fixed length bincode serialized. pub(crate) fn get(&self, key: &K) -> Result, InternalStorageError> { let key_buf = be_fix_int_ser(key)?; self.rocksdb .get_pinned_cf(&self.cf()?, key_buf)? 
.map_or(Ok(None), |v| { bincode::deserialize::(&v) .map(|r| Some(r)) .map_err(|_| InternalStorageError::UnableToDeserializeValue) }) } pub(crate) fn multi_insert( &self, key_value_pairs: impl IntoIterator, ) -> Result<(), InternalStorageError> { let batch = self.batch(); batch.insert_batch(self, key_value_pairs)?.write() } pub(crate) fn multi_get(&self, keys: &[K]) -> Result>, InternalStorageError> { let keys: Result, InternalStorageError> = keys.iter().map(|k| be_fix_int_ser(k)).collect(); let results: Result, InternalStorageError> = self .rocksdb .batched_multi_get_cf_opt(&self.cf()?, keys?, false, &ReadOptions::default()) .into_iter() .map(|r| r.map_err(InternalStorageError::RocksDBError)) .collect(); results? .into_iter() .map(|e| match e { Some(v) => bincode::deserialize(&v) .map_err(InternalStorageError::Bincode) .map(|v| Some(v)), None => Ok(None), }) .collect() } #[allow(unused)] pub(crate) fn merge(&self, key: &K, value: V) -> Result<(), InternalStorageError> { let key_buf = be_fix_int_ser(key)?; let value_buf = bincode::serialize(&value)?; Ok(self.rocksdb.merge_cf(&self.cf()?, key_buf, value_buf)?) 
} pub(crate) fn batch(&self) -> DBBatch { DBBatch::new(&self.rocksdb) } } pub(crate) struct DBBatch { rocksdb: Arc>, batch: WriteBatch, } impl DBBatch { fn new(rocksdb: &Arc>) -> Self { Self { rocksdb: rocksdb.clone(), batch: WriteBatch::default(), } } pub(crate) fn insert_batch( mut self, db: &DBColumn, values: impl IntoIterator, ) -> Result where K: Serialize + std::fmt::Debug, V: Serialize + std::fmt::Debug, Key: Borrow, Value: Borrow, { check_cross_batch(&self.rocksdb, &db.rocksdb)?; values .into_iter() .try_for_each::<_, Result<(), InternalStorageError>>(|(k, v)| { let key_buffer = be_fix_int_ser(k.borrow())?; let value_buffer = bincode::serialize(v.borrow())?; self.batch.put_cf(&db.cf()?, key_buffer, value_buffer); Ok(()) })?; Ok(self) } pub(crate) fn write(self) -> Result<(), InternalStorageError> { self.rocksdb.write(self.batch)?; Ok(()) } } impl<'a, K, V> Map<'a, K, V> for DBColumn where K: Serialize + DeserializeOwned, V: Serialize + DeserializeOwned, { type Iterator = ColumnIterator<'a, K, V>; fn iter(&'a self) -> Result { let mut raw_iterator = self.rocksdb.raw_iterator_cf(&self.cf()?); raw_iterator.seek_to_first(); Ok(ColumnIterator::new(raw_iterator)) } fn iter_at(&'a self, index: &I) -> Result { let mut raw_iterator = self.rocksdb.raw_iterator_cf(&self.cf()?); raw_iterator.seek(be_fix_int_ser(index)?); Ok(ColumnIterator::new(raw_iterator)) } fn iter_with_mode( &'a self, mode: IteratorMode<'_>, ) -> Result { let mut raw_iterator = self.rocksdb.raw_iterator_cf(&self.cf()?); let direction = match mode { IteratorMode::Start => { raw_iterator.seek_to_first(); Direction::Forward } IteratorMode::End => { raw_iterator.seek_to_last(); Direction::Forward } _ => unimplemented!(), }; Ok(ColumnIterator::new_with_direction(raw_iterator, direction)) } fn prefix_iter( &'a self, prefix: &P, ) -> Result { let iterator = self .rocksdb .prefix_iterator_cf(&self.cf()?, be_fix_int_ser(prefix)?) 
.into(); Ok(ColumnIterator::new(iterator)) } fn prefix_iter_at( &'a self, prefix: &P, index: &I, ) -> Result { let mut iterator: DBRawIteratorWithThreadMode<_> = self .rocksdb .prefix_iterator_cf(&self.cf()?, be_fix_int_ser(prefix)?) .into(); iterator.seek(be_fix_int_ser(index)?); Ok(ColumnIterator::new(iterator)) } } /// Serialize a value using a fix length serialize and a big endian endianness pub(crate) fn be_fix_int_ser(t: &S) -> Result, InternalStorageError> where S: Serialize + ?Sized, { Ok(bincode::DefaultOptions::new() .with_big_endian() .with_fixint_encoding() .serialize(t)?) } fn check_cross_batch(base: &RocksDB, current: &RocksDB) -> Result<(), InternalStorageError> { if !Arc::ptr_eq(base, current) { return Err(InternalStorageError::ConcurrentDBBatchDetected); } Ok(()) } ================================================ FILE: crates/topos-tce-storage/src/rocks/iterator.rs ================================================ use std::marker::PhantomData; use bincode::Options; use rocksdb::{DBRawIteratorWithThreadMode, DBWithThreadMode, Direction, MultiThreaded}; use serde::de::DeserializeOwned; pub struct ColumnIterator<'a, K, V> { iterator: DBRawIteratorWithThreadMode<'a, DBWithThreadMode>, direction: Direction, _phantom: PhantomData<(K, V)>, } impl<'a, K, V> ColumnIterator<'a, K, V> { /// Creates a new ColumnIterator base on a DBRawIteratorWithThreadMode pub fn new(iterator: DBRawIteratorWithThreadMode<'a, DBWithThreadMode>) -> Self { Self::new_with_direction(iterator, Direction::Forward) } pub fn new_with_direction( iterator: DBRawIteratorWithThreadMode<'a, DBWithThreadMode>, direction: Direction, ) -> Self { Self { iterator, direction, _phantom: PhantomData, } } } impl<'a, K, V> Iterator for ColumnIterator<'a, K, V> where K: DeserializeOwned, V: DeserializeOwned, { type Item = (K, V); fn next(&mut self) -> Option { if self.iterator.valid() { let config = bincode::DefaultOptions::new() .with_big_endian() .with_fixint_encoding(); let key = 
self.iterator.key().and_then(|k| config.deserialize(k).ok()); let value = self .iterator .value() .and_then(|v| bincode::deserialize(v).ok()); match self.direction { Direction::Forward => self.iterator.next(), Direction::Reverse => self.iterator.prev(), } key.and_then(|k| value.map(|v| (k, v))) } else { None } } } ================================================ FILE: crates/topos-tce-storage/src/rocks/map.rs ================================================ use rocksdb::IteratorMode; use serde::{de::DeserializeOwned, Serialize}; use crate::errors::InternalStorageError; pub trait Map<'a, K, V> where K: Serialize + DeserializeOwned + ?Sized, V: Serialize + DeserializeOwned, { type Iterator: Iterator; /// Returns an Iterator over the whole CF fn iter(&'a self) -> Result; /// Returns an Iterator over the CF starting from index fn iter_at(&'a self, index: &I) -> Result; /// Returns an Iterator over the whole CF with mode configured #[allow(dead_code)] fn iter_with_mode( &'a self, mode: IteratorMode<'_>, ) -> Result; /// Returns a prefixed Iterator over the CF fn prefix_iter( &'a self, prefix: &P, ) -> Result; /// Returns a prefixed Iterator over the CF starting from index #[allow(dead_code)] fn prefix_iter_at( &'a self, prefix: &P, index: &I, ) -> Result; } ================================================ FILE: crates/topos-tce-storage/src/rocks/types.rs ================================================ use serde::{Deserialize, Serialize}; use crate::SubnetId; #[derive(Debug, Serialize, Deserialize)] pub(crate) struct TargetSourceListKey( // Target subnet id pub(crate) SubnetId, // Source subnet id pub(crate) SubnetId, ); ================================================ FILE: crates/topos-tce-storage/src/rocks.rs ================================================ use self::db::RocksDB; pub(crate) mod constants; pub(crate) mod db; pub(crate) mod db_column; pub(crate) mod iterator; pub(crate) mod map; pub(crate) mod types; pub(crate) use types::*; 
================================================ FILE: crates/topos-tce-storage/src/store.rs ================================================ use std::collections::HashMap; use async_trait::async_trait; use topos_core::{ types::{stream::CertificateSourceStreamPosition, CertificateDelivered}, uci::{CertificateId, SubnetId}, }; use crate::{ errors::StorageError, CertificatePositions, CertificateTargetStreamPosition, SourceHead, }; /// This trait exposes common methods between /// [`ValidatorStore`](struct@super::validator::ValidatorStore) and /// [`FullNodeStore`](struct@super::fullnode::FullNodeStore) to write data. /// /// All methods are `async` to allow the implementation to deal with write concurrency. #[async_trait] pub trait WriteStore: Send { /// Insert a [`CertificateDelivered`] in the storage. Returns its positions /// in the source and target streams. /// /// The [`ValidatorStore`](struct@super::validator::ValidatorStore) implementation /// checks for a [`PendingCertificateId`](type@super::PendingCertificateId) and remove it if /// the certificate is successfully inserted. async fn insert_certificate_delivered( &self, certificate: &CertificateDelivered, ) -> Result; /// Insert multiple [`CertificateDelivered`] in the storage. /// /// See [`insert_certificate_delivered`](fn@WriteStore::insert_certificate_delivered) for more /// details async fn insert_certificates_delivered( &self, certificates: &[CertificateDelivered], ) -> Result<(), StorageError>; } /// This trait exposes common methods between /// [`ValidatorStore`](struct@super::validator::ValidatorStore) and /// [`FullNodeStore`](struct@super::fullnode::FullNodeStore) to read data. pub trait ReadStore: Send { /// Returns the number of certificates delivered fn count_certificates_delivered(&self) -> Result; /// Try to get a SourceHead of a subnet /// /// Returns `Ok(None)` if the subnet is not found, meaning that no certificate are currently /// delivered for this particular subnet. 
fn get_source_head(&self, subnet_id: &SubnetId) -> Result, StorageError>; /// Try to get a [`CertificateDelivered`] /// /// Returns `Ok(None)` if the certificate is not found, meaning that the certificate is either /// inexisting or not yet delivered. fn get_certificate( &self, certificate_id: &CertificateId, ) -> Result, StorageError>; /// Try to get multiple [`CertificateDelivered`] at once. /// /// See [`get_certificate`](fn@ReadStore::get_certificate) fn get_certificates( &self, certificate_ids: &[CertificateId], ) -> Result>, StorageError>; /// Try to return the latest delivered position for a source subnet fn last_delivered_position_for_subnet( &self, subnet_id: &SubnetId, ) -> Result, StorageError>; /// Returns the local checkpoint /// /// A `Checkpoint` is the representation of the state of delivery, it is a list of [`SubnetId`] /// with the associated [`SourceHead`] fn get_checkpoint(&self) -> Result, StorageError>; /// Returns the certificates delivered by a source subnet from a position. fn get_source_stream_certificates_from_position( &self, from: CertificateSourceStreamPosition, limit: usize, ) -> Result, StorageError>; /// Returns the certificates delivered to a target subnet from a position. 
fn get_target_stream_certificates_from_position( &self, position: CertificateTargetStreamPosition, limit: usize, ) -> Result, StorageError>; /// Returns the list of source subnets that delivered certificates to a particular target subnet fn get_target_source_subnet_list( &self, target_subnet_id: &SubnetId, ) -> Result, StorageError>; } ================================================ FILE: crates/topos-tce-storage/src/tests/checkpoints.rs ================================================ use std::{collections::HashMap, sync::Arc}; use rstest::rstest; use topos_core::uci::SubnetId; use topos_test_sdk::{ certificates::create_certificate_chain, constants::{SOURCE_SUBNET_ID_1, SOURCE_SUBNET_ID_2, TARGET_SUBNET_ID_1}, }; use super::support::store; use crate::{ store::{ReadStore, WriteStore}, validator::ValidatorStore, }; #[rstest] #[tokio::test] async fn get_checkpoint_for_two_subnets(store: Arc) { let certificates_a = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 32); let certificates_b = create_certificate_chain(SOURCE_SUBNET_ID_2, &[TARGET_SUBNET_ID_1], 24); for cert in certificates_a { _ = store.insert_certificate_delivered(&cert).await; } for cert in certificates_b { _ = store.insert_certificate_delivered(&cert).await; } let checkpoint = store .get_checkpoint() .unwrap() .into_iter() .map(|(subnet, value)| (subnet, *value.position)) .collect::>(); assert_eq!(checkpoint.len(), 2); assert_eq!(*checkpoint.get(&SOURCE_SUBNET_ID_1).unwrap(), 31); assert_eq!(*checkpoint.get(&SOURCE_SUBNET_ID_2).unwrap(), 23); } #[rstest] #[tokio::test] async fn get_checkpoint_diff_with_no_input(store: Arc) { let certificates_a = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 32); let certificates_b = create_certificate_chain(SOURCE_SUBNET_ID_2, &[TARGET_SUBNET_ID_1], 24); for cert in certificates_a { _ = store.insert_certificate_delivered(&cert).await; } for cert in certificates_b { _ = store.insert_certificate_delivered(&cert).await; } let 
checkpoint = store .get_checkpoint_diff(&[], 100) .unwrap() .into_iter() .map(|(subnet, proofs)| { ( subnet, proofs .iter() .map(|proof| *proof.delivery_position.position) .collect::>(), ) }) .collect::>(); assert_eq!(checkpoint.len(), 2); assert_eq!( *checkpoint.get(&SOURCE_SUBNET_ID_1).unwrap(), (0..=31).collect::>() ); assert_eq!( *checkpoint.get(&SOURCE_SUBNET_ID_2).unwrap(), (0..=23).collect::>() ); } #[rstest] #[tokio::test] async fn get_checkpoint_diff_with_input(store: Arc) { let certificates_a = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 32); let certificates_b = create_certificate_chain(SOURCE_SUBNET_ID_2, &[TARGET_SUBNET_ID_1], 24); let checkpoint = certificates_a.get(20).unwrap().proof_of_delivery.clone(); assert_eq!(*checkpoint.delivery_position.position, 20); for cert in certificates_a { _ = store.insert_certificate_delivered(&cert).await; } for cert in certificates_b { _ = store.insert_certificate_delivered(&cert).await; } let checkpoint = store .get_checkpoint_diff(&[checkpoint], 100) .unwrap() .into_iter() .map(|(subnet, proofs)| { ( subnet, proofs .iter() .map(|proof| *proof.delivery_position.position) .collect::>(), ) }) .collect::>(); assert_eq!(checkpoint.len(), 2); assert_eq!( *checkpoint.get(&SOURCE_SUBNET_ID_1).unwrap(), (21..=31).collect::>() ); assert_eq!( *checkpoint.get(&SOURCE_SUBNET_ID_2).unwrap(), (0..=23).collect::>() ); } ================================================ FILE: crates/topos-tce-storage/src/tests/db_columns.rs ================================================ use rstest::rstest; use test_log::test; use topos_core::types::stream::CertificateSourceStreamPosition; use topos_core::uci::Certificate; use topos_test_sdk::certificates::create_certificate_at_position; use topos_test_sdk::constants::SOURCE_SUBNET_ID_1; use crate::rocks::map::Map; use crate::tests::{PREV_CERTIFICATE_ID, SOURCE_STORAGE_SUBNET_ID}; use crate::types::{CertificatesColumn, PendingCertificatesColumn, StreamsColumn}; use 
crate::Position;

use super::support::columns::{certificates_column, pending_column, source_streams_column};

// Column-level tests: exercise the raw RocksDB-backed columns directly, without going
// through the higher-level store API.

#[rstest]
#[test(tokio::test)]
async fn can_persist_a_pending_certificate(pending_column: PendingCertificatesColumn) {
    let certificate =
        Certificate::new_with_default_fields(PREV_CERTIFICATE_ID, SOURCE_SUBNET_ID_1, &[]).unwrap();

    assert!(pending_column.insert(&0, &certificate).is_ok());
    assert_eq!(pending_column.get(&0).unwrap(), Some(certificate));
}

#[rstest]
#[test(tokio::test)]
async fn can_persist_a_delivered_certificate(certificates_column: CertificatesColumn) {
    let certificate =
        Certificate::new_with_default_fields(PREV_CERTIFICATE_ID, SOURCE_SUBNET_ID_1, &Vec::new())
            .unwrap();
    let certificate = create_certificate_at_position(Position::ZERO, certificate);

    assert!(certificates_column
        .insert(&certificate.certificate.id, &certificate)
        .is_ok());
    assert_eq!(
        certificates_column
            .get(&certificate.certificate.id)
            .unwrap(),
        Some(certificate)
    );
}

#[rstest]
#[test(tokio::test)]
async fn delivered_certificate_position_are_incremented(
    certificates_column: CertificatesColumn,
    source_streams_column: StreamsColumn,
) {
    let certificate =
        Certificate::new_with_default_fields(PREV_CERTIFICATE_ID, SOURCE_SUBNET_ID_1, &[]).unwrap();
    let certificate = create_certificate_at_position(Position::ZERO, certificate);

    assert!(certificates_column
        .insert(&certificate.certificate.id, &certificate)
        .is_ok());
    assert!(source_streams_column
        .insert(
            &CertificateSourceStreamPosition::new(SOURCE_STORAGE_SUBNET_ID, Position::ZERO),
            &certificate.certificate.id
        )
        .is_ok());
}

#[rstest]
#[test(tokio::test)]
async fn position_can_be_fetch_for_one_subnet(source_streams_column: StreamsColumn) {
    let certificate =
        Certificate::new_with_default_fields(PREV_CERTIFICATE_ID, SOURCE_SUBNET_ID_1, &[]).unwrap();

    assert!(source_streams_column
        .insert(
            &CertificateSourceStreamPosition::new(SOURCE_STORAGE_SUBNET_ID, Position::ZERO),
            &certificate.id
        )
        .is_ok());

    // The last entry of the subnet prefix must be the position we just inserted.
    assert!(matches!(
        source_streams_column
            .prefix_iter(&SOURCE_SUBNET_ID_1)
            .unwrap()
            .last(),
        Some((
            CertificateSourceStreamPosition {
                position: Position::ZERO,
                ..
            },
            _
        ))
    ));

    let certificate =
        Certificate::new_with_default_fields(PREV_CERTIFICATE_ID, SOURCE_SUBNET_ID_1, &[]).unwrap();

    assert!(source_streams_column
        .insert(
            &CertificateSourceStreamPosition::new(SOURCE_STORAGE_SUBNET_ID, 1),
            &certificate.id
        )
        .is_ok());

    // After a second insert at position 1, that becomes the tail of the stream.
    let expected_position: Position = 1.into();
    assert!(matches!(
        source_streams_column
            .prefix_iter(&SOURCE_SUBNET_ID_1)
            .unwrap()
            .last(),
        Some((
            CertificateSourceStreamPosition {
                position,
                ..
            },
            _
        )) if expected_position == position
    ));
}

#[test(tokio::test)]
#[ignore = "not yet implemented"]
async fn position_can_be_fetch_for_multiple_subnets() {}

#[test(tokio::test)]
#[ignore = "not yet implemented"]
async fn position_can_be_fetch_for_all_subnets() {}

================================================ FILE: crates/topos-tce-storage/src/tests/mod.rs ================================================
use rstest::rstest;
use std::sync::Arc;
use test_log::test;
use topos_core::{
    types::{
        stream::{CertificateSourceStreamPosition, CertificateTargetStreamPosition, Position},
        CertificateDelivered, ProofOfDelivery,
    },
    uci::{Certificate, SubnetId},
};

use crate::{
    errors::StorageError,
    rocks::map::Map,
    store::{ReadStore, WriteStore},
    validator::ValidatorStore,
};

use self::support::store;
use topos_test_sdk::certificates::create_certificate_chain;
use topos_test_sdk::constants::*;

mod checkpoints;
mod db_columns;
mod pending_certificates;
mod position;
mod rocks;
pub(crate) mod support;

const SOURCE_STORAGE_SUBNET_ID: SubnetId = SOURCE_SUBNET_ID_1;
const TARGET_STORAGE_SUBNET_ID_1: SubnetId = TARGET_SUBNET_ID_1;
const TARGET_STORAGE_SUBNET_ID_2: SubnetId = TARGET_SUBNET_ID_2;

#[rstest]
#[tokio::test]
async fn can_persist_a_pending_certificate(store: Arc<ValidatorStore>) {
    let certificate =
        Certificate::new_with_default_fields(PREV_CERTIFICATE_ID, SOURCE_SUBNET_ID_1, &[]).unwrap();
assert!(store.insert_pending_certificate(&certificate).await.is_ok()); } #[rstest] #[test(tokio::test)] async fn can_persist_a_delivered_certificate(store: Arc) { let certificate = Certificate::new_with_default_fields( PREV_CERTIFICATE_ID, SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], ) .unwrap(); let certificate_id = certificate.id; let certificate = CertificateDelivered { certificate, proof_of_delivery: ProofOfDelivery { delivery_position: CertificateSourceStreamPosition::new( SOURCE_SUBNET_ID_1, Position::ZERO, ), readies: vec![], threshold: 0, certificate_id, }, }; store .insert_certificate_delivered(&certificate) .await .unwrap(); let certificates_table = store.fullnode_store.perpetual_tables.certificates.clone(); let streams_table = store.fullnode_store.perpetual_tables.streams.clone(); let targets_streams_table = store.fullnode_store.index_tables.target_streams.clone(); assert!(certificates_table.get(&certificate.certificate.id).is_ok()); let stream_element = streams_table .prefix_iter(&SOURCE_SUBNET_ID_1) .unwrap() .last() .unwrap(); assert_eq!(stream_element.0.position, Position::ZERO); let stream_element = targets_streams_table .prefix_iter::<(SubnetId, SubnetId)>(&( TARGET_STORAGE_SUBNET_ID_1, SOURCE_STORAGE_SUBNET_ID, )) .unwrap() .last() .unwrap(); assert_eq!(stream_element.0.position, Position::ZERO); assert_eq!(stream_element.1, certificate.certificate.id); } #[rstest] #[test(tokio::test)] async fn cannot_persist_a_delivered_certificate_twice(store: Arc) { let certificate = Certificate::new_with_default_fields( PREV_CERTIFICATE_ID, SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], ) .unwrap(); let certificate_id = certificate.id; let certificate = CertificateDelivered { certificate, proof_of_delivery: ProofOfDelivery { delivery_position: CertificateSourceStreamPosition::new( SOURCE_SUBNET_ID_1, Position::ZERO, ), readies: vec![], threshold: 0, certificate_id, }, }; store .insert_certificate_delivered(&certificate) .await .unwrap(); let result = 
store.insert_certificate_delivered(&certificate).await; assert!(result.is_err()); assert!(matches!( result, Err(StorageError::InternalStorage( crate::errors::InternalStorageError::CertificateAlreadyExists )) )); } #[rstest] #[test(tokio::test)] async fn cannot_persist_a_delivered_certificate_at_same_position(store: Arc) { let certificate = Certificate::new_with_default_fields( PREV_CERTIFICATE_ID, SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], ) .unwrap(); let certificate_2 = Certificate::new_with_default_fields( PREV_CERTIFICATE_ID, SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1, TARGET_SUBNET_ID_2], ) .unwrap(); let certificate_id = certificate.id; let certificate_id_2 = certificate_2.id; assert_ne!(certificate_id, certificate_id_2); let certificate = CertificateDelivered { certificate, proof_of_delivery: ProofOfDelivery { delivery_position: CertificateSourceStreamPosition::new( SOURCE_SUBNET_ID_1, Position::ZERO, ), readies: vec![], threshold: 0, certificate_id, }, }; let certificate_2 = CertificateDelivered { certificate: certificate_2, proof_of_delivery: ProofOfDelivery { delivery_position: CertificateSourceStreamPosition::new( SOURCE_SUBNET_ID_1, Position::ZERO, ), readies: vec![], threshold: 0, certificate_id: certificate_id_2, }, }; store .insert_certificate_delivered(&certificate) .await .unwrap(); let result = store.insert_certificate_delivered(&certificate_2).await; assert!(result.is_err()); assert!(matches!( result, Err(StorageError::InternalStorage( crate::errors::InternalStorageError::CertificateAlreadyExistsAtPosition(_, _) )) )); } #[rstest] #[test(tokio::test)] async fn delivered_certificate_are_added_to_target_stream(store: Arc) { let certificates_column = store.fullnode_store.perpetual_tables.certificates.clone(); let source_streams_column = store.fullnode_store.perpetual_tables.streams.clone(); let target_streams_column = store.fullnode_store.index_tables.target_streams.clone(); target_streams_column .insert( &CertificateTargetStreamPosition::new( 
TARGET_STORAGE_SUBNET_ID_1, SOURCE_STORAGE_SUBNET_ID, Position::ZERO, ), &CERTIFICATE_ID_1, ) .unwrap(); let certificate = Certificate::new_with_default_fields( CERTIFICATE_ID_1, SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1, TARGET_SUBNET_ID_2], ) .unwrap(); let certificate_id = certificate.id; let certificate = CertificateDelivered { certificate, proof_of_delivery: ProofOfDelivery { delivery_position: CertificateSourceStreamPosition::new( SOURCE_SUBNET_ID_1, Position::ZERO, ), readies: vec![], threshold: 0, certificate_id, }, }; store .insert_certificate_delivered(&certificate) .await .unwrap(); assert!(certificates_column.get(&certificate_id).is_ok()); let stream_element = source_streams_column .prefix_iter(&SOURCE_SUBNET_ID_1) .unwrap() .last() .unwrap(); assert_eq!(stream_element.0.position, Position::ZERO); let stream_element = target_streams_column .prefix_iter(&(&TARGET_STORAGE_SUBNET_ID_1, &SOURCE_STORAGE_SUBNET_ID)) .unwrap() .last() .unwrap(); assert_eq!(*stream_element.0.position, 1); let stream_element = target_streams_column .prefix_iter(&(&TARGET_STORAGE_SUBNET_ID_2, &SOURCE_STORAGE_SUBNET_ID)) .unwrap() .last() .unwrap(); assert_eq!(stream_element.0.position, Position::ZERO); } #[rstest] #[test(tokio::test)] async fn pending_certificate_are_removed_during_persist_action(store: Arc) { let pending_column = store.pending_tables.pending_pool.clone(); let certificate = Certificate::new_with_default_fields( PREV_CERTIFICATE_ID, SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], ) .unwrap(); let certificate_id = certificate.id; let pending_id = store .insert_pending_certificate(&certificate) .await .unwrap() .unwrap(); let certificate = CertificateDelivered { certificate, proof_of_delivery: ProofOfDelivery { certificate_id, delivery_position: CertificateSourceStreamPosition::new( SOURCE_SUBNET_ID_1, Position::ZERO, ), readies: vec![], threshold: 0, }, }; assert!(pending_column.get(&pending_id).is_ok()); store .insert_certificate_delivered(&certificate) .await 
.unwrap(); assert!(matches!(pending_column.get(&pending_id), Ok(None))); } #[rstest] #[test(tokio::test)] async fn fetch_certificates_for_subnets(store: Arc) { let other_certificate = Certificate::new_with_default_fields( PREV_CERTIFICATE_ID, TARGET_SUBNET_ID_2, &[TARGET_SUBNET_ID_1], ) .unwrap(); let certificate_id = other_certificate.id; let other_certificate = CertificateDelivered { certificate: other_certificate, proof_of_delivery: ProofOfDelivery { certificate_id, delivery_position: CertificateSourceStreamPosition::new( TARGET_SUBNET_ID_2, Position::ZERO, ), readies: vec![], threshold: 0, }, }; store .insert_certificate_delivered(&other_certificate) .await .unwrap(); let mut expected_certificates = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 10) .into_iter() .enumerate() .map(|(index, v)| CertificateDelivered { certificate: v.certificate.clone(), proof_of_delivery: ProofOfDelivery { certificate_id: v.certificate.id, delivery_position: CertificateSourceStreamPosition::new( SOURCE_SUBNET_ID_1, index as u64, ), readies: vec![], threshold: 0, }, }) .collect::>(); for cert in &expected_certificates { store.insert_certificate_delivered(cert).await.unwrap(); } let mut certificate_ids = store .get_source_stream_certificates_from_position( CertificateSourceStreamPosition::new(SOURCE_STORAGE_SUBNET_ID, Position::ZERO), 5, ) .unwrap() .into_iter() .map(|(certificate, _)| certificate.certificate.id) .collect::>(); assert_eq!(5, certificate_ids.len()); let certificate_ids_second = store .get_source_stream_certificates_from_position( CertificateSourceStreamPosition::new(SOURCE_STORAGE_SUBNET_ID, 5), 5, ) .unwrap() .into_iter() .map(|(certificate, _)| certificate.certificate.id) .collect::>(); assert_eq!(5, certificate_ids_second.len()); certificate_ids.extend(certificate_ids_second.into_iter()); let certificates = store .get_certificates(&certificate_ids[..]) .unwrap() .into_iter() .flatten() .collect::>(); assert_eq!(expected_certificates, 
certificates);

    // Now read through the target streams: 10 from subnet 1 plus the single
    // certificate emitted by TARGET_SUBNET_ID_2.
    let mut certificate_ids = store
        .get_target_stream_certificates_from_position(
            CertificateTargetStreamPosition::new(
                TARGET_STORAGE_SUBNET_ID_1,
                SOURCE_STORAGE_SUBNET_ID,
                Position::ZERO,
            ),
            100,
        )
        .unwrap()
        .into_iter()
        .map(|(c, _)| c.certificate.id)
        .collect::<Vec<_>>();

    certificate_ids.extend(
        store
            .get_target_stream_certificates_from_position(
                CertificateTargetStreamPosition::new(
                    TARGET_STORAGE_SUBNET_ID_1,
                    TARGET_STORAGE_SUBNET_ID_2,
                    Position::ZERO,
                ),
                100,
            )
            .unwrap()
            .into_iter()
            .map(|(c, _)| c.certificate.id),
    );

    assert_eq!(11, certificate_ids.len());

    let certificates = store
        .get_certificates(&certificate_ids[..])
        .unwrap()
        .into_iter()
        .flatten()
        .collect::<Vec<_>>();

    expected_certificates.push(other_certificate);
    assert_eq!(expected_certificates, certificates);
}

// Pending certificates can be deleted by id, and inserting the same certificate
// twice in the pending pool is rejected with `CertificateAlreadyPending`.
#[rstest]
#[test(tokio::test)]
async fn pending_certificate_can_be_removed(store: Arc<ValidatorStore>) {
    let pending_column = store.pending_tables.pending_pool.clone();

    let certificate = Certificate::new_with_default_fields(
        PREV_CERTIFICATE_ID,
        SOURCE_SUBNET_ID_1,
        &[TARGET_SUBNET_ID_1],
    )
    .unwrap();

    let pending_id = store
        .insert_pending_certificate(&certificate)
        .await
        .unwrap()
        .unwrap();

    assert!(pending_column.get(&pending_id).is_ok());

    store.delete_pending_certificate(&pending_id).unwrap();

    assert!(matches!(pending_column.get(&pending_id), Ok(None)));

    let pending_id = store
        .insert_pending_certificate(&certificate)
        .await
        .unwrap()
        .unwrap();

    assert!(matches!(
        store.insert_pending_certificate(&certificate).await,
        Err(StorageError::InternalStorage(
            crate::errors::InternalStorageError::CertificateAlreadyPending
        ))
    ));

    assert!(pending_column.get(&pending_id).is_ok());

    store.delete_pending_certificate(&pending_id).unwrap();

    assert!(pending_column.iter().unwrap().next().is_none());
}

// The source head must always track the latest delivered certificate and position
// of a subnet, and advance as new certificates are delivered.
#[rstest]
#[test(tokio::test)]
async fn get_source_head_for_subnet(store: Arc<ValidatorStore>) {
    let expected_certificates_for_source_subnet_1: Vec<_> =
        create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_2], 10);
    store
        .insert_certificates_delivered(&expected_certificates_for_source_subnet_1[..])
        .await
        .unwrap();

    let expected_certificates_for_source_subnet_2 =
        create_certificate_chain(SOURCE_SUBNET_ID_2, &[TARGET_SUBNET_ID_2], 10);
    store
        .insert_certificates_delivered(&expected_certificates_for_source_subnet_2[..])
        .await
        .unwrap();

    let last_certificate_source_subnet_1 =
        store.get_source_head(&SOURCE_SUBNET_ID_1).unwrap().unwrap();
    let last_certificate_source_subnet_2 =
        store.get_source_head(&SOURCE_SUBNET_ID_2).unwrap().unwrap();

    assert_eq!(
        expected_certificates_for_source_subnet_1
            .last()
            .unwrap()
            .certificate
            .id,
        last_certificate_source_subnet_1.certificate_id
    );
    assert_eq!(9, *last_certificate_source_subnet_1.position); //check position
    assert_eq!(
        expected_certificates_for_source_subnet_2
            .last()
            .unwrap()
            .certificate
            .id,
        last_certificate_source_subnet_2.certificate_id
    );
    assert_eq!(9, *last_certificate_source_subnet_2.position); //check position

    let certificate = Certificate::new_with_default_fields(
        expected_certificates_for_source_subnet_1
            .last()
            .unwrap()
            .certificate
            .id,
        SOURCE_SUBNET_ID_1,
        &[TARGET_SUBNET_ID_1],
    )
    .unwrap();
    let new_certificate_source_subnet_1 = CertificateDelivered {
        certificate: certificate.clone(),
        proof_of_delivery: ProofOfDelivery {
            certificate_id: certificate.id,
            delivery_position: CertificateSourceStreamPosition::new(SOURCE_SUBNET_ID_1, 10),
            readies: vec![],
            threshold: 0,
        },
    };
    store
        .insert_certificate_delivered(&new_certificate_source_subnet_1)
        .await
        .unwrap();

    let last_certificate_subnet_1 =
        store.get_source_head(&SOURCE_SUBNET_ID_1).unwrap().unwrap();
    assert_eq!(
        new_certificate_source_subnet_1.certificate.id,
        last_certificate_subnet_1.certificate_id
    );
    assert_eq!(10, *last_certificate_subnet_1.position); //check position

    let other_certificate_2 = Certificate::new_with_default_fields(
        new_certificate_source_subnet_1.certificate.id,
        SOURCE_SUBNET_ID_1,
        &[TARGET_SUBNET_ID_2, TARGET_SUBNET_ID_1],
    )
    .unwrap();
    let other_certificate_2 = CertificateDelivered {
        certificate: other_certificate_2.clone(),
        proof_of_delivery: ProofOfDelivery {
            certificate_id: other_certificate_2.id,
            delivery_position: CertificateSourceStreamPosition::new(SOURCE_SUBNET_ID_1, 11),
            readies: vec![],
            threshold: 0,
        },
    };
    store
        .insert_certificate_delivered(&other_certificate_2)
        .await
        .unwrap();

    let last_certificate_subnet_2 =
        store.get_source_head(&SOURCE_SUBNET_ID_1).unwrap().unwrap();
    assert_eq!(
        other_certificate_2.certificate.id,
        last_certificate_subnet_2.certificate_id
    );
    assert_eq!(11, *last_certificate_subnet_2.position); //check position
}

// The pending pool must list every non-delivered certificate, keyed by a
// monotonically increasing pending id, and support targeted deletion.
#[rstest]
#[test(tokio::test)]
async fn get_pending_certificates(store: Arc<ValidatorStore>) {
    let certificates_for_source_subnet_1 =
        create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_2], 15);
    let certificates_for_source_subnet_2 =
        create_certificate_chain(SOURCE_SUBNET_ID_2, &[TARGET_SUBNET_ID_2], 15);

    // Persist the first 10 Cert of each Subnets
    store
        .insert_certificates_delivered(&certificates_for_source_subnet_1[..10])
        .await
        .unwrap();
    store
        .insert_certificates_delivered(&certificates_for_source_subnet_2[..10])
        .await
        .unwrap();

    let mut expected_pending_certificates = certificates_for_source_subnet_1[10..]
        .iter()
        .enumerate()
        .map(|(index, certificate)| ((index as u64 + 1), certificate.certificate.clone()))
        .collect::<Vec<_>>();

    expected_pending_certificates.extend(
        certificates_for_source_subnet_2[10..]
            .iter()
            .enumerate()
            .map(|(index, certificate)| {
                (
                    (index as u64 + 1) + expected_pending_certificates.len() as u64,
                    certificate.certificate.clone(),
                )
            })
            .collect::<Vec<_>>(),
    );

    // Add the last 5 cert of each Subnet as pending certificate
    store
        .insert_pending_certificates(
            &certificates_for_source_subnet_1[10..]
                .iter()
                .map(|certificate| certificate.certificate.clone())
                .collect::<Vec<_>>(),
        )
        .unwrap();
    store
        .insert_pending_certificates(
            &certificates_for_source_subnet_2[10..]
                .iter()
                .map(|certificate| certificate.certificate.clone())
                .collect::<Vec<_>>(),
        )
        .unwrap();

    let pending_certificates = store.iter_pending_pool().unwrap().collect::<Vec<_>>();
    assert_eq!(
        expected_pending_certificates.len(),
        pending_certificates.len()
    );
    assert_eq!(expected_pending_certificates, pending_certificates);

    // Remove some pending certificates, check again
    let cert_to_remove = expected_pending_certificates.remove(5);
    store.delete_pending_certificate(&cert_to_remove.0).unwrap();
    let cert_to_remove = expected_pending_certificates.remove(8);
    store.delete_pending_certificate(&cert_to_remove.0).unwrap();

    let pending_certificates = store.iter_pending_pool().unwrap().collect::<Vec<_>>();
    assert_eq!(
        expected_pending_certificates.len(),
        pending_certificates.len()
    );
    assert_eq!(expected_pending_certificates, pending_certificates);
}

// Certificates read from a source stream must come back ordered, each one chaining
// on the previous (prev_id) and carrying its expected position.
#[rstest]
#[tokio::test]
async fn fetch_source_subnet_certificates_in_order(store: Arc<ValidatorStore>) {
    let certificates_for_source_subnet_1 =
        create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_2], 10);

    // Persist the first 10 Cert of each Subnets
    store
        .insert_certificates_delivered(&certificates_for_source_subnet_1[..10])
        .await
        .unwrap();

    let res = store
        .get_source_stream_certificates_from_position(
            crate::CertificateSourceStreamPosition {
                subnet_id: SOURCE_SUBNET_ID_1,
                position: Position::ZERO,
            },
            100,
        )
        .unwrap();

    let mut prev = PREV_CERTIFICATE_ID;
    for (index, (cert, position)) in res.iter().enumerate() {
        let cert = &cert.certificate;
        assert_eq!(cert.prev_id, prev);
        assert!(matches!(
            position,
            CertificateSourceStreamPosition {
                subnet_id: SOURCE_SUBNET_ID_1,
                position: current_pos
            } if **current_pos == index as u64
        ));
        prev = cert.id;
    }
}

================================================ FILE: crates/topos-tce-storage/src/tests/pending_certificates.rs ================================================
use std::{sync::Arc, time::Duration};

use rstest::rstest;
use topos_core::uci::{Certificate, INITIAL_CERTIFICATE_ID};
use topos_test_sdk::{
certificates::{create_certificate_at_position, create_certificate_chain},
    constants::{SOURCE_SUBNET_ID_1, TARGET_SUBNET_ID_1},
};

use super::support::store;
use crate::{store::WriteStore, validator::ValidatorStore};

// A genesis certificate (prev = INITIAL_CERTIFICATE_ID) has no precedence requirement:
// it goes straight to the pending pool and is retrievable by both pending id and
// certificate id.
#[rstest]
#[tokio::test]
async fn adding_genesis_pending_certificate(store: Arc<ValidatorStore>) {
    let certificate = Certificate::new_with_default_fields(
        INITIAL_CERTIFICATE_ID,
        SOURCE_SUBNET_ID_1,
        &[TARGET_SUBNET_ID_1],
    )
    .unwrap();

    let pending_id = store
        .insert_pending_certificate(&certificate)
        .await
        .unwrap()
        .unwrap();

    assert_eq!(
        store.get_pending_certificate(&pending_id).unwrap().unwrap(),
        certificate
    );
    assert_eq!(
        store.get_pending_id(&certificate.id).unwrap().unwrap(),
        pending_id
    );
}

// When the prev certificate is not delivered yet, the certificate must be parked in the
// precedence pool; once the prev is delivered it is promoted to the pending pool.
#[rstest]
#[tokio::test]
async fn adding_pending_certificate_with_precedence_check_fail(store: Arc<ValidatorStore>) {
    let initial_certificate_delivered = create_certificate_at_position::default();

    let certificate = Certificate::new_with_default_fields(
        initial_certificate_delivered.certificate.id,
        SOURCE_SUBNET_ID_1,
        &[TARGET_SUBNET_ID_1],
    )
    .unwrap();

    assert!(store
        .insert_pending_certificate(&certificate)
        .await
        .unwrap()
        .is_none());

    assert!(store.get_pending_id(&certificate.id).unwrap().is_none());
    assert!(store
        .pending_tables
        .precedence_pool
        .get(&certificate.prev_id)
        .unwrap()
        .is_some());

    store
        .insert_certificate_delivered(&initial_certificate_delivered)
        .await
        .unwrap();

    let pending_id = store.get_pending_id(&certificate.id).unwrap().unwrap();

    assert_eq!(
        store.get_pending_certificate(&pending_id).unwrap().unwrap(),
        certificate
    );
}

// A certificate that was already delivered must be rejected from the pending pool.
#[rstest]
#[tokio::test]
async fn adding_pending_certificate_already_delivered(store: Arc<ValidatorStore>) {
    let initial_certificate_delivered = create_certificate_at_position::default();

    store
        .insert_certificate_delivered(&initial_certificate_delivered)
        .await
        .unwrap();

    assert!(store
        .insert_pending_certificate(&initial_certificate_delivered.certificate)
        .await
        .is_err());
}

/// This test is covering a corner case which involves the delivery of a prev certificate
/// and a child certificate.
///
/// The scenario is this one:
/// - A `prev` certificate (`C1`) has been delivered (by the broadcast) and need to be persisted
///   The persist method will hold a lock while performing multiple insert/delete to avoid
///   insert race condition.
/// - At the same time, another node is sending a certificate (`C2`) which have `C1` as `prev_id`.
///   `C2` is looking at the storage to find if the `prev_id` `C1` is delivered but find nothing as
///   the `persist` method is still working at creating the `WriteBatch`. It led the node to put
///   `C2` in the `precedence_pool` waiting for `C1` to be delivered while it is in fact already
///   delivered.
///
/// To avoid that and as a first step, when trying to insert a certificate in the pending pool,
/// The node will try to acquire a lock guard on the certificate but also on the prev_id.
mod concurrency {
    use crate::errors::StorageError;

    use super::*;

    #[rstest]
    #[tokio::test]
    async fn adding_pending_certificate_but_prev_fail(store: Arc<ValidatorStore>) {
        let mut certs = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 2);
        let cert = certs.pop().unwrap();
        let parent = certs.pop().unwrap();

        assert!(certs.is_empty());

        // The lock guard simulate the start of the certificate insertion in the table.
        let lock_guard_certificate = store
            .fullnode_store
            .certificate_lock_guard(parent.certificate.id)
            .await;

        tokio::spawn(async move {
            tokio::time::sleep(Duration::from_millis(100)).await;
            // Drop the lock_guard of the prev_id without inserting it
            drop(lock_guard_certificate);
        });

        // The prev was never actually inserted, so the child lands in the precedence pool.
        assert!(matches!(
            store.insert_pending_certificate(&cert.certificate).await,
            Ok(None)
        ));
    }

    #[rstest]
    #[tokio::test]
    async fn certificate_in_delivery(store: Arc<ValidatorStore>) {
        let mut certs = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 1);
        let cert = certs.pop().unwrap();

        assert!(certs.is_empty());

        // The lock guard simulate the start of the certificate insertion in the table.
        let lock_guard_subnet = store
            .fullnode_store
            .subnet_lock_guard(cert.certificate.source_subnet_id)
            .await;

        tokio::spawn(async move {
            tokio::time::sleep(Duration::from_millis(200)).await;
            // Drop the lock_guard of the certificate without inserting it
            drop(lock_guard_subnet);
        });

        let store_deliver = store.clone();
        let delivered = cert.clone();
        tokio::spawn(async move {
            _ = store_deliver
                .insert_certificate_delivered(&delivered)
                .await
                .unwrap();
        });

        tokio::time::sleep(Duration::from_millis(100)).await;

        // By the time the lock is released, the certificate has been delivered by the
        // concurrent task, so the pending insert must be rejected.
        assert!(matches!(
            store.insert_pending_certificate(&cert.certificate).await,
            Err(StorageError::InternalStorage(
                crate::errors::InternalStorageError::CertificateAlreadyExists
            ))
        ));
    }

    #[rstest]
    #[tokio::test]
    async fn prev_certificate_in_delivery(store: Arc<ValidatorStore>) {
        let mut certs = create_certificate_chain(SOURCE_SUBNET_ID_1, &[TARGET_SUBNET_ID_1], 2);
        let cert = certs.pop().unwrap();
        let prev = certs.pop().unwrap();

        assert!(certs.is_empty());

        // The lock guard simulate the start of the certificate insertion in the table.
        let lock_guard_subnet = store
            .fullnode_store
            .subnet_lock_guard(cert.certificate.source_subnet_id)
            .await;

        tokio::spawn(async move {
            tokio::time::sleep(Duration::from_millis(200)).await;
            // Drop the lock_guard of the certificate without inserting it
            drop(lock_guard_subnet);
        });

        let store_deliver = store.clone();
        tokio::spawn(async move {
            _ = store_deliver
                .insert_certificate_delivered(&prev)
                .await
                .unwrap();
        });

        tokio::time::sleep(Duration::from_millis(100)).await;

        // The prev ends up delivered, so the child goes straight to the pending pool.
        assert!(matches!(
            store.insert_pending_certificate(&cert.certificate).await,
            Ok(Some(_))
        ));
    }
}

================================================ FILE: crates/topos-tce-storage/src/tests/position.rs ================================================
use test_log::test;

#[test(tokio::test)]
#[ignore = "not yet implemented"]
async fn position_can_be_fetch_for_multiple_subnets() {}

#[test(tokio::test)]
#[ignore = "not yet implemented"]
async fn position_can_be_fetch_for_all_subnets() {}

================================================ FILE: crates/topos-tce-storage/src/tests/rocks.rs ================================================
use std::thread;

use rstest::rstest;

use crate::rocks::db_column::DBColumn;
use crate::tests::support::database_name;
use crate::tests::support::rocks_db;

#[cfg(test)]
use test_log::test;

// Two batches built concurrently on clones of the same column must apply in write
// order: the later write wins for overlapping keys.
#[rstest]
#[test(tokio::test)]
async fn create_batch_multithread(database_name: &'static str) {
    let db = rocks_db(database_name);
    // NOTE(review): key/value generics were lost in extraction; `<String, String>` is
    // inferred from the `.to_string()` conversions below — confirm against upstream.
    let column: DBColumn<String, String> = DBColumn::reopen(&db, "default");

    let column_clone = column.clone();
    let batch = column
        .batch()
        .insert_batch(
            &column,
            [("key1", "thread_1_value"), ("key2", "thread_1_value")]
                .map(|(k, v)| (k.to_string(), v.to_string())),
        )
        .unwrap();

    let join = thread::spawn(move || {
        let column = column_clone;
        column
            .batch()
            .insert_batch(
                &column,
                [("key1", "thread_2_value"), ("key2", "thread_2_value")]
                    .map(|(k, v)| (k.to_string(), v.to_string())),
            )
            .unwrap()
    });

    batch.write().unwrap();

    assert_eq!(
        column.get(&"key1".to_string()).unwrap().unwrap(),
"thread_1_value" ); join.join().unwrap().write().unwrap(); assert_eq!( column.get(&"key1".to_string()).unwrap().unwrap(), "thread_2_value" ); } ================================================ FILE: crates/topos-tce-storage/src/tests/support/columns.rs ================================================ use rstest::fixture; use crate::rocks::{constants, db_column::DBColumn}; use crate::types::{ CertificatesColumn, PendingCertificatesColumn, StreamsColumn, TargetSourceListColumn, TargetStreamsColumn, }; use super::database_name; use super::rocks_db; #[fixture] pub(crate) fn pending_column(database_name: &'static str) -> PendingCertificatesColumn { DBColumn::reopen(&rocks_db(database_name), constants::PENDING_CERTIFICATES) } #[fixture] pub(crate) fn certificates_column(database_name: &'static str) -> CertificatesColumn { DBColumn::reopen(&rocks_db(database_name), constants::CERTIFICATES) } #[fixture] pub(crate) fn source_streams_column(database_name: &'static str) -> StreamsColumn { DBColumn::reopen(&rocks_db(database_name), constants::SOURCE_STREAMS) } #[fixture] pub(crate) fn target_streams_column(database_name: &'static str) -> TargetStreamsColumn { DBColumn::reopen(&rocks_db(database_name), constants::TARGET_STREAMS) } #[fixture] pub(crate) fn target_source_list_column(database_name: &'static str) -> TargetSourceListColumn { DBColumn::reopen(&rocks_db(database_name), constants::TARGET_SOURCES) } ================================================ FILE: crates/topos-tce-storage/src/tests/support/folder.rs ================================================ use std::{ fs, path::{Path, PathBuf}, thread, }; use rstest::fixture; #[fixture] pub(crate) fn random_path() -> Box { let temp_dir = topos_test_sdk::storage::create_folder(thread::current().name().unwrap()); Box::new(temp_dir) } pub(crate) fn created_folder(random_path: &Path) { fs::create_dir_all(random_path).unwrap(); } ================================================ FILE: 
crates/topos-tce-storage/src/tests/support/mod.rs
================================================

use std::{
    collections::HashMap,
    path::PathBuf,
    str::FromStr,
    sync::{Arc, Mutex},
    thread,
};

use once_cell::sync::Lazy;
use rocksdb::Options;
use rstest::fixture;
use topos_test_sdk::storage::create_folder;

use crate::{
    epoch::{EpochValidatorsStore, ValidatorPerEpochStore},
    fullnode::FullNodeStore,
    index::IndexTables,
    rocks::{db::init_db, db::RocksDB},
    validator::{ValidatorPerpetualTables, ValidatorStore},
};

use self::folder::created_folder;

pub(crate) mod columns;
pub(crate) mod folder;

/// Process-wide registry of opened RocksDB handles, keyed by database name,
/// so that every fixture asking for the same database reuses a single handle
/// instead of re-opening the DB (RocksDB forbids concurrent opens of one path).
pub(crate) static DB: Lazy<Mutex<HashMap<&'static str, Arc<RocksDB>>>> =
    Lazy::new(|| Mutex::new(HashMap::new()));

/// Derives a unique, leaked database name from the current test thread name.
///
/// The name is leaked (`Box::leak`) on purpose: fixtures hand out
/// `&'static str` keys into the [`DB`] registry for the lifetime of the test
/// binary. `::` is replaced because it is not filesystem-friendly.
#[fixture]
pub(crate) fn database_name() -> &'static str {
    Box::leak(Box::new(
        topos_test_sdk::storage::create_folder(thread::current().name().unwrap())
            .to_str()
            .unwrap()
            .replace("::", "_"),
    ))
}

/// Builds a fully wired [`ValidatorStore`] backed by a fresh temporary folder.
///
/// Opens the perpetual and index tables, the epoch participants store and the
/// per-epoch store (epoch `0`), assembles them into a [`FullNodeStore`] and
/// wraps everything in a [`ValidatorStore`].
#[fixture]
pub(crate) fn store() -> Arc<ValidatorStore> {
    let temp_dir = create_folder::default();

    let perpetual_tables = Arc::new(ValidatorPerpetualTables::open(&temp_dir));
    let index_tables = Arc::new(IndexTables::open(&temp_dir));

    let participants_store =
        EpochValidatorsStore::new(&temp_dir).expect("Unable to create Participant store");

    let epoch_store =
        ValidatorPerEpochStore::new(0, &temp_dir).expect("Unable to create Per epoch store");

    let store = FullNodeStore::open(
        epoch_store,
        participants_store,
        perpetual_tables,
        index_tables,
    )
    .expect("Unable to create full node store");

    ValidatorStore::open(&temp_dir, store).unwrap()
}

/// Opens (or reuses) the RocksDB instance registered under `database_name`.
///
/// The first caller for a given name creates the on-disk folder and opens the
/// database with `create_if_missing`/`create_missing_column_families`; later
/// callers receive a clone of the cached [`Arc<RocksDB>`] from [`DB`].
#[fixture]
pub(crate) fn rocks_db(database_name: &'static str) -> Arc<RocksDB> {
    let mut dbs = DB.lock().unwrap();

    dbs.entry(database_name)
        .or_insert_with(|| {
            let path = PathBuf::from_str(database_name).unwrap();
            created_folder(&path);
            let mut options = Options::default();
            options.create_if_missing(true);
            options.create_missing_column_families(true);

            Arc::new(init_db(&path, options).unwrap())
        })
        .clone()
}

================================================
FILE: crates/topos-tce-storage/src/types.rs
================================================ use topos_core::{ api::grpc::checkpoints::SourceStreamPosition, types::{ stream::{CertificateSourceStreamPosition, CertificateTargetStreamPosition, Position}, CertificateDelivered, Ready, Signature, }, uci::{Certificate, CertificateId}, }; use crate::{ rocks::{db_column::DBColumn, TargetSourceListKey}, CertificatePositions, PendingCertificateId, }; pub type Echo = String; pub type CertificateSequenceNumber = u64; pub type EpochId = u64; pub type Validators = Vec; /// Column that keeps certificates that are not yet delivered pub(crate) type PendingCertificatesColumn = DBColumn; /// Column that keeps list of all certificates retrievable by their id pub(crate) type CertificatesColumn = DBColumn; /// Column that keeps list of certificates received from particular subnet and /// maps (source subnet id, source certificate position) to certificate id pub(crate) type StreamsColumn = DBColumn; /// Column that keeps list of certificates that are delivered to target subnet, /// and maps their target (target subnet, source subnet and position/count per source subnet) /// to certificate id pub(crate) type TargetStreamsColumn = DBColumn; /// Keeps position for particular target subnet id <- source subnet id column in TargetStreamsColumn pub(crate) type TargetSourceListColumn = DBColumn; #[derive(Debug, Clone)] pub enum PendingResult { AlreadyDelivered, AlreadyPending, AwaitPrecedence, InPending(PendingCertificateId), } #[derive(Debug, Clone)] pub struct CertificateDeliveredWithPositions(pub CertificateDelivered, pub CertificatePositions); #[allow(unused)] pub struct EpochSummary { epoch_id: EpochId, start_checkpoint: VerifiedCheckpointSummary, end_checkpoint: Option, } #[allow(unused)] pub struct CheckpointSummary { epoch: EpochId, sequence_number: usize, checkpoint_data: Vec, } #[allow(unused)] pub struct VerifiedCheckpointSummary(CheckpointSummary, ValidatorQuorumSignatureInfo); #[allow(unused)] pub struct 
ValidatorQuorumSignatureInfo { epoch: EpochId, signature: [u8; 32], } #[allow(unused)] pub struct BroadcastState { echoes: Vec<(Echo, Signature)>, readies: Vec<(Ready, Signature)>, delivered: bool, } ================================================ FILE: crates/topos-tce-storage/src/validator/mod.rs ================================================ //! Validator's context store and storage //! //! The [`ValidatorStore`] is responsible for managing the various kind of data that are required by the //! TCE network in order to broadcast certificates. It is composed of two main parts: //! //! - a [`FullNodeStore`] //! - a [`ValidatorPendingTables`] //! //! ## Responsibilities //! //! This store is used in places where the [`FullNodeStore`] is not enough, it allows to access the //! different pending pools and to manage them but also to access the [`FullNodeStore`] in order to //! persist or update [`Certificate`] or `streams`. //! //! Pending pools and their behavior are described in the [`ValidatorPendingTables`] documentation. //! use std::{ collections::HashMap, path::Path, sync::{atomic::Ordering, Arc}, }; use async_trait::async_trait; use rocksdb::properties::ESTIMATE_NUM_KEYS; use topos_core::{ types::{ stream::{CertificateSourceStreamPosition, Position}, CertificateDelivered, ProofOfDelivery, }, uci::{Certificate, CertificateId, SubnetId, INITIAL_CERTIFICATE_ID}, }; use topos_metrics::{STORAGE_PENDING_POOL_COUNT, STORAGE_PRECEDENCE_POOL_COUNT}; use tracing::{debug, error, info, instrument, warn}; use crate::{ errors::{InternalStorageError, StorageError}, fullnode::FullNodeStore, rocks::map::Map, store::{ReadStore, WriteStore}, CertificatePositions, CertificateTargetStreamPosition, PendingCertificateId, SourceHead, }; pub use self::tables::ValidatorPendingTables; pub use self::tables::ValidatorPerpetualTables; mod tables; /// Store to manage Validator data /// /// The [`ValidatorStore`] is composed of a [`FullNodeStore`] and a [`ValidatorPendingTables`]. 
/// /// As the [`FullNodeStore`] is responsible of keeping and managing data that are persistent, /// the [`ValidatorStore`] is delegating to it many of the [`WriteStore`] and [`ReadStore`] /// functionality. /// /// The key point is that the [`ValidatorStore`] is managing the different pending pools using a [`ValidatorPendingTables`]. /// /// Pending pools and how they behave are described in the [`ValidatorPendingTables`] documentation. /// pub struct ValidatorStore { pub(crate) pending_tables: ValidatorPendingTables, pub(crate) fullnode_store: Arc, } impl ValidatorStore { /// Try to create a new instance of [`ValidatorStore`] based on the given path pub fn new(path: &Path) -> Result, StorageError> { let fullnode_store = FullNodeStore::new(path)?; Self::open(path, fullnode_store) } /// Open a [`ValidatorStore`] at the given `path` and using the given [`FullNodeStore`] pub fn open( path: &Path, fullnode_store: Arc, ) -> Result, StorageError> { let pending_tables: ValidatorPendingTables = ValidatorPendingTables::open(path); let store = Arc::new(Self { pending_tables, fullnode_store, }); store.pending_tables.pending_pool.rocksdb.compact_range_cf( &store.pending_tables.pending_pool.cf()?, None::<&[u8]>, None::<&[u8]>, ); store .pending_tables .precedence_pool .rocksdb .compact_range_cf( &store.pending_tables.precedence_pool.cf()?, None::<&[u8]>, None::<&[u8]>, ); let pending_count: i64 = store.pending_pool_size()?.try_into().map_err(|error| { error!("Failed to convert estimate-num-keys to i64: {}", error); StorageError::InternalStorage(InternalStorageError::UnexpectedDBState( "Failed to convert estimate-num-keys to i64", )) })?; let precedence_count: i64 = store.precedence_pool_size()?.try_into().map_err(|error| { error!("Failed to convert estimate-num-keys to i64: {}", error); StorageError::InternalStorage(InternalStorageError::UnexpectedDBState( "Failed to convert estimate-num-keys to i64", )) })?; STORAGE_PENDING_POOL_COUNT.set(pending_count); 
STORAGE_PRECEDENCE_POOL_COUNT.set(precedence_count); Ok(store) } /// Returns the [`FullNodeStore`] used by the [`ValidatorStore`] pub fn fullnode_store(&self) -> Arc { self.fullnode_store.clone() } /// Returns the number of certificates in the pending pool pub fn pending_pool_size(&self) -> Result { Ok(self .pending_tables .pending_pool .property_int_value(ESTIMATE_NUM_KEYS)?) } /// Returns the number of certificates in the precedence pool pub fn precedence_pool_size(&self) -> Result { Ok(self .pending_tables .precedence_pool .property_int_value(ESTIMATE_NUM_KEYS)?) } /// Try to return the [`PendingCertificateId`] for a [`CertificateId`] /// /// Return `Ok(None)` if the `certificate_id` is not found. pub fn get_pending_id( &self, certificate_id: &CertificateId, ) -> Result, StorageError> { Ok(self.pending_tables.pending_pool_index.get(certificate_id)?) } /// Try to return the [`Certificate`] for a [`PendingCertificateId`] /// /// Return `Ok(None)` if the `pending_id` is not found. pub fn get_pending_certificate( &self, pending_id: &PendingCertificateId, ) -> Result, StorageError> { Ok(self.pending_tables.pending_pool.get(pending_id)?) } /// Returns an iterator over the pending pool /// /// Note: this can be slow on large datasets. #[doc(hidden)] pub fn iter_pending_pool( &self, ) -> Result + '_, StorageError> { Ok(self.pending_tables.pending_pool.iter()?) } /// Returns an iterator over the pending pool starting at a given `PendingCertificateId` /// /// Note: this can be slow on large datasets. #[doc(hidden)] pub fn iter_pending_pool_at( &self, pending_id: &PendingCertificateId, ) -> Result + '_, StorageError> { Ok(self.pending_tables.pending_pool.iter_at(pending_id)?) } /// Returns an iterator over the precedence pool /// /// Note: this can be slow on large datasets. #[doc(hidden)] pub fn iter_precedence_pool( &self, ) -> Result + '_, StorageError> { Ok(self.pending_tables.precedence_pool.iter()?) 
} pub fn get_next_pending_certificates( &self, from: &PendingCertificateId, number: usize, ) -> Result, StorageError> { debug!( "Get next pending certificates from {} (max: {})", from, number ); Ok(self .pending_tables .pending_pool .iter_at(from)? .take(number) .collect()) } /// Returns the [Certificate] (if any) that is currently in the precedence pool for the given [CertificateId] pub fn check_precedence( &self, certificate_id: &CertificateId, ) -> Result, StorageError> { Ok(self.pending_tables.precedence_pool.get(certificate_id)?) } // TODO: Performance issue on this one as we iter over all the pending certificates // We need to improve how we request the pending certificates. pub fn get_pending_certificates_for_subnets( &self, subnets: &[SubnetId], ) -> Result)>, StorageError> { let mut result: HashMap)> = subnets .iter() .enumerate() .map(|(_, s)| (*s, (0, None))) .collect(); for (_, certificate) in self.pending_tables.pending_pool.iter()? { if !subnets.contains(&certificate.source_subnet_id) { continue; } let mut latest_cert = certificate; let entry = result .entry(latest_cert.source_subnet_id) .or_insert((0, None)); entry.0 += 1; while let Some(certificate) = self.pending_tables.precedence_pool.get(&latest_cert.id)? 
{ latest_cert = certificate; entry.0 += 1; } entry.1 = Some(latest_cert); } Ok(result) } #[cfg(test)] pub(crate) fn insert_pending_certificates( &self, certificates: &[Certificate], ) -> Result, StorageError> { let id = self .pending_tables .next_pending_id .fetch_add(certificates.len() as u64, Ordering::Relaxed); let mut batch = self.pending_tables.pending_pool.batch(); let (values, index, ids) = certificates.iter().enumerate().fold( (Vec::new(), Vec::new(), Vec::new()), |(mut values, mut index, mut ids), (idx, certificate)| { let id = id + idx as u64; index.push((certificate.id, id)); values.push((id, certificate)); ids.push(id); (values, index, ids) }, ); batch = batch.insert_batch(&self.pending_tables.pending_pool, values)?; batch = batch.insert_batch(&self.pending_tables.pending_pool_index, index)?; batch.write()?; STORAGE_PENDING_POOL_COUNT.add(ids.len() as i64); Ok(ids) } pub async fn insert_pending_certificate( &self, certificate: &Certificate, ) -> Result, StorageError> { // A lock guard is taken during the insertion of a pending certificate (C1) // to avoid race condition when this certificate C1 is delivered by the network // and in the process of being inserted into the precedence tables. let _certificate_guard = self .fullnode_store .certificate_lock_guard(certificate.id) .await; if self.get_certificate(&certificate.id)?.is_some() { debug!("Certificate {} is already delivered", certificate.id); return Err(StorageError::InternalStorage( InternalStorageError::CertificateAlreadyExists, )); } if self .pending_tables .pending_pool_index .get(&certificate.id)? 
.is_some() { debug!( "Certificate {} is already in the pending pool", certificate.id ); return Err(StorageError::InternalStorage( InternalStorageError::CertificateAlreadyPending, )); } // A lock guard is taken during the insertion of a pending certificate // to avoid race condition when a certificate is being added to the // pending pool while its parent is currently being inserted as delivered let _prev_certificate_guard = self .fullnode_store .certificate_lock_guard(certificate.prev_id) .await; let prev_delivered = certificate.prev_id == INITIAL_CERTIFICATE_ID || self.get_certificate(&certificate.prev_id)?.is_some(); if prev_delivered { let id = self .pending_tables .next_pending_id .fetch_add(1, Ordering::Relaxed); self.pending_tables.pending_pool.insert(&id, certificate)?; self.pending_tables .pending_pool_index .insert(&certificate.id, &id)?; STORAGE_PENDING_POOL_COUNT.inc(); debug!( "Certificate {} is now in the pending pool at index: {}", certificate.id, id ); Ok(Some(id)) } else { self.pending_tables .precedence_pool .insert(&certificate.prev_id, certificate)?; STORAGE_PRECEDENCE_POOL_COUNT.inc(); debug!( "Certificate {} is now in the precedence pool, because the previous certificate \ {} isn't delivered yet", certificate.id, certificate.prev_id ); Ok(None) } } #[instrument(skip(self, proofs))] pub fn insert_unverified_proofs( &self, proofs: Vec, ) -> Result, StorageError> { let certs: Vec = proofs.iter().map(|proof| proof.certificate_id).collect(); let unverified: Vec<(CertificateId, ProofOfDelivery)> = proofs .into_iter() .map(|proof| { debug!( "Certificate Sync: unverified proof for {} inserted", proof.certificate_id ); (proof.certificate_id, proof) }) .collect(); self.fullnode_store .perpetual_tables .unverified .multi_insert(unverified)?; Ok(certs) } #[instrument(skip(self, certificate))] pub async fn synchronize_certificate( &self, certificate: Certificate, ) -> Result<(), StorageError> { if let Ok(Some(proof_of_delivery)) = 
self.get_unverified_proof(&certificate.id) { let certificate_id = certificate.id; debug!( "Certificate Sync: certificate {} is now defined as delivered", certificate_id ); self.insert_certificate_delivered(&CertificateDelivered { certificate, proof_of_delivery, }) .await?; debug!( "Certificate Sync: unverified proof has been removed for {}", certificate_id ); self.fullnode_store .perpetual_tables .unverified .delete(&certificate_id)?; Ok(()) } else { debug!("Certificate Sync: Proof not found for {}", certificate.id); Err(StorageError::InternalStorage( crate::errors::InternalStorageError::InvalidQueryArgument("Proof not found"), )) } } pub fn get_unverified_proof( &self, certificate_id: &CertificateId, ) -> Result, StorageError> { Ok(self .fullnode_store .perpetual_tables .unverified .get(certificate_id)?) } /// Returns the difference between the `from` list of [ProofOfDelivery] and the local head /// checkpoint. This is used to define the list of certificates that are missing between the /// `from` and the local head checkpoint. /// The maximum number of [ProofOfDelivery] returned per [SubnetId] is 100. /// If the `from` is missing a local subnet, the list of [ProofOfDelivery] for this subnet will /// start from [Position] `0`. pub fn get_checkpoint_diff( &self, from: &[ProofOfDelivery], limit_per_subnet: usize, ) -> Result>, StorageError> { // Parse the from in order to extract the different position per subnets let from_positions: HashMap = from .iter() .map(|v| (v.delivery_position.subnet_id, v)) .collect(); let mut output: HashMap> = HashMap::new(); // Request the local head checkpoint let subnets: HashMap = self .fullnode_store .index_tables .source_list .iter()? .map(|(subnet_id, (_, position))| (subnet_id, position)) .collect(); // For every local known subnets we want to iterate and check if there // is a delta between the from_position and our head position. 
for (subnet, local_position) in subnets { let certs: Vec<_> = if let Some(position) = from_positions.get(&subnet) { if local_position <= position.delivery_position.position { continue; } self.fullnode_store .perpetual_tables .streams .prefix_iter(&(&subnet, &position.delivery_position.position))? .skip(1) .take(limit_per_subnet) .map(|(_, v)| v) .collect() } else { self.fullnode_store .perpetual_tables .streams .prefix_iter(&(&subnet, Position::ZERO))? .take(limit_per_subnet) .map(|(_, v)| v) .collect() }; let proofs: Vec<_> = self .fullnode_store .get_certificates(&certs)? .into_iter() .filter_map(|v| v.map(|c| c.proof_of_delivery)) .collect(); info!( "Certificate Sync: distance between from and head for {} subnet is {}", subnet, proofs.len() ); if let Some(old_value) = output.insert(subnet, proofs) { error!( "Certificate Sync: This should not happen, we are overwriting a value during \ sync of {subnet}. Overwriting {}", old_value.len() ); } } Ok(output) } #[cfg(test)] pub(crate) fn delete_pending_certificate( &self, pending_id: &PendingCertificateId, ) -> Result { if let Some(certificate) = self.pending_tables.pending_pool.get(pending_id)? 
{ self.pending_tables.pending_pool.delete(pending_id)?; self.pending_tables .pending_pool_index .delete(&certificate.id)?; STORAGE_PENDING_POOL_COUNT.dec(); Ok(certificate) } else { Err(StorageError::InternalStorage( crate::errors::InternalStorageError::InvalidQueryArgument( "No certificate for pending_id", ), )) } } } impl ReadStore for ValidatorStore { fn count_certificates_delivered(&self) -> Result { self.fullnode_store.count_certificates_delivered() } fn get_source_head(&self, subnet_id: &SubnetId) -> Result, StorageError> { self.fullnode_store.get_source_head(subnet_id) } fn get_certificate( &self, certificate_id: &CertificateId, ) -> Result, StorageError> { self.fullnode_store.get_certificate(certificate_id) } fn get_certificates( &self, certificate_ids: &[CertificateId], ) -> Result>, StorageError> { self.fullnode_store.get_certificates(certificate_ids) } fn last_delivered_position_for_subnet( &self, subnet_id: &SubnetId, ) -> Result, StorageError> { Ok(self .fullnode_store .index_tables .source_list .get(subnet_id)? 
.map(|(_, position)| CertificateSourceStreamPosition { subnet_id: *subnet_id, position, })) } fn get_checkpoint(&self) -> Result, StorageError> { self.fullnode_store.get_checkpoint() } fn get_source_stream_certificates_from_position( &self, from: CertificateSourceStreamPosition, limit: usize, ) -> Result, StorageError> { self.fullnode_store .get_source_stream_certificates_from_position(from, limit) } fn get_target_stream_certificates_from_position( &self, position: CertificateTargetStreamPosition, limit: usize, ) -> Result, StorageError> { self.fullnode_store .get_target_stream_certificates_from_position(position, limit) } fn get_target_source_subnet_list( &self, target_subnet_id: &SubnetId, ) -> Result, StorageError> { self.fullnode_store .get_target_source_subnet_list(target_subnet_id) } } #[async_trait] impl WriteStore for ValidatorStore { async fn insert_certificate_delivered( &self, certificate: &CertificateDelivered, ) -> Result { let position = self .fullnode_store .insert_certificate_delivered(certificate) .await?; if let Ok(Some(pending_id)) = self .pending_tables .pending_pool_index .get(&certificate.certificate.id) { _ = self.pending_tables.pending_pool.delete(&pending_id); _ = self .pending_tables .pending_pool_index .delete(&certificate.certificate.id); STORAGE_PENDING_POOL_COUNT.dec(); } if let Ok(Some(next_certificate)) = self .pending_tables .precedence_pool .get(&certificate.certificate.id) { debug!( "Delivered certificate {} unlocks {} for broadcast", certificate.certificate.id, next_certificate.id ); self.insert_pending_certificate(&next_certificate).await?; self.pending_tables .precedence_pool .delete(&certificate.certificate.id)?; STORAGE_PRECEDENCE_POOL_COUNT.dec(); STORAGE_PENDING_POOL_COUNT.inc(); } Ok(position) } async fn insert_certificates_delivered( &self, certificates: &[CertificateDelivered], ) -> Result<(), StorageError> { self.fullnode_store .insert_certificates_delivered(certificates) .await } } 
================================================ FILE: crates/topos-tce-storage/src/validator/tables.rs ================================================ use std::{ fs::create_dir_all, path::Path, sync::atomic::{AtomicU64, Ordering}, }; use bincode::Options; use rocksdb::ColumnFamilyDescriptor; use topos_core::{ types::ProofOfDelivery, uci::{Certificate, CertificateId}, }; use tracing::warn; use crate::{ constant::cfs, rocks::{ constants, db::{default_options, init_with_cfs}, db_column::DBColumn, }, types::{CertificatesColumn, EpochId, EpochSummary, PendingCertificatesColumn, StreamsColumn}, PendingCertificateId, }; /// Pending data used by Validator /// /// It contains data that is not yet delivered. /// /// When a [`Certificate`] is received, it can either be added to the pending /// pool or to the precedence pool. /// /// Prior to be inserted in either of the pending or precedence pools, a [`Certificate`] /// needs to be validated. A validated certificate means that the proof of the certificate /// has be verified using FROST. /// /// ## Pending pool /// /// The pending pool stores certificates that are ready to be broadcast. /// A [`Certificate`] is ready to be broadcast when it has been validated and its previous [`Certificate`] is /// already delivered. /// /// The ordering inside the pending pool is a FIFO queue, each [`Certificate`] in the pool gets /// assigned to a unique [`PendingCertificateId`](type@crate::PendingCertificateId). /// /// ## Precedence pool /// /// The precedence pool stores certificates that are not yet ready to be broadcast. /// Typically waiting for its previous [`Certificate`] to be delivered. /// However, the [`Certificate`] is already validated. /// /// When a [`Certificate`] is delivered, the [`ValidatorStore`](struct@super::ValidatorStore) will /// check for any child [`Certificate`] in the precedence pool waiting to be promoted to the /// pending pool in order to be broadcast. 
/// pub struct ValidatorPendingTables { pub(crate) next_pending_id: AtomicU64, pub(crate) pending_pool: PendingCertificatesColumn, pub(crate) pending_pool_index: DBColumn, pub(crate) precedence_pool: DBColumn, } impl ValidatorPendingTables { /// Open the [`ValidatorPendingTables`] at the given path. pub fn open(path: &Path) -> Self { let path = path.join("pending"); if !path.exists() { warn!("Path {:?} does not exist, creating it", path); create_dir_all(&path).expect("Cannot create ValidatorPendingTables directory"); } let cfs = vec![ ColumnFamilyDescriptor::new(cfs::PENDING_POOL, default_options()), ColumnFamilyDescriptor::new(cfs::PENDING_POOL_INDEX, default_options()), ColumnFamilyDescriptor::new(cfs::PRECEDENCE_POOL, default_options()), ]; let db = init_with_cfs(&path, default_options(), cfs) .unwrap_or_else(|_| panic!("Cannot open DB at {:?}", path)); let pending_pool = DBColumn::reopen(&db, cfs::PENDING_POOL); let next_pending_id = { let cf = pending_pool .rocksdb .cf_handle(cfs::PENDING_POOL) .expect("Cannot get cf handle for pending pool"); let mut pending_iterator = pending_pool.rocksdb.raw_iterator_cf(&cf); pending_iterator.seek_to_last(); if pending_iterator.valid() { AtomicU64::new( pending_iterator .key() .map(|key| { bincode::DefaultOptions::new() .with_big_endian() .with_fixint_encoding() .deserialize(key) .unwrap_or(0) }) .unwrap_or(0), ) } else { AtomicU64::new(0) } }; next_pending_id.fetch_add(1, Ordering::Relaxed); Self { next_pending_id, pending_pool, pending_pool_index: DBColumn::reopen(&db, cfs::PENDING_POOL_INDEX), precedence_pool: DBColumn::reopen(&db, cfs::PRECEDENCE_POOL), } } } /// Data that shouldn't be purged at all. 
// TODO: TP-774: Rename and move to FullNode domain pub struct ValidatorPerpetualTables { pub(crate) certificates: CertificatesColumn, pub(crate) streams: StreamsColumn, #[allow(unused)] epoch_chain: DBColumn, pub(crate) unverified: DBColumn, } impl ValidatorPerpetualTables { pub fn open(path: &Path) -> Self { let path = path.join("perpetual"); if !path.exists() { warn!("Path {:?} does not exist, creating it", path); create_dir_all(&path).expect("Cannot create ValidatorPerpetualTables directory"); } let mut options_stream = default_options(); options_stream.set_prefix_extractor(rocksdb::SliceTransform::create_fixed_prefix( constants::SOURCE_STREAMS_PREFIX_SIZE, )); let cfs = vec![ ColumnFamilyDescriptor::new(cfs::CERTIFICATES, default_options()), ColumnFamilyDescriptor::new(cfs::STREAMS, options_stream), ColumnFamilyDescriptor::new(cfs::EPOCH_CHAIN, default_options()), ColumnFamilyDescriptor::new(cfs::UNVERIFIED, default_options()), ]; let db = init_with_cfs(&path, default_options(), cfs).unwrap_or_else(|e| { panic!("Cannot open DB at {:?} => error {:?}", path, e); }); Self { certificates: DBColumn::reopen(&db, cfs::CERTIFICATES), streams: DBColumn::reopen(&db, cfs::STREAMS), epoch_chain: DBColumn::reopen(&db, cfs::EPOCH_CHAIN), unverified: DBColumn::reopen(&db, cfs::UNVERIFIED), } } } ================================================ FILE: crates/topos-tce-synchronizer/Cargo.toml ================================================ [package] name = "topos-tce-synchronizer" version = "0.1.0" edition = "2021" [lints] workspace = true [dependencies] async-trait.workspace = true futures.workspace = true thiserror.workspace = true tokio = { workspace = true, features = ["full"] } tokio-stream.workspace = true tokio-util.workspace = true tonic.workspace = true tracing-subscriber = { workspace = true, features = ["env-filter", "json", "ansi", "fmt"] } tracing.workspace = true uuid = { workspace = true, features = ["v4", "serde"] } topos-core = { workspace = true, features = 
["api"] } topos-config = { path = "../topos-config/" } topos-p2p = { path = "../topos-p2p" } topos-tce-gatekeeper = { path = "../topos-tce-gatekeeper/" } topos-tce-storage = { path = "../topos-tce-storage/" } [dev-dependencies] libp2p.workspace = true mockall = "0.11" async-trait.workspace = true topos-test-sdk = { path = "../topos-test-sdk/" } rstest.workspace = true test-log.workspace = true env_logger.workspace = true ================================================ FILE: crates/topos-tce-synchronizer/src/builder.rs ================================================ use std::{future::IntoFuture, sync::Arc}; use tokio::{spawn, sync::mpsc}; use tokio_stream::wrappers::ReceiverStream; use tokio_util::sync::CancellationToken; use topos_p2p::NetworkClient; use topos_tce_storage::validator::ValidatorStore; use tracing::Instrument; use crate::{ checkpoints_collector::{CheckpointSynchronizer, CheckpointsCollectorError}, Synchronizer, SynchronizerError, SynchronizerEvent, }; use topos_config::tce::synchronization::SynchronizationConfig; pub struct SynchronizerBuilder { network_client: Option, store: Option>, config: SynchronizationConfig, /// Size of the channel producing events (default: 100) event_channel_size: usize, /// CancellationToken used to trigger shutdown of the Synchronizer shutdown: Option, } impl Default for SynchronizerBuilder { fn default() -> Self { Self { network_client: None, store: None, config: SynchronizationConfig::default(), event_channel_size: 100, shutdown: None, } } } impl SynchronizerBuilder { pub fn build( mut self, ) -> Result<(Synchronizer, ReceiverStream), SynchronizerError> { let shutdown = if let Some(shutdown) = self.shutdown.take() { shutdown } else { return Err(SynchronizerError::CheckpointsCollectorError( CheckpointsCollectorError::NoStore, ))?; }; let (events, events_recv) = mpsc::channel(self.event_channel_size); let (sync_events, checkpoints_collector_stream) = mpsc::channel(self.event_channel_size); let checkpoints_collector_stream 
= ReceiverStream::new(checkpoints_collector_stream); spawn( CheckpointSynchronizer { config: self.config, network: if let Some(network) = self.network_client { network } else { return Err(SynchronizerError::CheckpointsCollectorError( CheckpointsCollectorError::NoNetworkClient, ))?; }, store: if let Some(store) = self.store { store } else { return Err(SynchronizerError::CheckpointsCollectorError( CheckpointsCollectorError::NoStore, ))?; }, current_request_id: None, shutdown: shutdown.child_token(), events: sync_events, } .into_future() .in_current_span(), ); Ok(( Synchronizer { shutdown, events, checkpoints_collector_stream, }, ReceiverStream::new(events_recv), )) } } impl SynchronizerBuilder { pub fn with_store(mut self, store: Arc) -> Self { self.store = Some(store); self } pub fn with_network_client(mut self, network_client: NetworkClient) -> Self { self.network_client = Some(network_client); self } pub fn with_config(mut self, config: SynchronizationConfig) -> Self { self.config = config; self } pub fn with_shutdown(mut self, shutdown: CancellationToken) -> Self { self.shutdown = Some(shutdown); self } } ================================================ FILE: crates/topos-tce-synchronizer/src/checkpoints_collector/error.rs ================================================ use thiserror::Error; use tokio::sync::oneshot::error::RecvError; #[derive(Error, Debug)] pub enum CheckpointsCollectorError { #[error("Unable to start the CheckpointsCollector")] UnableToStart, #[error("Unable to start the CheckpointsCollector: No gatekeeper client provided")] NoGatekeeperClient, #[error("Unable to start the CheckpointsCollector: No network client provided")] NoNetworkClient, #[error("Error while dealing with Start command: already starting")] AlreadyStarting, #[error("Error while trying to fetch random peers")] UnableToFetchRandomPeer, #[error(transparent)] OneshotCommunicationChannel(#[from] RecvError), #[error("Unable to start the CheckpointsCollector: No store provided")] 
    NoStore,
}

================================================ FILE: crates/topos-tce-synchronizer/src/checkpoints_collector/mod.rs ================================================

use std::{
    collections::{HashMap, HashSet},
    future::IntoFuture,
    str::FromStr,
    sync::Arc,
};

use futures::{future::BoxFuture, FutureExt};
use tokio::sync::mpsc;
use tokio_util::sync::CancellationToken;
use tonic::Status;
use topos_core::{
    api::grpc::{
        self,
        shared::v1::Uuid as APIUuid,
        tce::v1::{
            synchronizer_service_client::SynchronizerServiceClient,
            synchronizer_service_server::SynchronizerServiceServer, CheckpointRequest,
            CheckpointResponse, FetchCertificatesRequest,
        },
    },
    errors::GrpcParsingError,
    types::ProofOfDelivery,
    uci::{Certificate, CertificateId, SubnetId},
};
use topos_config::tce::synchronization::SynchronizationConfig;
use topos_p2p::{error::P2PError, NetworkClient, PeerId};
use topos_tce_storage::{errors::StorageError, store::ReadStore, validator::ValidatorStore};
use tracing::{debug, error, info, warn};
use uuid::Uuid;

mod error;
#[cfg(test)]
mod tests;

pub use error::CheckpointsCollectorError;

use crate::SynchronizerService;

/// Background task periodically pulling checkpoints and missing certificates
/// from random peers (generic parameters elided in this extract view).
pub struct CheckpointSynchronizer {
    pub(crate) config: SynchronizationConfig,
    // p2p client used to reach peers over gRPC.
    pub(crate) network: NetworkClient,
    #[allow(unused)]
    pub(crate) store: Arc,
    // Set while a synchronization round is in flight; `None` allows a new one.
    pub(crate) current_request_id: Option,
    pub(crate) shutdown: CancellationToken,
    #[allow(dead_code)]
    pub(crate) events: mpsc::Sender,
}

impl IntoFuture for CheckpointSynchronizer {
    type Output = Result<(), CheckpointsCollectorError>;
    type IntoFuture = BoxFuture<'static, Self::Output>;

    // Periodic loop: every `interval_seconds`, start a sync round unless one
    // is already pending; exit when the shutdown token fires.
    fn into_future(mut self) -> Self::IntoFuture {
        async move {
            let mut interval = tokio::time::interval(tokio::time::Duration::from_secs(
                self.config.interval_seconds,
            ));
            loop {
                tokio::select! {
                    _tick = interval.tick() => {
                        // On every tick, checking if there is a pending synchronization
                        // If there is, skip
                        // If there is not,
                        // 1. Ask a random peer for the diff between local and its latest checkpoint
                        // 2. Validate the PoD diff, if fail, go back to 1
                        // 3. Based on the diff, check if we already have some of the certs
                        //    - Fetch every missing certs from one peer
                        //    - Each certs triggers a precedence check
                        if self.current_request_id.is_none() {
                            if let Err(error) = self.initiate_request().await {
                                warn!("Unsuccessful sync due to: {}", error);
                            }
                        }
                    }
                    _ = self.shutdown.cancelled() => {
                        break;
                    }
                }
            }
            Ok(())
        }
        .boxed()
    }
}

/// Errors produced during a single synchronization round.
#[derive(Debug, thiserror::Error)]
enum SyncError {
    #[error("Unable to fetch target peer from network layer")]
    UnableToFetchTargetPeer,

    #[error("Unable to parse subnet id")]
    // TODO: Check if needed after full merge of grpc over p2p
    #[allow(unused)]
    UnableToParseSubnetId,

    #[error(transparent)]
    GrpcParsingError(#[from] GrpcParsingError),

    #[error(transparent)]
    CertificateConversion(#[from] topos_core::api::grpc::shared::v1_conversions_certificate::Error),

    #[error(transparent)]
    SubnetConversion(#[from] topos_core::api::grpc::shared::v1_conversions_subnet::Error),

    #[error(transparent)]
    Store(#[from] StorageError),

    #[error(transparent)]
    Network(#[from] P2PError),

    #[error(transparent)]
    Grpc(#[from] Status),
}

impl CheckpointSynchronizer {
    // Send our current checkpoint (proof of delivery of every subnet head)
    // to `peer` and return the per-subnet diff of proofs we are missing.
    async fn ask_for_checkpoint(
        &self,
        peer: PeerId,
    ) -> Result>, SyncError> {
        let request_id = Uuid::new_v4();
        // Build the local checkpoint: the proof of delivery of each
        // per-subnet head certificate currently in the store.
        let checkpoint: Vec = {
            let certificate_ids = self
                .store
                .get_checkpoint()?
                .values()
                .map(|head| head.certificate_id)
                .collect::>();

            self.store
                .get_certificates(&certificate_ids[..])?
                .into_iter()
                .filter_map(|value| {
                    value.map(|delivered_certificate| delivered_certificate.proof_of_delivery)
                })
                .map(Into::into)
                .collect()
        };
        debug!(
            "Asking {} for latest checkpoint (request_id: {}), with local checkpoint: {:?}",
            peer, request_id, checkpoint
        );
        let req = CheckpointRequest {
            request_id: Some(request_id.into()),
            checkpoint,
            limit_per_subnet: self
                .config
                .limit_per_subnet
                .try_into()
                .unwrap_or(SynchronizationConfig::LIMIT_PER_SUBNET as u64),
        };
        let mut client: SynchronizerServiceClient<_> = self
            .network
            .new_grpc_client::, SynchronizerServiceServer>(peer)
            .await?;
        let response: CheckpointResponse = client.fetch_checkpoint(req).await?.into_inner();
        // Convert the wire diff back into domain types, keyed by subnet.
        let diff = response
            .checkpoint_diff
            .into_iter()
            .map(|v| {
                let subnet =
                    SubnetId::from_str(&v.key[..]).map_err(|_| SyncError::UnableToParseSubnetId)?;
                let proofs = v
                    .value
                    .into_iter()
                    .map(TryInto::try_into)
                    .collect::, _>>()?;
                Ok::<_, SyncError>((subnet, proofs))
            })
            .collect::, _>>()?;

        Ok(diff)
    }

    // Persist the received proofs as "unverified" and return the certificate
    // ids to catch up, chunked in groups of 10 for fetching.
    fn insert_unverified_proofs(
        &self,
        diff: HashMap>,
    ) -> Result>, SyncError> {
        let mut certs: HashSet = HashSet::new();
        for (subnet, proofs) in diff {
            let len = proofs.len();
            let unverified_certs = self.store.insert_unverified_proofs(proofs)?;
            debug!(
                "Persist {} unverified proof of delivery for {}",
                len, subnet
            );
            certs.extend(&unverified_certs[..]);
        }

        // Chunk certs
        let mut chunked_certs: Vec> = vec![];
        let certs = certs.into_iter().collect::>();
        for certs in certs.chunks(10) {
            chunked_certs.push(certs.to_vec());
        }

        Ok(chunked_certs)
    }

    // Fetch the payload of the given certificate ids from a random peer.
    async fn fetch_certificates(
        &self,
        certificate_ids: Vec,
    ) -> Result, SyncError> {
        let target_peer = self
            .network
            .random_known_peer()
            .await
            .map_err(|_| SyncError::UnableToFetchTargetPeer)?;

        let request_id: Option = Some(Uuid::new_v4().into());
        let req = FetchCertificatesRequest {
            request_id,
            certificates: certificate_ids
                .iter()
                .map(|cert| (*cert.as_array()).into())
                .collect(),
        };
        debug!(
            "Ask {} for certificates payload: {:?}",
            target_peer, certificate_ids
        );
        let mut client: SynchronizerServiceClient<_> = self
            .network
            .new_grpc_client::, SynchronizerServiceServer>(target_peer)
            .await?;
        let response = client.fetch_certificates(req).await?.into_inner();
        let certificates: Result, _> = response
            .certificates
            .into_iter()
            .map(TryInto::try_into)
            .collect();

        Ok(certificates?)
    }

    // One full synchronization round: checkpoint diff -> persist proofs ->
    // fetch missing certificates -> hand each off to the store concurrently.
    async fn initiate_request(&mut self) -> Result<(), SyncError> {
        // 1. Ask a random peer for the diff between local and its latest checkpoint
        let target_peer = self
            .network
            .random_known_peer()
            .await
            .map_err(|_| SyncError::UnableToFetchTargetPeer)?;

        let diff = self.ask_for_checkpoint(target_peer).await?;

        let certificates_to_catchup = self.insert_unverified_proofs(diff)?;

        info!("Certificates to catchup: {}", certificates_to_catchup.len());
        for certificates in certificates_to_catchup {
            let certificates = self.fetch_certificates(certificates).await?;
            // TODO: verify every certificates
            for certificate in certificates {
                let store = self.store.clone();
                // Each certificate is synchronized on its own task; an
                // already-existing certificate is silently ignored.
                tokio::spawn(async move {
                    // Validate
                    // Check precedence
                    let certificate_id = certificate.id;
                    match store.synchronize_certificate(certificate).await {
                        Ok(_) => debug!("Certificate {} synchronized", certificate_id),
                        Err(StorageError::InternalStorage(topos_tce_storage::errors::InternalStorageError::CertificateAlreadyExists)) => {}
                        Err(e) => error!("Failed to sync because of: {:?}", e),
                    }
                });
            }
        }

        Ok(())
    }
}

pub enum CheckpointsCollectorEvent {}

================================================ FILE: crates/topos-tce-synchronizer/src/checkpoints_collector/tests/integration.rs ================================================

use std::time::Duration;

use rstest::rstest;
use test_log::test;
use topos_core::{
    api::grpc::tce::v1::{
        synchronizer_service_client::SynchronizerServiceClient,
        synchronizer_service_server::SynchronizerServiceServer, FetchCertificatesRequest,
    },
    types::CertificateDelivered,
};
use topos_test_sdk::{
    certificates::create_certificate_chain,
    tce::{create_network, NodeConfig},
};
use uuid::Uuid;

use
crate::SynchronizerService;

// End-to-end check: a fresh node joining a 5-node cluster can fetch the
// delivered certificates from the boot node over the synchronizer service.
#[rstest]
#[test(tokio::test)]
#[timeout(Duration::from_secs(5))]
async fn network_test() {
    let subnet = topos_test_sdk::constants::SOURCE_SUBNET_ID_1;
    // One delivered certificate from SOURCE_SUBNET_ID_1 to TARGET_SUBNET_ID_1.
    let certificates: Vec =
        create_certificate_chain(subnet, &[topos_test_sdk::constants::TARGET_SUBNET_ID_1], 1);
    let boot_node = NodeConfig::from_seed(1);
    let cluster = create_network(5, &certificates[..]).await;
    let boot_node = cluster
        .get(&boot_node.keypair.public().to_peer_id())
        .unwrap()
        .node_config
        .clone();
    let cfg = NodeConfig {
        seed: 6,
        minimum_cluster_size: 1,
        ..Default::default()
    };
    let (client, _, _) = cfg
        .bootstrap(&[cfg.clone(), boot_node.clone()], None)
        .await
        .unwrap();
    use topos_core::api::grpc::shared::v1::Uuid as APIUuid;
    let peer = boot_node.keypair.public().to_peer_id();
    let mut client: SynchronizerServiceClient<_> = client
        .new_grpc_client::, SynchronizerServiceServer>(
            peer,
        )
        .await
        .unwrap();
    let request_id: APIUuid = Uuid::new_v4().into();
    let req = FetchCertificatesRequest {
        request_id: Some(request_id),
        certificates: certificates
            .clone()
            .into_iter()
            .map(|c| c.certificate.id.into())
            .collect(),
    };
    let res = client.fetch_certificates(req).await.unwrap().into_inner();
    let expected = certificates
        .into_iter()
        .map(|c| c.certificate.into())
        .collect::>();
    // The service must return exactly the certificates that were delivered.
    assert_eq!(res.certificates, expected);
}

================================================ FILE: crates/topos-tce-synchronizer/src/checkpoints_collector/tests.rs ================================================

use std::time::Duration;

use rstest::rstest;
use topos_core::{
    api::grpc::tce::v1::{
        synchronizer_service_client::SynchronizerServiceClient,
        synchronizer_service_server::SynchronizerServiceServer, CheckpointMapFieldEntry,
        CheckpointRequest, CheckpointResponse, FetchCertificatesRequest,
    },
    types::CertificateDelivered,
};
use topos_p2p::GrpcRouter;
use topos_test_sdk::{
    certificates::create_certificate_chain,
    storage::{create_fullnode_store, create_validator_store},
    tce::{create_network, NodeConfig},
};
use uuid::Uuid;

use crate::SynchronizerService;

mod integration;

// Round-trip CheckpointRequest and CheckpointResponse through their byte
// encoding and assert equality after decoding.
#[test]
fn encode() {
    use topos_core::api::grpc::shared::v1::Uuid as APIUuid;
    let request_id: APIUuid = Uuid::new_v4().into();
    let req = CheckpointRequest {
        request_id: Some(request_id),
        checkpoint: vec![],
        limit_per_subnet: 100,
    };
    let x: Vec = req.clone().into();
    let y: CheckpointRequest = x.try_into().unwrap();
    assert_eq!(y, req);
    let subnet = topos_test_sdk::constants::SOURCE_SUBNET_ID_1;
    let certificates: Vec =
        create_certificate_chain(subnet, &[topos_test_sdk::constants::TARGET_SUBNET_ID_1], 1);
    let cert = certificates.first().cloned().unwrap();
    let request_id: APIUuid = Uuid::new_v4().into();
    let req = CheckpointResponse {
        request_id: Some(request_id),
        checkpoint_diff: vec![CheckpointMapFieldEntry {
            key: subnet.to_string(),
            value: vec![cert.proof_of_delivery.into()],
        }],
    };
    let x: Vec = req.clone().into();
    let y: CheckpointResponse = x.try_into().unwrap();
    assert_eq!(y, req);
}

// Same scenario as `integration::network_test`, but the joining node also
// mounts its own SynchronizerService backed by fresh (empty) stores.
#[rstest]
#[test_log::test(tokio::test)]
#[timeout(Duration::from_secs(10))]
async fn check_fetch_certificates() {
    let subnet = topos_test_sdk::constants::SOURCE_SUBNET_ID_1;
    let certificates: Vec =
        create_certificate_chain(subnet, &[topos_test_sdk::constants::TARGET_SUBNET_ID_1], 1);
    let boot_node = NodeConfig::from_seed(1);
    let cluster = create_network(5, &certificates[..]).await;
    let boot_node = cluster
        .get(&boot_node.keypair.public().to_peer_id())
        .unwrap()
        .node_config
        .clone();
    let cfg = NodeConfig {
        seed: 6,
        minimum_cluster_size: 3,
        ..Default::default()
    };
    let fullnode_store = create_fullnode_store(&[]).await;
    let validator_store =
        create_validator_store(&[], futures::future::ready(fullnode_store.clone())).await;
    let router = GrpcRouter::new(tonic::transport::Server::builder()).add_service(
        SynchronizerServiceServer::new(SynchronizerService {
            validator_store: validator_store.clone(),
        }),
    );
    let (client, _, _) = cfg
        .bootstrap(&[cfg.clone(), boot_node.clone()], Some(router))
        .await
        .unwrap();
    use topos_core::api::grpc::shared::v1::Uuid as APIUuid;
    let request_id: APIUuid = Uuid::new_v4().into();
    let req = FetchCertificatesRequest {
        request_id: Some(request_id),
        certificates: certificates
            .clone()
            .into_iter()
            .map(|c| c.certificate.id.into())
            .collect(),
    };
    let mut client: SynchronizerServiceClient<_> = client
        .new_grpc_client::, SynchronizerServiceServer>(boot_node.keypair.public().to_peer_id())
        .await
        .unwrap();
    let res = client.fetch_certificates(req).await;
    assert!(res.is_ok());
    let res = res.unwrap().into_inner();
    let expected = certificates
        .into_iter()
        .map(|c| c.certificate.into())
        .collect::>();
    assert_eq!(res.certificates, expected);
}

// TODO: unimplemented test stubs below.
#[test]
fn sync_unordered_certificates() {}

#[test]
fn sync_conflicting_certificate() {}

#[test]
fn fetch_certificate_failure() {}

#[test]
fn missing_certificate_for_pod() {}

================================================ FILE: crates/topos-tce-synchronizer/src/lib.rs ================================================

use std::{cmp::max, future::IntoFuture, sync::Arc};

use builder::SynchronizerBuilder;
use checkpoints_collector::{CheckpointsCollectorError, CheckpointsCollectorEvent};
use futures::{future::BoxFuture, FutureExt};
use thiserror::Error;
use tokio::sync::{
    mpsc,
    oneshot::{self, error::RecvError},
};
use tokio_stream::StreamExt;

mod builder;
mod checkpoints_collector;

use tokio_stream::wrappers::ReceiverStream;
use tokio_util::sync::CancellationToken;
use tonic::{Request, Response, Status};
use topos_config::tce::synchronization::SynchronizationConfig;
use topos_core::{
    api::grpc::{
        shared::v1::positions::SourceStreamPosition,
        tce::v1::{
            synchronizer_service_server::SynchronizerService as GrpcSynchronizerService,
            CheckpointMapFieldEntry, CheckpointRequest, CheckpointResponse,
            FetchCertificatesRequest, FetchCertificatesResponse, ProofOfDelivery, SignedReady,
        },
    },
    uci::CertificateId,
};
use topos_tce_storage::{store::ReadStore, validator::ValidatorStore};
use tracing::{debug, error, info, trace, warn};
use
uuid::Uuid;

/// Handle over the background synchronization task (generic parameters
/// elided in this extract view).
pub struct Synchronizer {
    // Cancelling this token stops the Synchronizer and its child collector.
    pub(crate) shutdown: CancellationToken,
    #[allow(dead_code)]
    pub(crate) events: mpsc::Sender,
    // Stream of events produced by the CheckpointSynchronizer task.
    pub(crate) checkpoints_collector_stream: ReceiverStream,
}

impl IntoFuture for Synchronizer {
    type Output = Result<(), SynchronizerError>;
    type IntoFuture = BoxFuture<'static, Self::Output>;

    // Main loop: drain collector events until the shutdown token fires.
    fn into_future(mut self) -> Self::IntoFuture {
        async move {
            // NOTE: the loop only ever breaks with `None`, so the error
            // branch below is currently unreachable.
            let shutdowned: Option = loop {
                tokio::select! {
                    _ = self.shutdown.cancelled() => {
                        break None
                    }
                    _checkpoint_event = self.checkpoints_collector_stream.next() => {}
                }
            };

            if let Some(_error) = shutdowned {
                warn!("Shutting down Synchronizer due to error...");
            } else {
                info!("Shutting down Synchronizer...");
            }

            Ok(())
        }
        .boxed()
    }
}

impl Synchronizer {
    /// Entry point to configure and build a [`Synchronizer`].
    pub fn builder() -> SynchronizerBuilder {
        SynchronizerBuilder::default()
    }
}

#[derive(Error, Debug)]
pub enum SynchronizerError {
    #[error("Error while dealing with CheckpointsCollector: {0}")]
    CheckpointsCollectorError(#[from] CheckpointsCollectorError),

    #[error("Error while dealing with Start command: unable to start")]
    UnableToStart,

    #[error("Error while dealing with Start command: already starting")]
    AlreadyStarting,

    #[error("Error while dealing with state locking: unable to lock status")]
    UnableToLockStatus,

    #[error(transparent)]
    OneshotCommunicationChannel(#[from] RecvError),

    #[error("Unable to execute shutdown on the Synchronizer: {0}")]
    ShutdownCommunication(mpsc::error::SendError>),

    #[error("No network protocol receiver set")]
    NoProtocolReceiver,
}

pub enum SynchronizerEvent {}

/// gRPC service answering checkpoint and certificate requests from peers.
#[derive(Clone)]
pub struct SynchronizerService {
    pub validator_store: Arc,
}

#[async_trait::async_trait]
impl GrpcSynchronizerService for SynchronizerService {
    /// Return the requested certificates that are present in the local store.
    /// Unknown ids are skipped; a storage error yields an empty list rather
    /// than a gRPC error.
    async fn fetch_certificates(
        &self,
        request: Request,
    ) -> Result, Status> {
        let request = request.into_inner();
        let certificate_ids: Vec = request
            .certificates
            .into_iter()
            .map(|c| c.try_into())
            .collect::, _>>()
            .map_err(|_| Status::invalid_argument("Unable to parse certificates"))?;

        let response = if let Ok(certs) = self.validator_store.get_certificates(&certificate_ids[..]) {
            let certs: Vec<_> = certs
                .into_iter()
                .filter_map(|v| v.map(|c| c.certificate.into()))
                .collect::>();
            FetchCertificatesResponse {
                request_id: request.request_id,
                certificates: certs,
            }
        } else {
            FetchCertificatesResponse {
                request_id: request.request_id,
                certificates: vec![],
            }
        };

        Ok(Response::new(response))
    }

    /// Compute the diff between the caller's checkpoint and the local store
    /// and return it as a per-subnet map of proofs of delivery.
    async fn fetch_checkpoint(
        &self,
        request: Request,
    ) -> Result, Status> {
        let request = request.into_inner();
        let id = request
            .request_id
            .map(|id| id.into())
            .unwrap_or(Uuid::new_v4());
        debug!("Received request for checkpoint (request_id: {})", id);
        // NOTE(review): `max` enforces a *floor* of LIMIT_PER_SUBNET, so a
        // client can never request fewer entries — confirm `min` (a cap) was
        // not the intent here.
        let limit_per_subnet: usize = max(
            request
                .limit_per_subnet
                .try_into()
                .unwrap_or(SynchronizationConfig::LIMIT_PER_SUBNET),
            SynchronizationConfig::LIMIT_PER_SUBNET,
        );

        let res: Result, _> = request
            .checkpoint
            .into_iter()
            .map(|v| v.try_into())
            .collect();

        let res = match res {
            Err(error) => {
                error!("Invalid checkpoint for request {}: {}", id, error);
                return Err(Status::invalid_argument("Invalid checkpoint"));
            }
            Ok(value) => value,
        };

        debug!("Request {} contains {} proof_of_delivery", id, res.len());
        trace!("Request {} contains {:?}", id, res);
        let diff = match self
            .validator_store
            .get_checkpoint_diff(&res, limit_per_subnet)
        {
            Ok(diff) => {
                debug!(
                    "Fetched checkpoint diff from storage for request {}, got {:?}",
                    id, diff
                );
                // Convert each domain proof into its wire representation.
                diff.into_iter()
                    .map(|(key, value)| {
                        let v: Vec<_> = value
                            .into_iter()
                            .map(|v| ProofOfDelivery {
                                delivery_position: Some(SourceStreamPosition {
                                    source_subnet_id: Some(v.delivery_position.subnet_id.into()),
                                    position: *v.delivery_position.position,
                                    certificate_id: Some(v.certificate_id.into()),
                                }),
                                readies: v
                                    .readies
                                    .into_iter()
                                    .map(|(ready, signature)| SignedReady { ready, signature })
                                    .collect(),
                                threshold: v.threshold,
                            })
                            .collect();
                        CheckpointMapFieldEntry {
                            key: key.to_string(),
                            value: v,
                        }
                    })
                    .collect()
            }
            Err(error) => {
                // A storage failure degrades to an empty diff instead of a
                // gRPC error.
                error!(
                    "Error while fetching checkpoint diff for request {}: {}",
                    id, error
                );
                Vec::new()
            }
        };

        debug!(
            "Responding to request {} with checkpoint diff containing {:?}",
            id,
            diff.iter()
                .map(|v| (v.key.clone(), v.value.len()))
                .collect::>()
        );
        let response = CheckpointResponse {
            request_id: request.request_id,
            checkpoint_diff: diff,
        };

        Ok(Response::new(response))
    }
}

================================================ FILE: crates/topos-telemetry/Cargo.toml ================================================

[package]
name = "topos-telemetry"
version = "0.1.0"
edition = "2021"

[lints]
workspace = true

[dependencies]
opentelemetry.workspace = true
opentelemetry_sdk = { workspace = true, features = ["rt-tokio"] }
tracing-opentelemetry.workspace = true
tracing.workspace = true
tonic.workspace = true
tracing-subscriber = { optional = true, workspace = true, features = ["env-filter", "json", "ansi", "fmt"] }
opentelemetry-otlp = { optional = true, workspace = true, features = ["grpc-tonic", "metrics", "tls-roots"] }
serde = { workspace = true, features = ["derive", "std"] }

[features]
tracing = ["tracing-subscriber", "opentelemetry-otlp"]

================================================ FILE: crates/topos-telemetry/src/lib.rs ================================================

use std::{collections::HashMap, str::FromStr};

use ::tracing::warn;
use opentelemetry::{
    global,
    propagation::{Extractor, Injector},
    Context,
};
use serde::{Deserialize, Serialize};
use tonic::metadata::MetadataKey;

#[cfg(feature = "tracing")]
pub mod tracing;

// Adapters bridging tonic gRPC metadata and opentelemetry propagation.
pub struct TonicMetaInjector<'a>(pub &'a mut tonic::metadata::MetadataMap);
pub struct TonicMetaExtractor<'a>(pub &'a tonic::metadata::MetadataMap);

impl<'a> TonicMetaExtractor<'a> {
    /// Extract the trace context carried by the gRPC metadata using the
    /// globally registered text-map propagator.
    pub fn extract(&self) -> opentelemetry::Context {
        global::get_text_map_propagator(|propagator| propagator.extract(self))
    }
}

impl<'a> TonicMetaInjector<'a> {
    /// Inject `context` into the gRPC metadata using the globally registered
    /// text-map propagator.
    pub fn inject(&mut self, context: &Context) {
        global::get_text_map_propagator(|propagator| {
            propagator.inject_context(context, self);
        })
    }
}

impl<'a> Injector for TonicMetaInjector<'a> {
    ///
Set a key and value in the MetadataMap. Does nothing if the key or value are not valid inputs
    fn set(&mut self, key: &str, value: String) {
        if let Ok(key) = MetadataKey::from_str(key) {
            if let Ok(val) = value.parse() {
                self.0.insert(key, val);
            } else {
                warn!("Invalid value: {}", value);
            }
        } else {
            warn!("Invalid key: {}", key);
        }
    }
}

impl<'a> Extractor for TonicMetaExtractor<'a> {
    // Look up a metadata value; entries that are not valid ASCII yield None.
    fn get(&self, key: &str) -> Option<&str> {
        self.0.get(key).and_then(|v| v.to_str().ok())
    }

    // List every metadata key, both ASCII and binary.
    fn keys(&self) -> Vec<&str> {
        self.0
            .keys()
            .map(|k| match k {
                tonic::metadata::KeyRef::Ascii(k) => k.as_str(),
                tonic::metadata::KeyRef::Binary(k) => k.as_str(),
            })
            .collect()
    }
}

/// Serializable carrier of a trace context, allowing spans to be propagated
/// across process boundaries (e.g. embedded in a message payload).
#[derive(Default, Debug, Clone, Serialize, Deserialize)]
pub struct PropagationContext {
    // Key/value pairs written by the configured text-map propagator
    // (generic parameters elided in this extract view).
    context: HashMap,
}

impl PropagationContext {
    /// Capture `context` into a serializable form via the global propagator.
    pub fn inject(context: &Context) -> Self {
        global::get_text_map_propagator(|propagator| {
            let mut propagation_context = PropagationContext::default();
            propagator.inject_context(context, &mut propagation_context);
            propagation_context
        })
    }

    /// Rebuild an opentelemetry [`Context`] from the captured pairs.
    pub fn extract(&self) -> opentelemetry::Context {
        global::get_text_map_propagator(|propagator| propagator.extract(self))
    }
}

impl Injector for PropagationContext {
    fn set(&mut self, key: &str, value: String) {
        self.context.insert(key.to_string(), value);
    }
}

impl Extractor for PropagationContext {
    fn get(&self, key: &str) -> Option<&str> {
        self.context.get(key).map(|s| s.as_ref())
    }

    fn keys(&self) -> Vec<&str> {
        self.context.keys().map(|k| k.as_ref()).collect()
    }
}

================================================ FILE: crates/topos-telemetry/src/tracing.rs ================================================

use opentelemetry::trace::TracerProvider;
use opentelemetry::{global, KeyValue};
use opentelemetry_otlp::{SpanExporterBuilder, WithExportConfig};
use opentelemetry_sdk::trace::{BatchConfigBuilder, BatchSpanProcessor, SpanLimits};
use opentelemetry_sdk::{propagation::TraceContextPropagator, trace::Sampler, Resource};
use std::time::Duration;
use
tracing::Level; use tracing_subscriber::util::TryInitError; use tracing_subscriber::{ prelude::__tracing_subscriber_SubscriberExt, util::SubscriberInitExt, EnvFilter, Layer, }; fn verbose_to_level(verbose: u8) -> Level { match verbose { 0 => Level::ERROR, 1 => Level::WARN, 2 => Level::INFO, 3 => Level::DEBUG, _ => Level::TRACE, } } fn build_resources(otlp_service_name: String, version: &'static str) -> Vec { let mut resources = Vec::new(); resources.push(KeyValue::new("service.name", otlp_service_name)); resources.push(KeyValue::new("service.version", version)); let custom_resources: Vec<_> = std::env::var("TOPOS_OTLP_TAGS") .unwrap_or_default() .split(',') // NOTE: limit to 10 tags to avoid exploit .take(10) .filter_map(|tag_raw| { let mut v = tag_raw.splitn(2, '='); match (v.next(), v.next()) { (Some(key), Some(value)) if !key.trim().is_empty() && !value.trim().is_empty() => { Some(KeyValue::new( key.trim().to_string(), value.trim().to_string(), )) } _ => None, } }) .collect(); resources.extend(custom_resources); resources } fn create_filter(verbose: u8) -> EnvFilter { if verbose > 0 { EnvFilter::try_new(format!("warn,topos={}", verbose_to_level(verbose).as_str())).unwrap() } else { EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("warn,topos=info")) } } // Setup tracing // If otlp agent and otlp service name are provided, opentelemetry collection will be used pub fn setup_tracing( verbose: u8, no_color: bool, otlp_agent: Option, otlp_service_name: Option, version: &'static str, ) -> Result<(), TryInitError> { let mut layers = Vec::new(); let ansi = !no_color; layers.push( match std::env::var("TOPOS_LOG_FORMAT") .map(|f| f.to_lowercase()) .as_ref() .map(|s| s.as_str()) { Ok("json") => tracing_subscriber::fmt::layer() .json() .with_ansi(ansi) .with_filter(create_filter(verbose)) .boxed(), Ok("pretty") => tracing_subscriber::fmt::layer() .pretty() .with_ansi(ansi) .with_filter(create_filter(verbose)) .boxed(), _ => 
tracing_subscriber::fmt::layer() .compact() .with_ansi(ansi) .with_filter(create_filter(verbose)) .boxed(), }, ); // Setup instrumentation if both otlp agent and otlp service name are provided as arguments if let (Some(otlp_agent), Some(otlp_service_name)) = (otlp_agent, otlp_service_name) { let resources = build_resources(otlp_service_name, version); let mut trace_config = opentelemetry_sdk::trace::config(); trace_config = trace_config.with_sampler(Sampler::AlwaysOn); trace_config = trace_config.with_max_events_per_span( match std::env::var("OTLP_MAX_EVENTS_PER_SPAN") { Ok(v) => v .parse::() .unwrap_or(SpanLimits::default().max_events_per_span), _ => SpanLimits::default().max_events_per_span, }, ); trace_config = trace_config.with_max_attributes_per_span( match std::env::var("OTLP_MAX_ATTRIBUTES_PER_SPAN") { Ok(v) => v .parse::() .unwrap_or(SpanLimits::default().max_attributes_per_span), _ => SpanLimits::default().max_attributes_per_span, }, ); trace_config = trace_config.with_max_links_per_span(match std::env::var("OTLP_MAX_LINK_PER_SPAN") { Ok(v) => v .parse::() .unwrap_or(SpanLimits::default().max_links_per_span), _ => SpanLimits::default().max_links_per_span, }); trace_config = trace_config.with_max_attributes_per_event( match std::env::var("OTLP_MAX_ATTRIBUTES_PER_EVENT") { Ok(v) => v .parse::() .unwrap_or(SpanLimits::default().max_attributes_per_event), _ => SpanLimits::default().max_attributes_per_event, }, ); trace_config = trace_config.with_max_attributes_per_link( match std::env::var("OTLP_MAX_ATTRIBUTES_PER_LINK") { Ok(v) => v .parse::() .unwrap_or(SpanLimits::default().max_attributes_per_link), _ => SpanLimits::default().max_attributes_per_link, }, ); trace_config = trace_config.with_resource(Resource::new(resources)); let exporter = opentelemetry_otlp::new_exporter() .tonic() .with_endpoint(otlp_agent); let batch_processor_config = BatchConfigBuilder::default() .with_scheduled_delay(match std::env::var("OTLP_BATCH_SCHEDULED_DELAY") { Ok(v) => 
Duration::from_millis(v.parse::().unwrap_or(5_000)), _ => Duration::from_millis(5_000), }) .with_max_queue_size(match std::env::var("OTLP_BATCH_MAX_QUEUE_SIZE") { Ok(v) => v.parse::().unwrap_or(2048), _ => 2048, }) .with_max_export_batch_size(match std::env::var("OTLP_BATCH_MAX_EXPORTER_BATCH_SIZE") { Ok(v) => v.parse::().unwrap_or(512), _ => 512, }) .with_max_export_timeout(match std::env::var("OTLP_BATCH_EXPORT_TIMEOUT") { Ok(v) => Duration::from_millis(v.parse::().unwrap_or(30_000)), _ => Duration::from_millis(30_000), }) .with_max_concurrent_exports( match std::env::var("OTLP_BATCH_MAX_CONCURRENT_EXPORTS") { Ok(v) => v.parse::().unwrap_or(1), _ => 1, }, ); let span_exporter: SpanExporterBuilder = exporter.into(); let mut provider_builder = opentelemetry_sdk::trace::TracerProvider::builder() .with_span_processor( BatchSpanProcessor::builder( span_exporter.build_span_exporter().unwrap(), opentelemetry_sdk::runtime::Tokio, ) .with_batch_config(batch_processor_config.build()) .build(), ); provider_builder = provider_builder.with_config(trace_config); let provider = provider_builder.build(); let tracer = provider.versioned_tracer( "opentelemetry-otlp", Some(env!("CARGO_PKG_VERSION")), None::<&str>, None, ); let _ = global::set_tracer_provider(provider); layers.push( tracing_opentelemetry::layer() .with_tracer(tracer) .with_filter(create_filter(verbose)) .boxed(), ); opentelemetry::global::set_text_map_propagator(TraceContextPropagator::new()); global::set_text_map_propagator(TraceContextPropagator::new()); } tracing_subscriber::registry().with(layers).try_init()?; Ok(()) } ================================================ FILE: crates/topos-test-sdk/Cargo.toml ================================================ [package] name = "topos-test-sdk" version = "0.1.0" edition = "2021" build = "build.rs" [lints] workspace = true [dependencies] topos-core = { workspace = true, features = ["uci", "api"] } topos-crypto = { path = "../topos-crypto/" } topos-config = { path = 
"../topos-config/" } topos-p2p = { path = "../topos-p2p/" } topos-tce = { path = "../topos-tce/" } topos-tce-api = { path = "../topos-tce-api/" } topos-tce-broadcast = { path = "../topos-tce-broadcast/" } topos-tce-gatekeeper = { path = "../topos-tce-gatekeeper/" } topos-tce-storage = { path = "../topos-tce-storage/" } topos-tce-synchronizer = { path = "../topos-tce-synchronizer/" } hex.workspace = true ethers.workspace = true async-trait.workspace = true futures.workspace = true lazy_static = { version = "1.4.0" } libp2p = { workspace = true, features = ["macros"] } proc_macro_sdk = { path = "./proc_macro_sdk/" } rand.workspace = true rstest.workspace = true tokio-stream.workspace = true prost.workspace = true tonic = { workspace = true, default-features = false, features = [ "prost", "codegen", "transport", ] } tower.workspace = true tokio-util.workspace = true tokio.workspace = true tracing.workspace = true async-stream.workspace = true [build-dependencies] tonic-build.workspace = true ================================================ FILE: crates/topos-test-sdk/build.rs ================================================ use std::{env, path::PathBuf, str::FromStr}; fn main() { let mut path = PathBuf::from_str( &env::var("CARGO_MANIFEST_DIR").expect("unable to build du to missing CARGO_MANIFEST_DIR"), ) .expect("Unable to build PathBuf for topos-test-sdk"); path.push("./../../target/tmp/"); let path = path.as_path(); println!( "cargo:rustc-env=TOPOS_TEST_SDK_TMP={}", path.to_str().unwrap() ); let path = PathBuf::from("./src/grpc/behaviour/"); tonic_build::configure() .out_dir(path) .compile( &[ "./proto/behaviour/helloworld.proto", "./proto/behaviour/noop.proto", ], &["proto/"], ) .unwrap(); } ================================================ FILE: crates/topos-test-sdk/proc_macro_sdk/Cargo.toml ================================================ [package] name = "proc_macro_sdk" version = "0.1.0" edition = "2021" [lints] workspace = true [lib] proc-macro = true 
[dependencies] syn = "1.0" quote = "1.0" ================================================ FILE: crates/topos-test-sdk/proc_macro_sdk/src/lib.rs ================================================ use proc_macro::TokenStream; use quote::format_ident; use quote::quote; use syn::parse_macro_input; use syn::Expr; use syn::ExprLit; use syn::ExprRange; use syn::Lit; #[proc_macro] pub fn generate_certificate_ids(input: TokenStream) -> TokenStream { let range: ExprRange = parse_macro_input!(input as ExprRange); let range = parse_range(range); let mut quotes = Vec::new(); for i in range { let certificate_name = format_ident!("CERTIFICATE_ID_{}", i); quotes.push(quote! { pub const #certificate_name: ::topos_core::uci::CertificateId = ::topos_core::uci::CertificateId::from_array([#i; ::topos_core::uci::CERTIFICATE_ID_LENGTH]); }); } TokenStream::from(quote! { #(#quotes)* }) } #[proc_macro] pub fn generate_source_subnet_ids(input: TokenStream) -> TokenStream { generate_subnet_ids("SOURCE", input) } #[proc_macro] pub fn generate_target_subnet_ids(input: TokenStream) -> TokenStream { generate_subnet_ids("TARGET", input) } fn generate_subnet_ids(subnet_type: &str, input: TokenStream) -> TokenStream { let range: ExprRange = parse_macro_input!(input as ExprRange); let range = parse_range(range); let mut quotes = Vec::new(); for (index, i) in range.enumerate() { let source_subnet_name = format_ident!("{}_SUBNET_ID_{}", subnet_type, index + 1); quotes.push(quote! { pub const #source_subnet_name: ::topos_core::uci::SubnetId = ::topos_core::uci::SubnetId::from_array([#i; ::topos_core::uci::SUBNET_ID_LENGTH]); }); } TokenStream::from(quote! { #(#quotes)* }) } fn parse_range(range: ExprRange) -> std::ops::Range { let from: u8 = if let Expr::Lit(ExprLit { lit: Lit::Int(value), .. 
}) = *range .from .expect("topos_test_sdk: generate cert/subnet, from input isn't valid") { value .base10_parse() .expect("topos_test_sdk: generate cert/subnet, unable to parse from int") } else { panic!("topos_test_sdk: generate cert/subnet, unable to parse from input"); }; let to: u8 = if let Expr::Lit(ExprLit { lit: Lit::Int(value), .. }) = *range .to .expect("topos_test_sdk: generate cert/subnet, to input isn't valid") { value .base10_parse() .expect("topos_test_sdk: generate cert/subnet, unable to parse to int") } else { panic!("topos_test_sdk: generate cert/subnet, unable to parse to input"); }; match range.limits { syn::RangeLimits::HalfOpen(_) => from..to, syn::RangeLimits::Closed(_) => from..(to + 1), } } ================================================ FILE: crates/topos-test-sdk/proto/behaviour/helloworld.proto ================================================ syntax = "proto3"; package helloworld; // The greeting service definition. service Greeter { // Sends a greeting rpc SayHello (HelloRequest) returns (HelloReply) {} // Send a greeting with a delay rpc SayHelloWithDelay(HelloWithDelayRequest) returns (HelloReply) {} } // The request message containing the user's name. message HelloRequest { string name = 1; } // The request message containing the user's name and the delay. message HelloWithDelayRequest { string name = 1; uint64 delay_in_seconds = 2; } // The response message containing the greetings message HelloReply { string message = 1; } ================================================ FILE: crates/topos-test-sdk/proto/behaviour/noop.proto ================================================ syntax = "proto3"; package noop; // The greeting service definition. 
service Noop { // Trigger nothing rpc do_nothing (NoopRequest) returns (NoopResponse) {} } message NoopRequest { } message NoopResponse { } ================================================ FILE: crates/topos-test-sdk/src/certificates/mod.rs ================================================ use rstest::*; use std::collections::HashMap; use topos_core::{ types::{ stream::CertificateSourceStreamPosition, stream::Position, CertificateDelivered, ProofOfDelivery, }, uci::{Certificate, CertificateId, SubnetId, INITIAL_CERTIFICATE_ID}, }; use crate::constants::PREV_CERTIFICATE_ID; use crate::constants::SOURCE_SUBNET_ID_1; use crate::constants::TARGET_SUBNET_ID_1; #[fixture] pub fn create_certificate( #[default(SOURCE_SUBNET_ID_1)] source_subnet: SubnetId, #[default(&[TARGET_SUBNET_ID_1])] target_subnets: &[SubnetId], #[default(None)] previous_certificate_id: Option, ) -> Certificate { Certificate::new_with_default_fields( previous_certificate_id.unwrap_or(INITIAL_CERTIFICATE_ID), source_subnet, target_subnets, ) .unwrap() } #[fixture] pub fn create_certificate_at_position( #[default(Position::ZERO)] position: Position, create_certificate: Certificate, ) -> CertificateDelivered { let certificate_id = create_certificate.id; let subnet_id = create_certificate.source_subnet_id; CertificateDelivered { certificate: create_certificate, proof_of_delivery: ProofOfDelivery { certificate_id, delivery_position: CertificateSourceStreamPosition { subnet_id, position, }, readies: vec![], threshold: 0, }, } } #[fixture] pub fn create_certificate_chain( #[default(SOURCE_SUBNET_ID_1)] source_subnet: topos_core::uci::SubnetId, #[default(&[TARGET_SUBNET_ID_1])] target_subnets: &[topos_core::uci::SubnetId], #[default(1)] number: usize, ) -> Vec { let mut certificates = Vec::new(); let mut parent = None; for i in 0..number { let cert = Certificate::new_with_default_fields( parent.take().unwrap_or(*PREV_CERTIFICATE_ID.as_array()), source_subnet, target_subnets, ) .unwrap(); parent = 
Some(*cert.id.as_array()); let id = cert.id; certificates.push(CertificateDelivered { certificate: cert, proof_of_delivery: ProofOfDelivery { certificate_id: id, delivery_position: CertificateSourceStreamPosition { subnet_id: source_subnet, position: i.try_into().unwrap(), }, readies: Vec::new(), threshold: 0, }, }); } certificates } /// Generate and assign nb_cert number of certificates to existing subnets /// Could be different number of certificates per subnet pub fn create_certificate_chains( subnets: &[SubnetId], number_of_certificates_per_subnet: usize, ) -> HashMap> { let mut result = HashMap::new(); subnets.iter().for_each(|subnet| { let targets = subnets .iter() .filter(|sub| *sub != subnet) .copied() .collect::>(); let certs = create_certificate_chain(*subnet, targets.as_ref(), number_of_certificates_per_subnet); result.entry(*subnet).or_insert(certs); }); result } ================================================ FILE: crates/topos-test-sdk/src/crypto.rs ================================================ use std::{str::FromStr, sync::Arc}; use rstest::fixture; use topos_crypto::messages::MessageSigner; #[fixture(key = "122f3ae6ade1fd136b292cea4f6243c7811160352c8821528547a1fe7c459daf")] pub fn message_signer(key: &str) -> Arc { Arc::new(MessageSigner::from_str(key).unwrap()) } ================================================ FILE: crates/topos-test-sdk/src/grpc/behaviour/helloworld.rs ================================================ /// The request message containing the user's name. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct HelloRequest { #[prost(string, tag = "1")] pub name: ::prost::alloc::string::String, } /// The request message containing the user's name and the delay. 
#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct HelloWithDelayRequest { #[prost(string, tag = "1")] pub name: ::prost::alloc::string::String, #[prost(uint64, tag = "2")] pub delay_in_seconds: u64, } /// The response message containing the greetings #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct HelloReply { #[prost(string, tag = "1")] pub message: ::prost::alloc::string::String, } /// Generated client implementations. pub mod greeter_client { #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] use tonic::codegen::*; use tonic::codegen::http::Uri; /// The greeting service definition. #[derive(Debug, Clone)] pub struct GreeterClient { inner: tonic::client::Grpc, } impl GreeterClient { /// Attempt to create a new client by connecting to a given endpoint. pub async fn connect(dst: D) -> Result where D: TryInto, D::Error: Into, { let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; Ok(Self::new(conn)) } } impl GreeterClient where T: tonic::client::GrpcService, T::Error: Into, T::ResponseBody: Body + Send + 'static, ::Error: Into + Send, { pub fn new(inner: T) -> Self { let inner = tonic::client::Grpc::new(inner); Self { inner } } pub fn with_origin(inner: T, origin: Uri) -> Self { let inner = tonic::client::Grpc::with_origin(inner, origin); Self { inner } } pub fn with_interceptor( inner: T, interceptor: F, ) -> GreeterClient> where F: tonic::service::Interceptor, T::ResponseBody: Default, T: tonic::codegen::Service< http::Request, Response = http::Response< >::ResponseBody, >, >, , >>::Error: Into + Send + Sync, { GreeterClient::new(InterceptedService::new(inner, interceptor)) } /// Compress requests with the given encoding. /// /// This requires the server to support it otherwise it might respond with an /// error. 
#[must_use] pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { self.inner = self.inner.send_compressed(encoding); self } /// Enable decompressing responses. #[must_use] pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { self.inner = self.inner.accept_compressed(encoding); self } /// Limits the maximum size of a decoded message. /// /// Default: `4MB` #[must_use] pub fn max_decoding_message_size(mut self, limit: usize) -> Self { self.inner = self.inner.max_decoding_message_size(limit); self } /// Limits the maximum size of an encoded message. /// /// Default: `usize::MAX` #[must_use] pub fn max_encoding_message_size(mut self, limit: usize) -> Self { self.inner = self.inner.max_encoding_message_size(limit); self } /// Sends a greeting pub async fn say_hello( &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { self.inner .ready() .await .map_err(|e| { tonic::Status::new( tonic::Code::Unknown, format!("Service was not ready: {}", e.into()), ) })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/helloworld.Greeter/SayHello", ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("helloworld.Greeter", "SayHello")); self.inner.unary(req, path, codec).await } /// Send a greeting with a delay pub async fn say_hello_with_delay( &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { self.inner .ready() .await .map_err(|e| { tonic::Status::new( tonic::Code::Unknown, format!("Service was not ready: {}", e.into()), ) })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/helloworld.Greeter/SayHelloWithDelay", ); let mut req = request.into_request(); req.extensions_mut() .insert(GrpcMethod::new("helloworld.Greeter", "SayHelloWithDelay")); self.inner.unary(req, path, codec).await } } } /// Generated server implementations. 
pub mod greeter_server { #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] use tonic::codegen::*; /// Generated trait containing gRPC methods that should be implemented for use with GreeterServer. #[async_trait] pub trait Greeter: Send + Sync + 'static { /// Sends a greeting async fn say_hello( &self, request: tonic::Request, ) -> std::result::Result, tonic::Status>; /// Send a greeting with a delay async fn say_hello_with_delay( &self, request: tonic::Request, ) -> std::result::Result, tonic::Status>; } /// The greeting service definition. #[derive(Debug)] pub struct GreeterServer { inner: _Inner, accept_compression_encodings: EnabledCompressionEncodings, send_compression_encodings: EnabledCompressionEncodings, max_decoding_message_size: Option, max_encoding_message_size: Option, } struct _Inner(Arc); impl GreeterServer { pub fn new(inner: T) -> Self { Self::from_arc(Arc::new(inner)) } pub fn from_arc(inner: Arc) -> Self { let inner = _Inner(inner); Self { inner, accept_compression_encodings: Default::default(), send_compression_encodings: Default::default(), max_decoding_message_size: None, max_encoding_message_size: None, } } pub fn with_interceptor( inner: T, interceptor: F, ) -> InterceptedService where F: tonic::service::Interceptor, { InterceptedService::new(Self::new(inner), interceptor) } /// Enable decompressing requests with the given encoding. #[must_use] pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { self.accept_compression_encodings.enable(encoding); self } /// Compress responses with the given encoding, if the client supports it. #[must_use] pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { self.send_compression_encodings.enable(encoding); self } /// Limits the maximum size of a decoded message. 
/// /// Default: `4MB` #[must_use] pub fn max_decoding_message_size(mut self, limit: usize) -> Self { self.max_decoding_message_size = Some(limit); self } /// Limits the maximum size of an encoded message. /// /// Default: `usize::MAX` #[must_use] pub fn max_encoding_message_size(mut self, limit: usize) -> Self { self.max_encoding_message_size = Some(limit); self } } impl tonic::codegen::Service> for GreeterServer where T: Greeter, B: Body + Send + 'static, B::Error: Into + Send + 'static, { type Response = http::Response; type Error = std::convert::Infallible; type Future = BoxFuture; fn poll_ready( &mut self, _cx: &mut Context<'_>, ) -> Poll> { Poll::Ready(Ok(())) } fn call(&mut self, req: http::Request) -> Self::Future { let inner = self.inner.clone(); match req.uri().path() { "/helloworld.Greeter/SayHello" => { #[allow(non_camel_case_types)] struct SayHelloSvc(pub Arc); impl tonic::server::UnaryService for SayHelloSvc { type Response = super::HelloReply; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { ::say_hello(&inner, request).await }; Box::pin(fut) } } let accept_compression_encodings = self.accept_compression_encodings; let send_compression_encodings = self.send_compression_encodings; let max_decoding_message_size = self.max_decoding_message_size; let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { let inner = inner.0; let method = SayHelloSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( accept_compression_encodings, send_compression_encodings, ) .apply_max_message_size_config( max_decoding_message_size, max_encoding_message_size, ); let res = grpc.unary(method, req).await; Ok(res) }; Box::pin(fut) } "/helloworld.Greeter/SayHelloWithDelay" => { #[allow(non_camel_case_types)] 
struct SayHelloWithDelaySvc(pub Arc); impl< T: Greeter, > tonic::server::UnaryService for SayHelloWithDelaySvc { type Response = super::HelloReply; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { ::say_hello_with_delay(&inner, request).await }; Box::pin(fut) } } let accept_compression_encodings = self.accept_compression_encodings; let send_compression_encodings = self.send_compression_encodings; let max_decoding_message_size = self.max_decoding_message_size; let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { let inner = inner.0; let method = SayHelloWithDelaySvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( accept_compression_encodings, send_compression_encodings, ) .apply_max_message_size_config( max_decoding_message_size, max_encoding_message_size, ); let res = grpc.unary(method, req).await; Ok(res) }; Box::pin(fut) } _ => { Box::pin(async move { Ok( http::Response::builder() .status(200) .header("grpc-status", "12") .header("content-type", "application/grpc") .body(empty_body()) .unwrap(), ) }) } } } } impl Clone for GreeterServer { fn clone(&self) -> Self { let inner = self.inner.clone(); Self { inner, accept_compression_encodings: self.accept_compression_encodings, send_compression_encodings: self.send_compression_encodings, max_decoding_message_size: self.max_decoding_message_size, max_encoding_message_size: self.max_encoding_message_size, } } } impl Clone for _Inner { fn clone(&self) -> Self { Self(Arc::clone(&self.0)) } } impl std::fmt::Debug for _Inner { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{:?}", self.0) } } impl tonic::server::NamedService for GreeterServer { const NAME: &'static str = "helloworld.Greeter"; } } 
================================================ FILE: crates/topos-test-sdk/src/grpc/behaviour/noop.rs ================================================ #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct NoopRequest {} #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct NoopResponse {} /// Generated client implementations. pub mod noop_client { #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] use tonic::codegen::*; use tonic::codegen::http::Uri; /// The greeting service definition. #[derive(Debug, Clone)] pub struct NoopClient { inner: tonic::client::Grpc, } impl NoopClient { /// Attempt to create a new client by connecting to a given endpoint. pub async fn connect(dst: D) -> Result where D: TryInto, D::Error: Into, { let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; Ok(Self::new(conn)) } } impl NoopClient where T: tonic::client::GrpcService, T::Error: Into, T::ResponseBody: Body + Send + 'static, ::Error: Into + Send, { pub fn new(inner: T) -> Self { let inner = tonic::client::Grpc::new(inner); Self { inner } } pub fn with_origin(inner: T, origin: Uri) -> Self { let inner = tonic::client::Grpc::with_origin(inner, origin); Self { inner } } pub fn with_interceptor( inner: T, interceptor: F, ) -> NoopClient> where F: tonic::service::Interceptor, T::ResponseBody: Default, T: tonic::codegen::Service< http::Request, Response = http::Response< >::ResponseBody, >, >, , >>::Error: Into + Send + Sync, { NoopClient::new(InterceptedService::new(inner, interceptor)) } /// Compress requests with the given encoding. /// /// This requires the server to support it otherwise it might respond with an /// error. #[must_use] pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { self.inner = self.inner.send_compressed(encoding); self } /// Enable decompressing responses. 
#[must_use] pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { self.inner = self.inner.accept_compressed(encoding); self } /// Limits the maximum size of a decoded message. /// /// Default: `4MB` #[must_use] pub fn max_decoding_message_size(mut self, limit: usize) -> Self { self.inner = self.inner.max_decoding_message_size(limit); self } /// Limits the maximum size of an encoded message. /// /// Default: `usize::MAX` #[must_use] pub fn max_encoding_message_size(mut self, limit: usize) -> Self { self.inner = self.inner.max_encoding_message_size(limit); self } /// Trigger nothing pub async fn do_nothing( &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { self.inner .ready() .await .map_err(|e| { tonic::Status::new( tonic::Code::Unknown, format!("Service was not ready: {}", e.into()), ) })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static("/noop.Noop/do_nothing"); let mut req = request.into_request(); req.extensions_mut().insert(GrpcMethod::new("noop.Noop", "do_nothing")); self.inner.unary(req, path, codec).await } } } /// Generated server implementations. pub mod noop_server { #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] use tonic::codegen::*; /// Generated trait containing gRPC methods that should be implemented for use with NoopServer. #[async_trait] pub trait Noop: Send + Sync + 'static { /// Trigger nothing async fn do_nothing( &self, request: tonic::Request, ) -> std::result::Result, tonic::Status>; } /// The greeting service definition. 
#[derive(Debug)] pub struct NoopServer { inner: _Inner, accept_compression_encodings: EnabledCompressionEncodings, send_compression_encodings: EnabledCompressionEncodings, max_decoding_message_size: Option, max_encoding_message_size: Option, } struct _Inner(Arc); impl NoopServer { pub fn new(inner: T) -> Self { Self::from_arc(Arc::new(inner)) } pub fn from_arc(inner: Arc) -> Self { let inner = _Inner(inner); Self { inner, accept_compression_encodings: Default::default(), send_compression_encodings: Default::default(), max_decoding_message_size: None, max_encoding_message_size: None, } } pub fn with_interceptor( inner: T, interceptor: F, ) -> InterceptedService where F: tonic::service::Interceptor, { InterceptedService::new(Self::new(inner), interceptor) } /// Enable decompressing requests with the given encoding. #[must_use] pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { self.accept_compression_encodings.enable(encoding); self } /// Compress responses with the given encoding, if the client supports it. #[must_use] pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { self.send_compression_encodings.enable(encoding); self } /// Limits the maximum size of a decoded message. /// /// Default: `4MB` #[must_use] pub fn max_decoding_message_size(mut self, limit: usize) -> Self { self.max_decoding_message_size = Some(limit); self } /// Limits the maximum size of an encoded message. 
/// /// Default: `usize::MAX` #[must_use] pub fn max_encoding_message_size(mut self, limit: usize) -> Self { self.max_encoding_message_size = Some(limit); self } } impl tonic::codegen::Service> for NoopServer where T: Noop, B: Body + Send + 'static, B::Error: Into + Send + 'static, { type Response = http::Response; type Error = std::convert::Infallible; type Future = BoxFuture; fn poll_ready( &mut self, _cx: &mut Context<'_>, ) -> Poll> { Poll::Ready(Ok(())) } fn call(&mut self, req: http::Request) -> Self::Future { let inner = self.inner.clone(); match req.uri().path() { "/noop.Noop/do_nothing" => { #[allow(non_camel_case_types)] struct do_nothingSvc(pub Arc); impl tonic::server::UnaryService for do_nothingSvc { type Response = super::NoopResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { ::do_nothing(&inner, request).await }; Box::pin(fut) } } let accept_compression_encodings = self.accept_compression_encodings; let send_compression_encodings = self.send_compression_encodings; let max_decoding_message_size = self.max_decoding_message_size; let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { let inner = inner.0; let method = do_nothingSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( accept_compression_encodings, send_compression_encodings, ) .apply_max_message_size_config( max_decoding_message_size, max_encoding_message_size, ); let res = grpc.unary(method, req).await; Ok(res) }; Box::pin(fut) } _ => { Box::pin(async move { Ok( http::Response::builder() .status(200) .header("grpc-status", "12") .header("content-type", "application/grpc") .body(empty_body()) .unwrap(), ) }) } } } } impl Clone for NoopServer { fn clone(&self) -> Self { let inner = self.inner.clone(); Self { 
inner, accept_compression_encodings: self.accept_compression_encodings, send_compression_encodings: self.send_compression_encodings, max_decoding_message_size: self.max_decoding_message_size, max_encoding_message_size: self.max_encoding_message_size, } } } impl Clone for _Inner { fn clone(&self) -> Self { Self(Arc::clone(&self.0)) } } impl std::fmt::Debug for _Inner { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{:?}", self.0) } } impl tonic::server::NamedService for NoopServer { const NAME: &'static str = "noop.Noop"; } } ================================================ FILE: crates/topos-test-sdk/src/grpc/mod.rs ================================================ pub mod behaviour { #[rustfmt::skip] pub mod helloworld; #[rustfmt::skip] pub mod noop; } pub mod implementations { use std::time::Duration; use async_trait::async_trait; use tonic::{Request, Response, Status}; use super::behaviour::{ helloworld::{greeter_server::Greeter, HelloReply, HelloRequest, HelloWithDelayRequest}, noop::{noop_server::Noop, NoopRequest, NoopResponse}, }; #[derive(Default)] pub struct DummyServer {} #[async_trait] impl Greeter for DummyServer { async fn say_hello( &self, request: Request, ) -> Result, Status> { Ok(Response::new(HelloReply { message: format!("Hello {}", request.into_inner().name), })) } async fn say_hello_with_delay( &self, request: Request, ) -> Result, Status> { let request = request.into_inner(); tokio::time::sleep(Duration::from_secs(request.delay_in_seconds)).await; Ok(Response::new(HelloReply { message: format!("Hello {}", request.name), })) } } #[derive(Default)] pub struct NoopServer {} #[async_trait] impl Noop for NoopServer { async fn do_nothing( &self, _: Request, ) -> Result, Status> { Ok(Response::new(NoopResponse {})) } } } ================================================ FILE: crates/topos-test-sdk/src/lib.rs ================================================ pub mod certificates; pub mod crypto; pub mod networking; pub 
mod p2p; pub mod sequencer; pub mod storage; pub mod tce; use rand::Rng; use std::{ collections::HashSet, net::SocketAddr, path::PathBuf, str::FromStr, sync::Mutex, thread, time::{SystemTime, UNIX_EPOCH}, }; use lazy_static::lazy_static; use rstest::fixture; lazy_static! { pub static ref PORT_MAPPING: Mutex> = Mutex::new(HashSet::new()); } pub mod grpc; pub mod constants { use proc_macro_sdk::generate_certificate_ids; use proc_macro_sdk::generate_source_subnet_ids; use proc_macro_sdk::generate_target_subnet_ids; use topos_core::uci::CertificateId; use topos_core::uci::CERTIFICATE_ID_LENGTH; generate_source_subnet_ids!(100..150); generate_target_subnet_ids!(150..200); // Certificate range is 0..100 pub const PREV_CERTIFICATE_ID: CertificateId = CertificateId::from_array([0u8; CERTIFICATE_ID_LENGTH]); generate_certificate_ids!(1..100); } #[macro_export] macro_rules! wait_for_event { ($node:expr, matches: $( $pattern:pat_param )|+ $( if $guard: expr )?, $error_msg:expr) => { wait_for_event!($node, matches: $( $pattern )|+ $( if $guard )?, $error_msg, 100); }; ($node:expr, matches: $( $pattern:pat_param )|+ $( if $guard: expr )?, $error_msg:expr, $timeout:expr) => { let assertion = async { while let Some(event) = $node.await { if matches!(event, $( $pattern )|+ $( if $guard )?) 
{ break; } } }; if let Err(_) = tokio::time::timeout(std::time::Duration::from_millis($timeout), assertion).await { panic!("Timed out waiting ({}ms) for event: {}", $timeout, $error_msg); } }; } pub fn get_available_port() -> u16 { get_available_addr().port() } pub fn get_available_addr() -> SocketAddr { let mut port_mapping = PORT_MAPPING.lock().unwrap(); let mut addr = None; for _ in 0..10 { let new_addr = next_available_port(); if port_mapping.insert(new_addr.port()) { addr = Some(new_addr); break; } } assert!(addr.is_some(), "Can't find an available port"); addr.unwrap() } fn next_available_port() -> SocketAddr { // let socket = UdpSocket::bind("127.0.0.1:0").expect("Can't find an available port"); // socket.local_addr().unwrap() // use std::net::{TcpListener, TcpStream}; let host = "127.0.0.1"; // Request a random available port from the OS let listener = TcpListener::bind((host, 0)).expect("Can't bind to an available port"); let addr = listener.local_addr().expect("Can't find an available port"); // Create and accept a connection (which we'll promptly drop) in order to force the port // into the TIME_WAIT state, ensuring that the port will be reserved from some limited // amount of time (roughly 60s on some Linux systems) let _sender = TcpStream::connect(addr).expect("Can't connect to an available port"); let _incoming = listener.accept().expect("Can't accept an available port"); addr } #[fixture] fn folder_name() -> &'static str { Box::leak(Box::new( thread::current().name().unwrap().replace("::", "_"), )) } #[fixture] pub fn create_folder(folder_name: &str) -> PathBuf { let dir = env!("TOPOS_TEST_SDK_TMP"); let mut temp_dir = std::path::PathBuf::from_str(dir).expect("Unable to read CARGO_TARGET_TMPDIR"); let time = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); let mut rng = rand::thread_rng(); temp_dir.push(format!( "{}/data_{}_{}", folder_name, time.as_nanos(), rng.gen::() )); temp_dir } ================================================ FILE: 
crates/topos-test-sdk/src/networking/mod.rs ================================================ use std::net::SocketAddr; use std::net::{TcpListener, TcpStream}; pub fn get_available_port() -> u16 { get_available_addr().port() } pub fn get_available_addr() -> SocketAddr { let host = "127.0.0.1"; let listener = TcpListener::bind((host, 0)).expect("Can't bind to an available port"); let addr = listener .local_addr() .expect("Can't extract local addr from listener"); // Forcing the port into the TIME_WAIT state is necessary to ensure that the port will be // reserved from some limited amount of time (roughly 60s on some Linux systems) let _sender = TcpStream::connect(addr).expect("Can't connect to an available port"); let _incoming = listener.accept().expect("Can't accept connection"); addr } ================================================ FILE: crates/topos-test-sdk/src/p2p/mod.rs ================================================ use libp2p::{ build_multiaddr, identity::{self, Keypair}, Multiaddr, }; use rand::{thread_rng, Rng}; use crate::networking::get_available_port; pub fn local_peer(peer_index: u8, memory_transport: bool) -> (Keypair, Multiaddr) { let peer_id: Keypair = keypair_from_seed(peer_index); let local_listen_addr = if memory_transport { build_multiaddr![Memory(thread_rng().gen::())] } else { let port = get_available_port(); format!( "/ip4/127.0.0.1/tcp/{}/p2p/{}", port, peer_id.public().to_peer_id() ) .parse() .unwrap() }; (peer_id, local_listen_addr) } pub fn keypair_from_seed(seed: u8) -> Keypair { let mut bytes = [0u8; 32]; bytes[0] = seed; identity::Keypair::ed25519_from_bytes(bytes).expect("Invalid keypair") } ================================================ FILE: crates/topos-test-sdk/src/sequencer/mod.rs ================================================ pub const TEST_VALIDATOR_KEY: &str = "11eddfae7abe45531b3f18342c8062969323a7131d3043f1a33c40df74803cc7"; ================================================ FILE: 
crates/topos-test-sdk/src/storage/mod.rs ================================================ use rstest::fixture; use std::path::PathBuf; use std::sync::Arc; use topos_core::types::CertificateDelivered; use topos_tce_storage::{ epoch::EpochValidatorsStore, epoch::ValidatorPerEpochStore, fullnode::FullNodeStore, index::IndexTables, store::WriteStore, validator::ValidatorPerpetualTables, validator::ValidatorStore, StorageClient, }; use crate::folder_name; #[fixture(certificates = &[])] pub async fn storage_client(certificates: &[CertificateDelivered]) -> StorageClient { let store = create_validator_store::partial_1(certificates).await; StorageClient::new(store) } #[fixture] pub fn create_folder(folder_name: &str) -> PathBuf { let mut path = crate::create_folder(folder_name); path.push("rocksdb"); path } #[fixture(certificates = &[])] pub async fn create_validator_store( certificates: &[CertificateDelivered], #[future] create_fullnode_store: Arc, ) -> Arc { let temp_dir = create_folder::default(); let fullnode_store = create_fullnode_store.await; let store = ValidatorStore::open(&temp_dir, fullnode_store).expect("Unable to create validator store"); store .insert_certificates_delivered(certificates) .await .expect("Unable to insert predefined certificates"); store } pub async fn create_validator_store_with_fullnode( fullnode_store: Arc, ) -> Arc { ValidatorStore::open(&create_folder::default(), fullnode_store) .expect("Unable to create validator store") } #[fixture(certificates = &[])] pub async fn create_fullnode_store(certificates: &[CertificateDelivered]) -> Arc { let temp_dir = create_folder::default(); let perpetual_tables = Arc::new(ValidatorPerpetualTables::open(&temp_dir)); let index_tables = Arc::new(IndexTables::open(&temp_dir)); let validators_store = EpochValidatorsStore::new(&temp_dir).expect("Unable to create EpochValidators store"); let epoch_store = ValidatorPerEpochStore::new(0, &temp_dir).expect("Unable to create Per epoch store"); let store = 
FullNodeStore::open( epoch_store, validators_store, perpetual_tables, index_tables, ) .expect("Unable to create full node store"); store .insert_certificates_delivered(certificates) .await .unwrap(); store } ================================================ FILE: crates/topos-test-sdk/src/tce/gatekeeper.rs ================================================ use std::error::Error; use std::future::IntoFuture; use tokio::spawn; use tokio::task::JoinHandle; use topos_tce_gatekeeper::GatekeeperClient; use topos_tce_gatekeeper::GatekeeperError; pub async fn create_gatekeeper( ) -> Result<(GatekeeperClient, JoinHandle>), Box> { let (gatekeeper_client, gatekeeper_runtime) = topos_tce_gatekeeper::Gatekeeper::builder() .await .expect("Can't create the Gatekeeper"); let gatekeeper_join_handle = spawn(gatekeeper_runtime.into_future()); Ok((gatekeeper_client, gatekeeper_join_handle)) } ================================================ FILE: crates/topos-test-sdk/src/tce/mod.rs ================================================ use futures::future::join_all; use futures::Stream; use futures::StreamExt; use libp2p::identity::Keypair; use libp2p::{Multiaddr, PeerId}; use rstest::*; use std::collections::{HashMap, HashSet}; use std::error::Error; use std::sync::Arc; use tokio::spawn; use tokio::sync::broadcast; use tokio::{sync::mpsc, task::JoinHandle}; use tokio_stream::wrappers::BroadcastStream; use tokio_util::sync::CancellationToken; use tonic::transport::Channel; use tonic::Request; use tonic::Response; use tonic::Status; use tracing::Instrument; use tonic::transport::server::Router; use tonic::transport::Server; use topos_core::api::grpc::tce::v1::{ api_service_client::ApiServiceClient, console_service_client::ConsoleServiceClient, synchronizer_service_server::SynchronizerService as GrpcSynchronizerService, synchronizer_service_server::SynchronizerServiceServer, }; use topos_core::api::grpc::tce::v1::{ CheckpointRequest, CheckpointResponse, FetchCertificatesRequest, 
FetchCertificatesResponse, }; use topos_core::api::grpc::tce::v1::{StatusRequest, StatusResponse}; use topos_core::types::CertificateDelivered; use topos_core::types::ValidatorId; use topos_core::uci::SubnetId; use topos_crypto::messages::MessageSigner; use topos_p2p::{error::P2PError, Event, GrpcRouter, NetworkClient, Runtime}; use topos_tce::{events::Events, AppContext}; use topos_tce_storage::StorageClient; use topos_tce_synchronizer::SynchronizerService; use tracing::info; use self::gatekeeper::create_gatekeeper; use self::p2p::{bootstrap_network, create_network_worker}; use self::protocol::{create_reliable_broadcast_client, create_reliable_broadcast_params}; use self::public_api::create_public_api; use self::synchronizer::create_synchronizer; use crate::crypto::message_signer; use crate::p2p::local_peer; use crate::storage::create_fullnode_store; use crate::storage::create_validator_store; pub mod gatekeeper; pub mod p2p; pub mod protocol; pub mod public_api; pub mod synchronizer; #[derive(Debug)] pub struct TceContext { pub node_config: NodeConfig, pub event_stream: mpsc::Receiver, pub peer_id: PeerId, // P2P ID pub api_entrypoint: String, pub api_grpc_client: ApiServiceClient, // GRPC Client for this peer (tce node) pub console_grpc_client: ConsoleServiceClient, // Console TCE GRPC Client for this peer (tce node) pub runtime_join_handle: JoinHandle>, pub app_join_handle: JoinHandle<()>, pub gatekeeper_join_handle: JoinHandle>, pub synchronizer_join_handle: JoinHandle>, pub connected_subnets: Option>, // Particular subnet clients (topos nodes) connected to this tce node pub shutdown: (CancellationToken, mpsc::Receiver<()>), } impl Drop for TceContext { fn drop(&mut self) { self.app_join_handle.abort(); self.runtime_join_handle.abort(); self.gatekeeper_join_handle.abort(); self.synchronizer_join_handle.abort(); } } impl TceContext { pub async fn shutdown(&mut self) -> Result<(), Box> { info!("Context performing shutdown..."); self.shutdown.0.cancel(); 
self.shutdown.1.recv().await; info!("Shutdown finished..."); Ok(()) } } #[derive(Debug, Clone)] pub struct NodeConfig { pub seed: u8, pub keypair: Keypair, pub addr: Multiaddr, pub minimum_cluster_size: usize, pub dummy: bool, } impl Default for NodeConfig { fn default() -> Self { Self::from_seed(1) } } impl NodeConfig { pub fn standalone() -> Self { Self { dummy: true, ..Default::default() } } pub fn memory(seed: u8) -> Self { let (keypair, addr) = local_peer(seed, true); Self { seed, keypair, addr, minimum_cluster_size: 0, dummy: false, } } pub fn from_seed(seed: u8) -> Self { let (keypair, addr) = local_peer(seed, false); Self { seed, keypair, addr, minimum_cluster_size: 0, dummy: false, } } pub fn peer_id(&self) -> PeerId { self.keypair.public().to_peer_id() } pub async fn bootstrap( &self, peers: &[NodeConfig], router: Option, ) -> Result< ( NetworkClient, impl Stream + Unpin + Send, JoinHandle>, ), Box, > { bootstrap_network( self.seed, self.addr.clone(), peers, self.minimum_cluster_size, router, self.dummy, ) .await } pub async fn create( &self, peers: &[NodeConfig], router: Option, ) -> Result<(NetworkClient, impl Stream, Runtime), P2PError> { create_network_worker( self.seed, vec![self.addr.clone()], peers, self.minimum_cluster_size, router, ) .await } } #[derive(Clone)] struct DummyService {} #[async_trait::async_trait] impl GrpcSynchronizerService for DummyService { async fn fetch_certificates( &self, _request: Request, ) -> Result, Status> { Err(Status::unimplemented("fetch_certificates")) } async fn fetch_checkpoint( &self, _request: Request, ) -> Result, Status> { Err(Status::unimplemented("fetch_checkpoint")) } } pub fn create_dummy_router() -> Router { Server::builder().add_service(SynchronizerServiceServer::new(DummyService {})) } #[fixture( config = NodeConfig::default(), peers = &[], certificates = &[], validator_id = ValidatorId::default(), validators = HashSet::default() )] pub async fn start_node( certificates: &[CertificateDelivered], config: 
NodeConfig, peers: &[NodeConfig], validator_id: ValidatorId, validators: HashSet, message_signer: Arc, ) -> TceContext { let is_validator = validators.contains(&validator_id); let peer_id = config.keypair.public().to_peer_id(); let fullnode_store = create_fullnode_store(&[]).in_current_span().await; let validator_store = create_validator_store(certificates, futures::future::ready(fullnode_store.clone())) .in_current_span() .await; let router = GrpcRouter::new(tonic::transport::Server::builder()).add_service( SynchronizerServiceServer::new(SynchronizerService { validator_store: validator_store.clone(), }), ); let (network_client, network_stream, runtime_join_handle) = bootstrap_network( config.seed, config.addr.clone(), peers, config.minimum_cluster_size, Some(router), config.dummy, ) .in_current_span() .await .expect("Unable to bootstrap tce network"); let storage_client = StorageClient::new(validator_store.clone()); let (sender, receiver) = broadcast::channel(100); let (tce_cli, tce_stream) = create_reliable_broadcast_client( validator_id, validators, message_signer, create_reliable_broadcast_params(peers.len()), validator_store.clone(), sender, ) .in_current_span() .await; let api_storage_client = storage_client.clone(); let (api_context, api_stream) = create_public_api( futures::future::ready(api_storage_client), receiver.resubscribe(), futures::future::ready(validator_store.clone()), ) .in_current_span() .await; let (gatekeeper_client, gatekeeper_join_handle) = create_gatekeeper().await.unwrap(); let (synchronizer_stream, synchronizer_join_handle) = create_synchronizer( gatekeeper_client.clone(), network_client.clone(), validator_store.clone(), ) .in_current_span() .await; let (app, event_stream) = AppContext::new( is_validator, storage_client, tce_cli, network_client, api_context.client, gatekeeper_client, validator_store, api_context.api_context.unwrap(), ); let shutdown_token = CancellationToken::new(); let shutdown_cloned = shutdown_token.clone(); let 
(shutdown_sender, shutdown_receiver) = mpsc::channel(1); let app_join_handle = spawn( app.run( network_stream, tce_stream, api_stream, synchronizer_stream, BroadcastStream::new(receiver).filter_map(|v| futures::future::ready(v.ok())), (shutdown_token, shutdown_sender), ) .in_current_span(), ); TceContext { node_config: config, event_stream, peer_id, api_entrypoint: api_context.entrypoint, api_grpc_client: api_context.api_client, console_grpc_client: api_context.console_client, runtime_join_handle, app_join_handle, gatekeeper_join_handle, synchronizer_join_handle, connected_subnets: None, shutdown: (shutdown_cloned, shutdown_receiver), } } fn build_peer_config_pool(peer_number: u8) -> Vec { (1..=peer_number) .map(NodeConfig::from_seed) .map(|mut c| { c.minimum_cluster_size = peer_number as usize / 2; c }) .collect() } pub async fn start_pool( peer_number: u8, certificates: &[CertificateDelivered], ) -> HashMap { let mut clients = HashMap::new(); let peers = build_peer_config_pool(peer_number); let mut validators = Vec::new(); let mut message_signers = Vec::new(); for i in 1..=peer_number { let message_signer = Arc::new(MessageSigner::new(&[i; 32]).unwrap()); message_signers.push(message_signer.clone()); let validator_id = ValidatorId::from(message_signer.public_address); validators.push(validator_id); } let mut await_peers = Vec::new(); for (i, config) in peers.iter().enumerate() { let validator_id = validators[i]; let signer = message_signers[i].clone(); let config_cloned = config.clone(); let peers_cloned = peers.clone(); let validators_cloned = validators.clone(); let context = tracing::info_span!( "start_node", "peer_id" = config_cloned.peer_id().to_string() ); let fut = async move { let client = start_node( certificates, config_cloned, &peers_cloned, validator_id, validators_cloned .into_iter() .collect::>(), signer, ) .instrument(context) .await; (client.peer_id, client) }; await_peers.push(fut); } for (user_peer_id, client) in join_all(await_peers).await { 
clients.insert(user_peer_id, client); } clients } #[fixture( peer_number = 2, certificates = &[] )] pub async fn create_network( peer_number: usize, certificates: &[CertificateDelivered], ) -> HashMap { // List of peers (tce nodes) with their context let mut peers_context = start_pool(peer_number as u8, certificates).await; // Waiting for new network view let mut await_peers = Vec::new(); for (_peer_id, client) in peers_context.iter_mut() { await_peers.push(client.console_grpc_client.status(StatusRequest {})); } assert!(!join_all(await_peers) .await .into_iter() .map(|res: Result, _>| res .map(|r: tonic::Response<_>| r.into_inner().has_active_sample)) .any(|r| r.is_err() || !r.unwrap())); tracing::error!("GRPC status received and ok"); peers_context } ================================================ FILE: crates/topos-test-sdk/src/tce/p2p.rs ================================================ use std::error::Error; use futures::Stream; use libp2p::Multiaddr; use tokio::{spawn, task::JoinHandle}; use tracing::Instrument; use crate::p2p::keypair_from_seed; use topos_p2p::{error::P2PError, Event, GrpcContext, GrpcRouter, NetworkClient, Runtime}; use super::NodeConfig; pub async fn create_network_worker( seed: u8, addr: Vec, peers: &[NodeConfig], minimum_cluster_size: usize, router: Option, ) -> Result< ( NetworkClient, impl Stream + Unpin + Send, Runtime, ), P2PError, > { let key = keypair_from_seed(seed); let _peer_id = key.public().to_peer_id(); let known_peers = if seed == 1 { vec![] } else { peers .iter() .filter_map(|config| { if config.seed == 1 { Some((config.keypair.public().to_peer_id(), config.addr.clone())) } else { None } }) .collect::>() }; let grpc_context = if let Some(router) = router { GrpcContext::default().with_router(router) } else { GrpcContext::default() }; topos_p2p::network::builder() .peer_key(key.clone()) .known_peers(&known_peers) .public_addresses(addr.clone()) .listen_addresses(addr) .minimum_cluster_size(minimum_cluster_size) 
.grpc_context(grpc_context) .allow_private_ip(true) .build() .in_current_span() .await } pub async fn bootstrap_network( seed: u8, addr: Multiaddr, peers: &[NodeConfig], minimum_cluster_size: usize, router: Option, dummy: bool, ) -> Result< ( NetworkClient, impl Stream + Unpin + Send, JoinHandle>, ), Box, > { let (network_client, mut network_stream, runtime) = create_network_worker(seed, vec![addr], peers, minimum_cluster_size, router) .in_current_span() .await?; let runtime_join_handle = if dummy { spawn(runtime.run().in_current_span()) } else { runtime .bootstrap(&mut network_stream) .in_current_span() .await? }; println!("Network bootstrap done."); Ok((network_client, network_stream, runtime_join_handle)) } ================================================ FILE: crates/topos-test-sdk/src/tce/protocol.rs ================================================ use futures::Stream; use std::collections::HashSet; use std::sync::Arc; use tokio::sync::broadcast; use topos_config::tce::broadcast::ReliableBroadcastParams; use topos_core::types::ValidatorId; use topos_crypto::messages::MessageSigner; use topos_tce_broadcast::event::ProtocolEvents; use topos_tce_broadcast::{ReliableBroadcastClient, ReliableBroadcastConfig}; use topos_tce_storage::types::CertificateDeliveredWithPositions; use topos_tce_storage::validator::ValidatorStore; use tracing::Instrument; pub async fn create_reliable_broadcast_client( validator_id: ValidatorId, validators: HashSet, message_signer: Arc, tce_params: ReliableBroadcastParams, storage: Arc, sender: broadcast::Sender, ) -> ( ReliableBroadcastClient, impl Stream + Unpin, ) { let config = ReliableBroadcastConfig { tce_params, validator_id, validators, message_signer, }; ReliableBroadcastClient::new(config, storage, sender) .in_current_span() .await } pub fn create_reliable_broadcast_params(number_of_nodes: usize) -> ReliableBroadcastParams { let mut params = ReliableBroadcastParams { ..Default::default() }; let f = 
(number_of_nodes.saturating_sub(1)) / 3; params.echo_threshold = 1 + ((number_of_nodes.saturating_add(f)) / 2); params.ready_threshold = 1 + f; params.delivery_threshold = 2 * f + 1; params } ================================================ FILE: crates/topos-test-sdk/src/tce/public_api.rs ================================================ use std::str::FromStr; use std::sync::Arc; use futures::Stream; use rstest::*; use tokio::sync::broadcast; use tonic::transport::{channel, Channel}; use topos_core::api::grpc::tce::v1::{ api_service_client::ApiServiceClient, console_service_client::ConsoleServiceClient, }; use topos_tce_api::RuntimeClient; use topos_tce_api::RuntimeContext; use topos_tce_api::RuntimeEvent; use topos_tce_storage::types::CertificateDeliveredWithPositions; use topos_tce_storage::validator::ValidatorStore; use topos_tce_storage::StorageClient; use tracing::warn; use tracing::Instrument; use crate::networking::get_available_addr; use crate::storage::create_validator_store; use crate::storage::storage_client; use crate::PORT_MAPPING; pub struct PublicApiContext { pub entrypoint: String, pub client: RuntimeClient, pub api_client: ApiServiceClient, pub console_client: ConsoleServiceClient, pub api_context: Option, } #[fixture] pub fn broadcast_stream() -> broadcast::Receiver { let (_, r) = broadcast::channel(1000); r } #[fixture] pub async fn create_public_api( #[future] storage_client: StorageClient, broadcast_stream: broadcast::Receiver, #[future] create_validator_store: Arc, ) -> (PublicApiContext, impl Stream) { let storage_client = storage_client.await; let store = create_validator_store.await; let grpc_addr = get_available_addr(); let graphql_addr = get_available_addr(); let metrics_addr = get_available_addr(); let api_port = grpc_addr.port(); let api_endpoint = format!("http://0.0.0.0:{api_port}"); warn!("API endpoint: {}", api_endpoint); warn!("gRPC endpoint: {}", grpc_addr); warn!("GraphQL endpoint: {}", graphql_addr); warn!("Metrics endpoint: 
{}", metrics_addr); warn!("PORT MAPPING: {:?}", PORT_MAPPING.lock().unwrap()); let (client, stream, ctx) = topos_tce_api::Runtime::builder() .with_broadcast_stream(broadcast_stream) .serve_grpc_addr(grpc_addr) .serve_graphql_addr(graphql_addr) .serve_metrics_addr(metrics_addr) .store(store) .storage(storage_client) .build_and_launch() .in_current_span() .await; let api_channel = channel::Endpoint::from_str(&api_endpoint) .unwrap() .connect_lazy(); let console_channel = channel::Endpoint::from_str(&api_endpoint) .unwrap() .connect_lazy(); let api_client = ApiServiceClient::new(api_channel); let console_client = ConsoleServiceClient::new(console_channel); let context = PublicApiContext { entrypoint: api_endpoint, client, api_client, console_client, api_context: Some(ctx), }; (context, stream) } ================================================ FILE: crates/topos-test-sdk/src/tce/synchronizer.rs ================================================ use futures::Stream; use std::future::IntoFuture; use std::sync::Arc; use tokio::{spawn, task::JoinHandle}; use tokio_util::sync::CancellationToken; use tracing::Instrument; use topos_p2p::NetworkClient; use topos_tce_gatekeeper::GatekeeperClient; use topos_tce_storage::validator::ValidatorStore; use topos_tce_synchronizer::SynchronizerError; use topos_tce_synchronizer::SynchronizerEvent; pub async fn create_synchronizer( _: GatekeeperClient, network_client: NetworkClient, store: Arc, ) -> ( impl Stream, JoinHandle>, ) { let shutdown = CancellationToken::new(); let (synchronizer_runtime, synchronizer_stream) = topos_tce_synchronizer::Synchronizer::builder() .with_shutdown(shutdown) .with_store(store) .with_network_client(network_client) .build() .expect("Can't create the Synchronizer"); let synchronizer_join_handle = spawn(synchronizer_runtime.into_future().in_current_span()); (synchronizer_stream, synchronizer_join_handle) } ================================================ FILE: crates/topos-wallet/Cargo.toml 
================================================ [package] name = "topos-wallet" description = "Key manager" version = "0.1.0" edition = "2021" [lints] workspace = true [dependencies] secp256k1.workspace = true byteorder.workspace = true hex.workspace = true thiserror.workspace = true tracing.workspace = true keccak-hash = "0.10.0" eth-keystore = "0.5.0" topos-crypto.workspace = true ================================================ FILE: crates/topos-wallet/src/error.rs ================================================ use thiserror::Error; #[derive(Debug, Error)] pub enum Error { #[error("Keystore error: {0}")] KeystoreError(#[from] eth_keystore::KeystoreError), #[error("Keystore file io error: {0}")] KeystoreFileError(#[from] std::io::Error), #[error("Invalid key error: {0}")] InvalidKeyError(String), #[error("Elliptic curve error: {0}")] Secp256k1Error(#[from] secp256k1::Error), #[error("Invalid signature: {0}")] InvalidSignature(String), } ================================================ FILE: crates/topos-wallet/src/lib.rs ================================================ use std::fs; use std::path::PathBuf; pub mod error; // File tree generated by Polygon Edge /// Key for the authentication on libp2p (secp256k1) pub const NETWORK_KEY: &str = "libp2p/libp2p.key"; /// Key for the contracts authentication (secp256k1) pub const VALIDATOR_KEY: &str = "consensus/validator.key"; /// Key for the IBFT authentication (bls) pub const VALIDATOR_BLS_KEY: &str = "consensus/validator-bls.key"; /// Load from the filesystem pub fn load_fs_secret(file: PathBuf) -> Option { match &fs::read_to_string(&file) { Ok(s) => Some(hex::decode(s).unwrap_or_else(|_| panic!("decode failure for {}", s))), Err(e) => panic!("Failed at reading {file:?}: {e}"), } } /// Load from the AWS Secret Manager #[allow(dead_code)] pub fn load_aws_secrets(secrets_config: &str) { println!("loading from aws-sm {}", secrets_config); } pub type SecretKey = Vec; pub type PublicKey = Vec; #[derive(Default, 
Debug)] pub struct SecretManager { pub network: Option, pub validator: Option, pub validator_bls: Option, } impl SecretManager { pub fn from_fs(home_path: PathBuf) -> Self { Self { network: load_fs_secret(home_path.join(NETWORK_KEY)), validator: load_fs_secret(home_path.join(VALIDATOR_KEY)), validator_bls: load_fs_secret(home_path.join(VALIDATOR_BLS_KEY)), } } pub fn from_aws(_secrets_config: &str) -> Self { println!("loading from aws-sm"); todo!() } pub fn validator_pubkey(&self) -> Option { self.validator .as_ref() .map(|pk| topos_crypto::keys::derive_public_key(pk).unwrap()) } } ================================================ FILE: docs/.gitignore ================================================ book/ src/mdbook-plantuml-img/ ================================================ FILE: docs/README.md ================================================ # The Topos specification and development internals documentation The specification doc is compiled from several source files with [`mdBook`](https://github.com/rust-lang/mdBook). To view it live, locally, from the repo root: Ensure graphviz and plantuml are installed: ```sh brew install graphviz plantuml # for macOS sudo apt-get install graphviz plantuml # for Ubuntu/Debian ``` Then install and build the book: ```sh cargo install mdbook mdbook-plantuml mdbook-linkcheck mdbook-graphviz mdbook serve doc open http://localhost:3000 ``` ================================================ FILE: docs/architecture/certificates_collector.md ================================================ # CertificatesCollector The `CertificatesCollector` is responsible for fetching and validating certificates by gathering the certificate data across peers, verifying it and persist it if everything is ok. 
## General design Upon receiving a `SourceStreamPosition` from the `CheckpointsCollector`, the `CertificatesCollector` will check the difference between the current position that the local node have, and the expected position reported by the `CheckpointsCollector`. The `CertificatesCollector` will then ask the peers of the network to retrieve the list of `CertificateId` to sync, in order. Upon receiving those responses, ================================================ FILE: docs/architecture/checkpoints_collector.md ================================================ # CheckpointsCollector The `CheckpointsCollector` is the component that will communicate with other peers to negotiate a network checkpoint to sync with. ## General design The `CheckpointsCollector` will use the P2P network to ask for the `Checkpoints` of others peers. The selected peers will be provided by the `Gatekeeper`, returning a chunk of random peers to communicate with. This component asks for Peer's `Checkpoint`. `Peers` will respond with their current `Checkpoint`. This is the responsibility of the `CheckpointsCollector` to communicate with others peers to build and find a network `Checkpoint` that can be used to `sync`. ## Internal design The `CheckpointsCollector`, as describe above, has a main goal of negotiating a common checkpoint to start syncing the node. The first thing to do for the `CheckpointsCollector` is to contact the `Gatekeeper` for a list of peers to sync with. Upon receiving the list of peers, the `CheckpointsCollector` will open connections and send a message to ask for checkpoint, it is pretty straightforward, but it's the first step to define if the peers are trustable to sync. 
The message sent to all peers as the following: ```protobuf message CheckpointRequest { bool content = 1; } message CheckpointResponse { repeatable SourceStreamPosition heads = 1; CheckpointContents content = 2; } message CheckpointContents { map content = 1; int32 count = 2; } ``` ``` SEND to peer1: CheckpointRequest { content: false } RECV from peer1: CheckpointResponse { heads: [SourceStreamPosition { subnet_id: "0x0a", certificate_id: "0xba", position: 10}, ..], content: CheckpointContents { content: { .. }, count: 10 } } ``` Upon receiving the response of every peer, the `CheckpointsCollector` will gather every `CheckpointResponse` and analyze them in order to decide if it can trust the peers that respond. For all subnets contained in the `CheckpointResponse` of every peer, the `CheckpointsCollector` will ask the local storage for the current node `SourceStreamPosition` head. For every `SourceStreamPosition` that are lower than our current head, we don't have to sync. ### TTL on CheckpointRequest If a peer does not respond following a TTL on the request, this peer will be tagged as byzantine. If the `CheckpointsCollector` find itself having less responses than expected regarding a preconfigured threshold, it can decide to dump every response and ask for a new set of peers from the `Gatekeeper`. (Informing the `Gatekeeper` with the list of peers that didn't respond in time). ### Selecting the smallest Position across responses per subnet Because of the distributed and asynchronous delivery of certificates during the broadcast, some peers of our set of sync peers can be late or in advance compare to others. In order to sync and have a consistent view of the network, the node needs to detect this pattern and chose the smallest position in the responses in order to ask for the `SourceStreamPosition` for that subnet at that position. 
``` Responses: RECV from peer1: CheckpointResponse { heads: [SourceStreamPosition { subnet_id: "0x0a", certificate_id: "0xba", position: 10}, ..], .. } RECV from peer2: CheckpointResponse { heads: [SourceStreamPosition { subnet_id: "0x0a", certificate_id: "0xba", position: 10}, ..], .. } RECV from peer3: CheckpointResponse { heads: [SourceStreamPosition { subnet_id: "0x0a", certificate_id: "0xef", position: 9}, ..], .. } Problem: Received different position for the same subnet Fallback: Requesting `SourceStreamPosition` for the position 9 to check for consistency ``` ``` Responses: RECV from peer1: SourceStreamPositionResponse { subnet_id: "0x0a", certificate_id: "0xef", position: 9} RECV from peer2: SourceStreamPositionResponse { subnet_id: "0x0a", certificate_id: "0xef", position: 9} RECV from peer3: SourceStreamPositionResponse { subnet_id: "0x0a", certificate_id: "0xef", position: 9} Result: This set of peers are sync and we have a consistent point in the stream to sync. Every peers have the same certificate_id and position. ``` ### Inconsistent CheckpointResponses Apart from TTL there are some cases which can represent an inconsistent set of `CheckpointResponse`. #### Receiving different certificate_id for the same Position The `Position` of a `certificate` in the `SourceStream` of a subnet is guaranteed to be the same across all TCE node. It is enforced by the topos protocol itself and more precisely by the `Broadcast` mechanisms. When receiving inconsistent `SourceStreamPosition` for a `subnet` across multiples `CheckpointResponse`, if it hits a threshold, the node needs to dump every response and fetch a new set of peers from the `Gatekeeper`. ``` Responses: RECV from peer1: CheckpointResponse { heads: [SourceStreamPosition { subnet_id: "0x0a", certificate_id: "0xba", position: 10}, ..], .. } RECV from peer2: CheckpointResponse { heads: [SourceStreamPosition { subnet_id: "0x0a", certificate_id: "0xba", position: 10}, ..], .. 
} RECV from peer3: CheckpointResponse { heads: [SourceStreamPosition { subnet_id: "0x0a", certificate_id: "0xef", position: 10}, ..], .. } Problem: Receiving different cert for the same position of the same subnet Fallback: Requesting another batch of TCE nodes until receiving consistent response ``` #### Receiving different Position for the same certificate_id The `Position` of a `certificate` in the `SourceStream` of a subnet is guaranteed to be the same across all TCE node. It is enforced by the Topos protocol itself and more precisely by the `Broadcast` mechanisms. When receiving inconsistent `SourceStreamPosition` for a `subnet` across multiples `CheckpointResponse`, if it hits a threshold, the node needs to dump every response and fetch a new set of peers from the `Gatekeeper`. ``` Responses: RECV from peer1: CheckpointResponse { heads: [SourceStreamPosition { subnet_id: "0x0a", certificate_id: "0xba", position: 10}, ..], .. } RECV from peer2: CheckpointResponse { heads: [SourceStreamPosition { subnet_id: "0x0a", certificate_id: "0xba", position: 10}, ..], .. } RECV from peer3: CheckpointResponse { heads: [SourceStreamPosition { subnet_id: "0x0a", certificate_id: "0xba", position: 11}, ..], .. } Problem: Receiving different position for the same cert of the same subnet Fallback: Requesting another batch of TCE nodes until receiving consistent response ``` ================================================ FILE: docs/architecture/gatekeeper.md ================================================ # Gatekeeper The `Gatekeeper` is a central piece of the node. It has the role of defining which peers can participate and which nodes need to be listened to. ## General Design The `Gatekeeper` will have a local in-memory state representing, - The list of TCE nodes allowed to participate - The list of all subnets allowed to submit certificates The `Gatekeeper` will manage the two lists using multiple mechanisms that are not defined for now. 
The `Gatekeeper` can receive and respond to commands in order to provide information to other components: - Update the configuration of the `Gatekeeper` - Request a full list of all peers - Request a random list of peers - Request a full list of all subnets ## Internal design The `Gatekeeper` isn't fully designed for now but a first iteration will be to expose a simple gRPC API to push update of the list of TCE validators. In the future we'll need to find solution to fetch this information from the source of truth. The goal here is to expose methods that can be used by any components and in a near future, replace the internal implementation. This component will be responsible for exposing the lists of peers/subnets, and it'll be also responsible for maintaining some kind of reputation for peers that we're connecting with. ### Peer list The `Peer` list maintained by the `Gatekeeper` will be a simple list of `PeerId`. ### Subnet list The `Subnet` list maintained by the `Gatekeeper` will be a simple list of `SubnetId`. ================================================ FILE: docs/architecture/synchronizer.md ================================================ # Synchronizer The `Synchronizer` is responsible for organizing the components involved in the sync process. ## General design The main goal of the `Synchronizer` is to be the entry point of the `Runtime` to start and drive the `sync` process of the node. The `Synchronizer` is responsible for spawning and driving the components involved in the `sync` process by listening events coming from those components but also sending command to them if needed. The `Synchronizer` will have some commands used by the `Runtime`, and expose events to notify the `Runtime` about important actions, events or issues during the `sync` process. The `Synchronizer` manages two main subcomponents which are `CheckpointsCollector` and `CertificatesCollector`. 
================================================ FILE: docs/book.toml ================================================ [book] language = "en" multilingual = false src = "src" title = "The Topos specification and development internals documentation" [preprocessor.graphviz] command = "mdbook-graphviz" [preprocessor.plantuml] command = "mdbook-plantuml" ================================================ FILE: docs/src/README.md ================================================ # Preamble This documentation aims at including the first shoot for development specification. Please refer also to for documentation on the protocol level. ================================================ FILE: docs/src/SUMMARY.md ================================================ # Summary [Preamble](README.md) - [Topos Node Architecture](topos-node.md) - [Test](test.md) [Glossary](glossary.md) ================================================ FILE: docs/src/glossary.md ================================================ # Glossary - BABE: (Blind Assignment for Blockchain Extension). The consensus algorithm validators use as block production mechanism. See [the Polkadot wiki][0] for more information. - Extrinsic: An element of a relay-chain block which triggers a specific entry-point of a runtime module with given arguments. - GRANDPA: (Ghost-based Recursive ANcestor Deriving Prefix Agreement). The algorithm validators uses to guarantee finality of the subnet. See [the Polkadot wiki][0] for more information. - Pallet: A component of the Runtime logic, encapsulating storage, routines, and entry-points. - Runtime: The subnet state machine. - Runtime API: A means for the node-side behavior to access structured information based on the state of a fork of the blockchain. - Worker: A long-running task which is responsible for carrying out a particular category of subprotocols, e.g., DKG is a Topos subprotocol implemented as a Worker. 
- Validator: A specially-selected node in the network that is responsible for validating subnet blocks and issuing attestations about their validity.
"fiscalYearStartMonth": 0, "graphTooltip": 0, "id": 419, "links": [], "liveNow": false, "panels": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "auto", "spanNulls": false, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null }, { "color": "red", "value": 80 } ] } }, "overrides": [] }, "gridPos": { "h": 5, "w": 10, "x": 0, "y": 0 }, "id": 51, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, "tooltip": { "mode": "single", "sort": "none" } }, "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "builder", "expr": "topos_certificate_delivered_total", "legendFormat": "__auto", "range": true, "refId": "A" } ], "title": "Panel Title", "transformations": [ { "id": "convertFieldType", "options": { "conversions": [], "fields": {} } } ], "type": "timeseries" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { "axisCenteredZero": false, "axisColorMode": "text", "axisGridShow": true, "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "auto", 
"spanNulls": false, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null }, { "color": "red", "value": 80 } ] }, "unit": "s" }, "overrides": [ { "matcher": { "id": "byFrameRefID", "options": "max" }, "properties": [ { "id": "custom.fillBelowTo", "value": "min" }, { "id": "custom.fillOpacity", "value": 17 } ] }, { "matcher": { "id": "byFrameRefID", "options": "avg" }, "properties": [ { "id": "custom.lineWidth", "value": 3 } ] } ] }, "gridPos": { "h": 5, "w": 13, "x": 10, "y": 0 }, "id": 52, "interval": "4", "options": { "legend": { "calcs": [ "stdDev" ], "displayMode": "list", "placement": "right", "showLegend": true }, "tooltip": { "mode": "single", "sort": "none" } }, "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "avg(rate(topos_double_echo_delivery_latency_sum[$__rate_interval]) / rate(topos_double_echo_delivery_latency_count[$__rate_interval]))", "legendFormat": "avg", "range": true, "refId": "avg" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "min(rate(topos_double_echo_delivery_latency_sum[$__rate_interval]) / rate(topos_double_echo_delivery_latency_count[$__rate_interval]))", "hide": false, "legendFormat": "min", "range": true, "refId": "min" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "max(rate(topos_double_echo_delivery_latency_sum[$__rate_interval]) / rate(topos_double_echo_delivery_latency_count[$__rate_interval]))", "hide": false, "legendFormat": "max", "range": true, "refId": "max" }, { "datasource": { "name": "Expression", "type": "__expr__", "uid": "__expr__" }, "expression": "avg", "hide": false, "reducer": "mean", "refId": "reduced_point", "settings": { "mode": "dropNN" }, "type": "reduce" } ], "title": "Latency", "transformations": [], "type": 
"timeseries" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "fieldConfig": { "defaults": { "color": { "mode": "thresholds" }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null }, { "color": "red", "value": 80 } ] }, "unit": "none" }, "overrides": [ { "matcher": { "id": "byFrameRefID", "options": "Ratio" }, "properties": [ { "id": "thresholds", "value": { "mode": "percentage", "steps": [ { "color": "green", "value": null } ] } } ] } ] }, "gridPos": { "h": 8, "w": 2, "x": 0, "y": 5 }, "id": 25, "options": { "colorMode": "none", "graphMode": "none", "justifyMode": "auto", "orientation": "auto", "reduceOptions": { "calcs": [ "lastNotNull" ], "fields": "", "values": false }, "textMode": "auto" }, "pluginVersion": "9.3.8", "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "sum(topos_certificate_processing_total{job=~\"$peer\"})", "legendFormat": "Received", "range": true, "refId": "A" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "sum(topos_certificate_delivered_total{job=~\"$peer\"})", "hide": false, "legendFormat": "Delivered", "range": true, "refId": "B" }, { "datasource": { "name": "Expression", "type": "__expr__", "uid": "__expr__" }, "expression": "100 * ($B ) / ($A) ", "hide": false, "refId": "Ratio", "type": "math" } ], "title": "Health", "type": "stat" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "fieldConfig": { "defaults": { "custom": { "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "scaleDistribution": { "type": "linear" } } }, "overrides": [] }, "gridPos": { "h": 8, "w": 9, "x": 2, "y": 5 }, "id": 37, "interval": "5", "options": { "calculate": false, "cellGap": 1, "color": { "exponent": 0.5, "fill": "dark-orange", "mode": "scheme", "reverse": true, "scale": "exponential", "scheme": "Magma", "steps": 128 }, "exemplars": { "color": 
"rgba(255,0,255,0.7)" }, "filterValues": { "le": 1e-10 }, "legend": { "show": true }, "rowsFrame": { "layout": "auto" }, "tooltip": { "show": true, "yHistogram": false }, "yAxis": { "axisPlacement": "left", "reverse": false } }, "pluginVersion": "9.3.8", "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "builder", "expr": "sum by(job) (increase(topos_certificate_delivered_total[$__rate_interval]))", "legendFormat": "__auto", "range": true, "refId": "A" } ], "title": "Delivery dynamic", "type": "heatmap" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "auto", "spanNulls": false, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null }, { "color": "red", "value": 80 } ] } }, "overrides": [ { "matcher": { "id": "byFrameRefID", "options": "input" }, "properties": [ { "id": "custom.lineWidth", "value": 2 }, { "id": "custom.lineStyle", "value": { "dash": [ 10, 10 ], "fill": "dash" } } ] } ] }, "gridPos": { "h": 7, "w": 13, "x": 11, "y": 5 }, "id": 39, "interval": "14", "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "right", "showLegend": true }, "tooltip": { "mode": "single", "sort": "none" } }, "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "builder", "expr": "avg(sum by(job) (irate(topos_certificate_delivered_total[$__rate_interval])))", "hide": false, "legendFormat": 
"Actual", "range": true, "refId": "A" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "avg(sum by(job) (irate(topos_certificate_processing_total[$__interval])))", "hide": true, "legendFormat": "Input", "range": true, "refId": "input" } ], "title": "Throughput", "type": "timeseries" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { "axisCenteredZero": false, "axisColorMode": "text", "axisGridShow": true, "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "auto", "spanNulls": false, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null }, { "color": "red", "value": 80 } ] }, "unit": "s" }, "overrides": [ { "matcher": { "id": "byFrameRefID", "options": "max" }, "properties": [ { "id": "custom.fillBelowTo", "value": "min" }, { "id": "custom.fillOpacity", "value": 17 } ] }, { "matcher": { "id": "byFrameRefID", "options": "avg" }, "properties": [ { "id": "custom.lineWidth", "value": 3 } ] } ] }, "gridPos": { "h": 5, "w": 13, "x": 11, "y": 12 }, "id": 41, "interval": "4", "options": { "legend": { "calcs": [ "stdDev" ], "displayMode": "list", "placement": "right", "showLegend": true }, "tooltip": { "mode": "single", "sort": "none" } }, "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "avg(rate(topos_double_echo_delivery_latency_sum[$__rate_interval]) / rate(topos_double_echo_delivery_latency_count[$__rate_interval]))", "legendFormat": "avg", "range": true, "refId": "avg" }, { 
"datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "min(rate(topos_double_echo_delivery_latency_sum[$__rate_interval]) / rate(topos_double_echo_delivery_latency_count[$__rate_interval]))", "hide": false, "legendFormat": "min", "range": true, "refId": "min" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "max(rate(topos_double_echo_delivery_latency_sum[$__rate_interval]) / rate(topos_double_echo_delivery_latency_count[$__rate_interval]))", "hide": false, "legendFormat": "max", "range": true, "refId": "max" } ], "title": "Latency", "type": "timeseries" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "description": "Received from gRPC", "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "auto", "spanNulls": false, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null }, { "color": "red", "value": 80 } ] } }, "overrides": [] }, "gridPos": { "h": 8, "w": 11, "x": 0, "y": 13 }, "id": 43, "interval": "5", "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, "tooltip": { "mode": "single", "sort": "none" } }, "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "builder", "expr": "topos_api_grpc_certificate_received_total", "legendFormat": "{{job}}", "range": true, "refId": "A" } ], "title": "gRPC received", "type": "timeseries" }, { "datasource": { "type": 
"prometheus", "uid": "${datasource}" }, "description": "", "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "auto", "spanNulls": false, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null }, { "color": "red", "value": 80 } ] } }, "overrides": [] }, "gridPos": { "h": 8, "w": 13, "x": 11, "y": 17 }, "id": 9, "interval": "4", "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, "tooltip": { "mode": "single", "sort": "none" } }, "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "avg(topos_certificate_processing_total{job=~\"$peer\"})", "hide": false, "legendFormat": "Double Echo module", "range": true, "refId": "A" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "avg(topos_certificate_processing_from_api_total{job=~\"$peer\"})", "hide": false, "legendFormat": "Received by gRPC API", "range": true, "refId": "B" } ], "title": "Certificate reception in the double echo", "type": "timeseries" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "lineInterpolation": 
"linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "auto", "spanNulls": false, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null }, { "color": "red", "value": 80 } ] } }, "overrides": [] }, "gridPos": { "h": 8, "w": 11, "x": 0, "y": 21 }, "id": 45, "interval": "5", "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, "tooltip": { "mode": "single", "sort": "none" } }, "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "builder", "expr": "topos_double_echo_active_tasks_count", "legendFormat": "{{job}}", "range": true, "refId": "A" } ], "title": "Active tokio tasks", "type": "timeseries" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "description": "Upon received from the gRPC API", "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "auto", "spanNulls": false, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null }, { "color": "red", "value": 80 } ] } }, "overrides": [] }, "gridPos": { "h": 8, "w": 13, "x": 11, "y": 25 }, "id": 48, "interval": "4", "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, "tooltip": { "mode": "single", "sort": "none" } }, "targets": [ { "datasource": { "type": "prometheus", "uid": 
"${datasource}" }, "editorMode": "code", "expr": "topos_certificate_processing_from_api_total{job=~\"$peer\"}", "hide": false, "legendFormat": "API {{job}}", "range": true, "refId": "A" } ], "title": "Certificate reception from API", "type": "timeseries" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "fieldConfig": { "defaults": { "custom": { "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "scaleDistribution": { "type": "linear" } } }, "overrides": [] }, "gridPos": { "h": 8, "w": 11, "x": 0, "y": 29 }, "id": 35, "interval": "5", "options": { "calculate": false, "cellGap": 1, "color": { "exponent": 0.5, "fill": "dark-orange", "mode": "scheme", "reverse": true, "scale": "exponential", "scheme": "Oranges", "steps": 113 }, "exemplars": { "color": "rgba(255,0,255,0.7)" }, "filterValues": { "le": 1e-9 }, "legend": { "show": true }, "rowsFrame": { "layout": "auto" }, "tooltip": { "show": true, "yHistogram": false }, "yAxis": { "axisPlacement": "left", "reverse": false } }, "pluginVersion": "9.3.8", "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "builder", "expr": "sum by(le) (increase(topos_p2p_gossip_batch_size_bucket[$__rate_interval]))", "format": "heatmap", "legendFormat": "{{job}}", "range": true, "refId": "A" } ], "title": "Batch size", "type": "heatmap" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "description": "Upon received from GossipSub", "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "auto", "spanNulls": false, "stacking": { "group": "A", "mode": "none" }, 
"thresholdsStyle": { "mode": "off" } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null }, { "color": "red", "value": 80 } ] } }, "overrides": [] }, "gridPos": { "h": 8, "w": 13, "x": 11, "y": 33 }, "id": 49, "interval": "4", "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, "tooltip": { "mode": "single", "sort": "none" } }, "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "builder", "expr": "topos_libp2p_gossipsub_topic_msg_recv_counts_total{job=~\"$peer\", hash=\"topos_gossip\"}", "hide": false, "legendFormat": "API {{job}}", "range": true, "refId": "A" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "hide": false, "refId": "B" } ], "title": "Certificate reception from Gossip", "type": "timeseries" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "auto", "spanNulls": false, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null }, { "color": "red", "value": 80 } ] } }, "overrides": [] }, "gridPos": { "h": 8, "w": 13, "x": 11, "y": 41 }, "id": 47, "interval": "4", "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, "tooltip": { "mode": "single", "sort": "none" } }, "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": 
"builder", "expr": "topos_double_echo_broadcast_finished_total", "legendFormat": "{{job}}", "range": true, "refId": "A" } ], "title": "Broadcast finished", "type": "timeseries" }, { "collapsed": false, "gridPos": { "h": 1, "w": 24, "x": 0, "y": 49 }, "id": 32, "panels": [], "title": "Misc", "type": "row" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "description": "Latency between rounds", "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "auto", "spanNulls": false, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null }, { "color": "red", "value": 80 } ] } }, "overrides": [ { "matcher": { "id": "byFrameRefID", "options": "A" }, "properties": [ { "id": "custom.axisPlacement", "value": "right" } ] }, { "matcher": { "id": "byFrameRefID", "options": "Echo published" }, "properties": [ { "id": "custom.axisPlacement", "value": "right" } ] } ] }, "gridPos": { "h": 8, "w": 12, "x": 0, "y": 50 }, "id": 30, "interval": "4", "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, "tooltip": { "mode": "single", "sort": "none" } }, "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "sum by(hash) (increase(topos_libp2p_gossipsub_topic_msg_recv_counts_total{hash=\"topos_gossip\", job=~\"$peer\"}[$__rate_interval]))", "legendFormat": "Gossip received", "range": true, "refId": "Gossip received" }, { "datasource": { "type": "prometheus", 
"uid": "${datasource}" }, "editorMode": "code", "expr": "sum by(hash) (increase(topos_libp2p_gossipsub_topic_msg_published_total{hash=\"topos_echo\", job=~\"$peer\"}[$__rate_interval]))", "hide": false, "legendFormat": "Echo published", "range": true, "refId": "Echo published" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "sum by(hash) (increase(topos_libp2p_gossipsub_topic_iwant_msgs_total{hash=\"topos_gossip\", job=~\"$peer\"}[$__rate_interval]))", "hide": false, "legendFormat": "IWANT Gossip", "range": true, "refId": "IWANT" } ], "title": "Gossip received vs. Echo published", "type": "timeseries" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "description": "Measure the redundancy", "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "auto", "spanNulls": false, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null }, { "color": "red", "value": 80 } ] } }, "overrides": [ { "matcher": { "id": "byFrameRefID", "options": "Ratio" }, "properties": [ { "id": "custom.axisPlacement", "value": "right" }, { "id": "custom.scaleDistribution", "value": { "log": 10, "type": "log" } }, { "id": "custom.lineStyle", "value": { "dash": [ 0, 10 ], "fill": "dot" } }, { "id": "custom.lineWidth", "value": 4 } ] } ] }, "gridPos": { "h": 8, "w": 12, "x": 12, "y": 50 }, "id": 29, "interval": "4", "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, 
"tooltip": { "mode": "single", "sort": "none" } }, "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "sum by(instance) (avg by(hash) (topos_libp2p_gossipsub_topic_msg_recv_counts_total{job=~\"$peer\"}))", "hide": false, "legendFormat": "Filtered", "range": true, "refId": "filtered" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "sum by(instance) (avg by(hash) (topos_libp2p_gossipsub_topic_msg_recv_counts_unfiltered_total{job=~\"$peer\"}))", "hide": false, "legendFormat": "Unfiltered", "range": true, "refId": "unfiltered" }, { "datasource": { "name": "Expression", "type": "__expr__", "uid": "__expr__" }, "expression": "$unfiltered / $filtered", "hide": false, "refId": "Ratio", "type": "math" } ], "title": "Ratio payload recv unfiltered vs. filtered", "type": "timeseries" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "description": "Latency between rounds", "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "auto", "spanNulls": false, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null }, { "color": "red", "value": 80 } ] } }, "overrides": [ { "matcher": { "id": "byFrameRefID", "options": "A" }, "properties": [ { "id": "custom.axisLabel", "value": "IWANT" } ] } ] }, "gridPos": { "h": 8, "w": 12, "x": 0, "y": 58 }, "id": 31, "interval": "4", "options": { "legend": { "calcs": [], "displayMode": "list", "placement": 
"bottom", "showLegend": true }, "tooltip": { "mode": "single", "sort": "none" } }, "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "sum by(hash) (increase(topos_libp2p_gossipsub_topic_msg_recv_counts_total{hash=\"topos_echo\", job=~\"$peer\"}[$__rate_interval]))", "hide": false, "legendFormat": "Echo received", "range": true, "refId": "Gossip received" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "sum by(hash) (increase(topos_libp2p_gossipsub_topic_msg_published_total{hash=\"topos_ready\", job=~\"$peer\"}[$__rate_interval]))", "hide": false, "legendFormat": "Ready published", "range": true, "refId": "Echo published" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "sum by(hash) (increase(topos_libp2p_gossipsub_topic_iwant_msgs_total{hash=\"topos_echo\", job=~\"$peer\"}[$__rate_interval]))", "hide": false, "legendFormat": "IWANT Echo", "range": true, "refId": "A" } ], "title": "Echo received vs. 
Ready published", "type": "timeseries" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "fieldConfig": { "defaults": { "color": { "mode": "continuous-GrYlRd" }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null }, { "color": "red", "value": 80 } ] } }, "overrides": [] }, "gridPos": { "h": 16, "w": 12, "x": 12, "y": 58 }, "id": 27, "options": { "displayMode": "basic", "minVizHeight": 10, "minVizWidth": 0, "orientation": "horizontal", "reduceOptions": { "calcs": [ "lastNotNull" ], "fields": "", "values": false }, "showUnfilled": true, "valueMode": "color" }, "pluginVersion": "9.3.8", "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "topos_certificate_delivered_total{job=~\"$peer\"}", "legendFormat": "{{job}}", "range": true, "refId": "A" } ], "title": "Certificate delivered per peer", "type": "bargauge" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "description": "Latency between rounds", "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "auto", "spanNulls": false, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null }, { "color": "red", "value": 80 } ] } }, "overrides": [ { "matcher": { "id": "byFrameRefID", "options": "Missing delivery" }, "properties": [ { "id": "custom.axisPlacement", "value": "right" }, { "id": "custom.drawStyle", "value": "line" }, { "id": "custom.lineStyle", "value": { "dash": [ 0, 
10 ], "fill": "dot" } }, { "id": "custom.lineWidth", "value": 3 } ] }, { "matcher": { "id": "byFrameRefID", "options": "A" }, "properties": [ { "id": "custom.axisLabel", "value": "IWANT" } ] } ] }, "gridPos": { "h": 8, "w": 12, "x": 0, "y": 66 }, "id": 33, "interval": "4", "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, "tooltip": { "mode": "single", "sort": "none" } }, "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "topos_libp2p_gossipsub_topic_msg_recv_counts_total{hash=\"topos_ready\"}", "hide": true, "legendFormat": "Ready received", "range": true, "refId": "Ready received" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "sum(topos_libp2p_gossipsub_topic_msg_published_total{hash=\"topos_ready\"})", "hide": false, "legendFormat": "Ready published", "range": true, "refId": "Ready published" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "sum by(hash) (increase(topos_libp2p_gossipsub_topic_iwant_msgs_total{hash=\"topos_ready\", job=~\"$peer\"}[$__rate_interval]))", "hide": true, "legendFormat": "IWANT Ready", "range": true, "refId": "A" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "avg(topos_certificate_processing_total{job=~\"$peer\"})", "hide": true, "legendFormat": "__auto", "range": true, "refId": "total_cert" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "builder", "expr": "avg(topos_certificate_delivered_total)", "hide": true, "legendFormat": "__auto", "range": true, "refId": "cert_delivered" }, { "datasource": { "name": "Expression", "type": "__expr__", "uid": "__expr__" }, "expression": "$total_cert - $cert_delivered", "hide": true, "refId": "Missing delivery", "type": "math" } ], "title": "Ready received vs. Ready published vs. 
Delivered certificate", "type": "timeseries" }, { "collapsed": false, "gridPos": { "h": 1, "w": 24, "x": 0, "y": 74 }, "id": 19, "panels": [], "title": "P2P - Gossip protocol", "type": "row" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "auto", "spanNulls": false, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null }, { "color": "red", "value": 80 } ] } }, "overrides": [] }, "gridPos": { "h": 8, "w": 12, "x": 0, "y": 75 }, "id": 20, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, "tooltip": { "mode": "single", "sort": "none" } }, "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "irate(topos_libp2p_gossipsub_topic_iwant_msgs_total{ job=~\"$peer\"}[$__interval])", "legendFormat": "IWANT {{hash}} - {{job}}", "range": true, "refId": "A" } ], "title": "IWANT msg", "type": "timeseries" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": 
"linear" }, "showPoints": "auto", "spanNulls": false, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null }, { "color": "red", "value": 80 } ] } }, "overrides": [] }, "gridPos": { "h": 8, "w": 12, "x": 12, "y": 75 }, "id": 21, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, "tooltip": { "mode": "single", "sort": "none" } }, "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "irate(topos_libp2p_gossipsub_topic_msg_recv_counts_total{ job=~\"$peer\"}[$__interval])", "legendFormat": "{{hash}} {{job}}", "range": true, "refId": "A" } ], "title": "MSG recv filtered", "type": "timeseries" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "fieldConfig": { "defaults": { "color": { "mode": "thresholds" }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null }, { "color": "red", "value": 80 } ] } }, "overrides": [] }, "gridPos": { "h": 8, "w": 12, "x": 0, "y": 83 }, "id": 22, "options": { "orientation": "auto", "reduceOptions": { "calcs": [ "lastNotNull" ], "fields": "", "values": false }, "showThresholdLabels": false, "showThresholdMarkers": true }, "pluginVersion": "9.3.8", "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "sum by(hash) (topos_libp2p_gossipsub_topic_msg_recv_counts_total{ job=~\"$peer\"})", "legendFormat": "{{hash}} ", "range": true, "refId": "A" } ], "title": "MSG recv total per topic", "type": "gauge" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "fillOpacity": 80, "gradientMode": "none", 
"hideFrom": { "legend": false, "tooltip": false, "viz": false }, "lineWidth": 1, "scaleDistribution": { "type": "linear" }, "thresholdsStyle": { "mode": "off" } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null }, { "color": "red", "value": 80 } ] } }, "overrides": [] }, "gridPos": { "h": 8, "w": 12, "x": 12, "y": 83 }, "id": 23, "options": { "barRadius": 0, "barWidth": 0.97, "fullHighlight": false, "groupWidth": 0.7, "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, "orientation": "auto", "showValue": "auto", "stacking": "none", "tooltip": { "mode": "single", "sort": "none" }, "xTickLabelRotation": 0, "xTickLabelSpacing": 0 }, "pluginVersion": "9.5.3", "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "exemplar": false, "expr": "max by(hash, job) (topos_libp2p_gossipsub_topic_msg_sent_counts_total{ job=~\"$peer\"})", "format": "heatmap", "instant": true, "legendFormat": "{{job}} {{hash}}", "range": false, "refId": "A" } ], "title": "MSG sent total per topic", "type": "barchart" }, { "collapsed": false, "gridPos": { "h": 1, "w": 24, "x": 0, "y": 91 }, "id": 15, "panels": [], "title": "Storage layer", "type": "row" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "auto", "spanNulls": false, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null 
}, { "color": "red", "value": 80 } ] } }, "overrides": [] }, "gridPos": { "h": 8, "w": 12, "x": 0, "y": 92 }, "id": 16, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, "tooltip": { "mode": "single", "sort": "none" } }, "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "irate(topos_storage_command_channel_capacity_total{ job=~\"$peer\"}[$__interval])", "legendFormat": "{{job}}", "range": true, "refId": "A" } ], "title": "Command channel at capacity", "type": "timeseries" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "fieldConfig": { "defaults": { "color": { "mode": "thresholds" }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null }, { "color": "red", "value": 80 } ] } }, "overrides": [] }, "gridPos": { "h": 8, "w": 12, "x": 12, "y": 92 }, "id": 17, "options": { "displayMode": "gradient", "minVizHeight": 10, "minVizWidth": 0, "orientation": "auto", "reduceOptions": { "calcs": [ "lastNotNull" ], "fields": "", "values": false }, "showUnfilled": true, "valueMode": "color" }, "pluginVersion": "9.3.8", "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "sum(increase(topos_storage_pending_certificate_existance_latency_bucket{ job=~\"$peer\"}[$__range])) by (le)", "format": "heatmap", "legendFormat": "{{le}}", "range": true, "refId": "A" } ], "title": "Pending existance latency", "type": "bargauge" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "fieldConfig": { "defaults": { "color": { "mode": "thresholds" }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green" }, { "color": "red", "value": 80 } ] } }, "overrides": [] }, "gridPos": { "h": 8, "w": 12, "x": 12, "y": 100 }, "id": 18, "options": { "displayMode": "gradient", "minVizHeight": 10, "minVizWidth": 0, "orientation": "auto", 
"reduceOptions": { "calcs": [ "lastNotNull" ], "fields": "", "values": false }, "showUnfilled": true, "valueMode": "color" }, "pluginVersion": "9.3.8", "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "sum(increase(topos_storage_adding_pending_certificate_latency_bucket{ job=~\"$peer\"}[$__range])) by (le)", "format": "heatmap", "legendFormat": "__auto", "range": true, "refId": "A" } ], "title": "Adding to pending latency", "type": "bargauge" }, { "collapsed": false, "gridPos": { "h": 1, "w": 24, "x": 0, "y": 108 }, "id": 11, "panels": [], "title": "P2P layer", "type": "row" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "auto", "spanNulls": false, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green" }, { "color": "red", "value": 80 } ] } }, "overrides": [] }, "gridPos": { "h": 12, "w": 12, "x": 0, "y": 109 }, "id": 3, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, "tooltip": { "mode": "single", "sort": "none" } }, "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "irate(topos_p2p_event_stream_capacity_total{ job=~\"$peer\"}[$__interval])", "legendFormat": "{{job}}", "range": true, "refId": "A" } ], "title": "P2P Channel at capacity", "type": "timeseries" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" 
}, "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "auto", "spanNulls": false, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green" }, { "color": "red", "value": 80 } ] } }, "overrides": [] }, "gridPos": { "h": 12, "w": 12, "x": 12, "y": 109 }, "id": 2, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, "tooltip": { "mode": "single", "sort": "none" } }, "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "irate(topos_p2p_echo_message_total{ job=~\"$peer\"}[$__interval])", "legendFormat": "Echo {{job}}", "range": true, "refId": "A" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "irate(topos_p2p_ready_message_total{ job=~\"$peer\"}[$__interval])", "hide": false, "legendFormat": "Ready {{job}}", "range": true, "refId": "B" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "irate(topos_p2p_gossip_message_total{ job=~\"$peer\"}[$__interval])", "hide": false, "legendFormat": "Gossip {{job}}", "range": true, "refId": "C" } ], "title": "Message received", "type": "timeseries" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", 
"fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "auto", "spanNulls": false, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green" }, { "color": "red", "value": 80 } ] } }, "overrides": [] }, "gridPos": { "h": 12, "w": 12, "x": 0, "y": 121 }, "id": 1, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, "tooltip": { "mode": "single", "sort": "none" } }, "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "sum(topos_p2p_gossipsub_message_sent_total{ job=~\"$peer\"})", "legendFormat": "{{job}}", "range": true, "refId": "A" } ], "title": "Gossip message sent", "type": "timeseries" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "fieldConfig": { "defaults": { "color": { "mode": "thresholds" }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green" }, { "color": "red", "value": 80 } ] } }, "overrides": [] }, "gridPos": { "h": 12, "w": 12, "x": 12, "y": 121 }, "id": 7, "options": { "displayMode": "gradient", "minVizHeight": 10, "minVizWidth": 0, "orientation": "auto", "reduceOptions": { "calcs": [ "lastNotNull" ], "fields": "", "values": false }, "showUnfilled": true, "valueMode": "color" }, "pluginVersion": "9.3.8", "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "exemplar": false, "expr": "sum(increase(topos_p2p_gossip_batch_size_bucket{ job=~\"$peer\"}[$__range])) by (le)", "format": "heatmap", "instant": false, "legendFormat": "{{le}}", "range": true, "refId": "A" } ], "title": "Gossip message sent", "type": "bargauge" }, { "collapsed": false, "gridPos": { "h": 1, "w": 24, 
"x": 0, "y": 133 }, "id": 12, "panels": [], "title": "Double Echo - external", "type": "row" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "auto", "spanNulls": false, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green" }, { "color": "red", "value": 80 } ] } }, "overrides": [] }, "gridPos": { "h": 8, "w": 12, "x": 0, "y": 134 }, "id": 13, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, "tooltip": { "mode": "single", "sort": "none" } }, "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "irate(topos_double_echo_command_channel_capacity_total{ job=~\"$peer\"}[$__interval])", "legendFormat": "{{job}}", "range": true, "refId": "A" } ], "title": "Command channel capacity", "type": "timeseries" }, { "collapsed": false, "gridPos": { "h": 1, "w": 24, "x": 0, "y": 142 }, "id": 10, "panels": [], "title": "Double Echo - internal", "type": "row" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, 
"scaleDistribution": { "type": "linear" }, "showPoints": "auto", "spanNulls": false, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green" }, { "color": "red", "value": 80 } ] } }, "overrides": [] }, "gridPos": { "h": 8, "w": 12, "x": 0, "y": 143 }, "id": 4, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, "tooltip": { "mode": "single", "sort": "none" } }, "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "topos_double_echo_current_buffer_size{ job=~\"$peer\"}", "hide": false, "legendFormat": "{{job}}", "range": true, "refId": "B" } ], "title": "Double echo - Buffer size", "type": "timeseries" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "auto", "spanNulls": false, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green" }, { "color": "red", "value": 80 } ] } }, "overrides": [] }, "gridPos": { "h": 8, "w": 12, "x": 12, "y": 143 }, "id": 5, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, "tooltip": { "mode": "single", "sort": "none" } }, "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "irate(topos_double_echo_buffer_capacity_total{ 
job=~\"$peer\"}[$__interval])", "legendFormat": "{{job}}", "range": true, "refId": "A" } ], "title": "Double Echo Buffer at capacity", "type": "timeseries" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "auto", "spanNulls": false, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green" }, { "color": "red", "value": 80 } ] } }, "overrides": [] }, "gridPos": { "h": 8, "w": 12, "x": 0, "y": 151 }, "id": 6, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, "tooltip": { "mode": "single", "sort": "none" } }, "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "topos_double_echo_buffered_message_count{ job=~\"$peer\"}", "hide": false, "legendFormat": "{{job}}", "range": true, "refId": "B" } ], "title": "Double echo - Buffered messages", "type": "timeseries" }, { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "auto", "spanNulls": false, 
"stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green" }, { "color": "red", "value": 80 } ] } }, "overrides": [] }, "gridPos": { "h": 8, "w": 12, "x": 12, "y": 151 }, "id": 8, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, "tooltip": { "mode": "single", "sort": "none" } }, "targets": [ { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "topos_double_echo_current_buffer_size{ job=~\"$peer\"}", "hide": false, "legendFormat": "{{job}}", "range": true, "refId": "B" } ], "title": "Double echo - Buffer size", "type": "timeseries" } ], "refresh": "5s", "schemaVersion": 37, "style": "dark", "tags": [], "templating": { "list": [ { "current": { "selected": false, "text": "All", "value": "$__all" }, "datasource": { "type": "prometheus", "uid": "${datasource}" }, "definition": "label_values(topos_certificate_processing_total,job)", "hide": 0, "includeAll": true, "multi": true, "name": "peer", "options": [], "query": { "query": "label_values(topos_certificate_processing_total,job)", "refId": "PrometheusVariableQueryEditor-VariableQuery" }, "refresh": 1, "regex": "", "skipUrlSync": false, "sort": 0, "type": "query" }, { "current": { "selected": true, "text": "Prometheus", "value": "Prometheus" }, "hide": 0, "includeAll": false, "label": "Datasource", "multi": false, "name": "datasource", "options": [], "query": "prometheus", "queryValue": "", "refresh": 1, "regex": "", "skipUrlSync": false, "type": "datasource" } ] }, "time": { "from": "now-1h", "to": "now" }, "timepicker": {}, "timezone": "", "title": "Benchmarks - Gossiped Certificate", "uid": "f4d3b025-4b36-454a-a724-818f85806b6e", "version": 9, "weekStart": "" } ================================================ FILE: rust-toolchain ================================================ [toolchain] channel = "1.74.0" 
profile = "minimal" ================================================ FILE: rustfmt.toml ================================================ edition = "2021" use_field_init_shorthand = true reorder_imports = true format_strings = true ================================================ FILE: scripts/check_readme.sh ================================================ #!/bin/bash set -e param=$1 function check { if [ "$param" == "generate" ]; then cargo readme -r $1 > $1/README.md else diff <(cargo readme -r $1) $1/README.md || (echo 1>&2 "Please update the $1/README with "'`'"cargo readme -r $1 > $1/README.md"'`' && exit 1 ) fi } check crates/topos-tce-broadcast check crates/topos-tce-storage