Showing preview only (2,602K chars total). Download the full file or copy to clipboard to get everything.
Repository: serai-dex/serai
Branch: develop
Commit: 737dbcbaa78a
Files: 594
Total size: 2.4 MB
Directory structure:
gitextract_j2hgunrb/
├── .gitattributes
├── .github/
│ ├── LICENSE
│ ├── actions/
│ │ ├── bitcoin/
│ │ │ └── action.yml
│ │ ├── build-dependencies/
│ │ │ └── action.yml
│ │ ├── monero/
│ │ │ └── action.yml
│ │ ├── monero-wallet-rpc/
│ │ │ └── action.yml
│ │ └── test-dependencies/
│ │ └── action.yml
│ ├── nightly-version
│ └── workflows/
│ ├── common-tests.yml
│ ├── coordinator-tests.yml
│ ├── crypto-tests.yml
│ ├── daily-deny.yml
│ ├── full-stack-tests.yml
│ ├── lint.yml
│ ├── message-queue-tests.yml
│ ├── mini-tests.yml
│ ├── monthly-nightly-update.yml
│ ├── networks-tests.yml
│ ├── no-std.yml
│ ├── pages.yml
│ ├── processor-tests.yml
│ ├── reproducible-runtime.yml
│ └── tests.yml
├── .gitignore
├── .rustfmt.toml
├── AGPL-3.0
├── CONTRIBUTING.md
├── Cargo.toml
├── LICENSE
├── README.md
├── audits/
│ ├── Cypher Stack crypto March 2023/
│ │ ├── LICENSE
│ │ └── README.md
│ └── Cypher Stack networks bitcoin August 2023/
│ ├── LICENSE
│ └── README.md
├── common/
│ ├── db/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ └── src/
│ │ ├── create_db.rs
│ │ ├── lib.rs
│ │ ├── mem.rs
│ │ ├── parity_db.rs
│ │ └── rocks.rs
│ ├── env/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ └── src/
│ │ └── lib.rs
│ ├── patchable-async-sleep/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ ├── README.md
│ │ └── src/
│ │ └── lib.rs
│ ├── request/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ ├── README.md
│ │ └── src/
│ │ ├── lib.rs
│ │ ├── request.rs
│ │ └── response.rs
│ ├── std-shims/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ ├── README.md
│ │ └── src/
│ │ ├── collections.rs
│ │ ├── io.rs
│ │ ├── lib.rs
│ │ └── sync.rs
│ └── zalloc/
│ ├── Cargo.toml
│ ├── LICENSE
│ ├── build.rs
│ └── src/
│ └── lib.rs
├── coordinator/
│ ├── Cargo.toml
│ ├── LICENSE
│ ├── README.md
│ ├── src/
│ │ ├── cosign_evaluator.rs
│ │ ├── db.rs
│ │ ├── main.rs
│ │ ├── p2p.rs
│ │ ├── processors.rs
│ │ ├── substrate/
│ │ │ ├── cosign.rs
│ │ │ ├── db.rs
│ │ │ └── mod.rs
│ │ ├── tests/
│ │ │ ├── mod.rs
│ │ │ └── tributary/
│ │ │ ├── chain.rs
│ │ │ ├── dkg.rs
│ │ │ ├── handle_p2p.rs
│ │ │ ├── mod.rs
│ │ │ ├── sync.rs
│ │ │ └── tx.rs
│ │ └── tributary/
│ │ ├── db.rs
│ │ ├── handle.rs
│ │ ├── mod.rs
│ │ ├── scanner.rs
│ │ ├── signing_protocol.rs
│ │ ├── spec.rs
│ │ └── transaction.rs
│ └── tributary/
│ ├── Cargo.toml
│ ├── LICENSE
│ ├── README.md
│ ├── src/
│ │ ├── block.rs
│ │ ├── blockchain.rs
│ │ ├── lib.rs
│ │ ├── mempool.rs
│ │ ├── merkle.rs
│ │ ├── provided.rs
│ │ ├── tendermint/
│ │ │ ├── mod.rs
│ │ │ └── tx.rs
│ │ ├── tests/
│ │ │ ├── block.rs
│ │ │ ├── blockchain.rs
│ │ │ ├── mempool.rs
│ │ │ ├── merkle.rs
│ │ │ ├── mod.rs
│ │ │ ├── p2p.rs
│ │ │ ├── tendermint.rs
│ │ │ └── transaction/
│ │ │ ├── mod.rs
│ │ │ ├── signed.rs
│ │ │ └── tendermint.rs
│ │ └── transaction.rs
│ └── tendermint/
│ ├── Cargo.toml
│ ├── LICENSE
│ ├── README.md
│ ├── src/
│ │ ├── block.rs
│ │ ├── ext.rs
│ │ ├── lib.rs
│ │ ├── message_log.rs
│ │ ├── round.rs
│ │ └── time.rs
│ └── tests/
│ └── ext.rs
├── crypto/
│ ├── ciphersuite/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ ├── README.md
│ │ ├── kp256/
│ │ │ ├── Cargo.toml
│ │ │ ├── LICENSE
│ │ │ ├── README.md
│ │ │ └── src/
│ │ │ └── lib.rs
│ │ └── src/
│ │ ├── lib.md
│ │ └── lib.rs
│ ├── dalek-ff-group/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ ├── README.md
│ │ └── src/
│ │ ├── ciphersuite.rs
│ │ ├── field.rs
│ │ └── lib.rs
│ ├── dkg/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ ├── README.md
│ │ ├── dealer/
│ │ │ ├── Cargo.toml
│ │ │ ├── LICENSE
│ │ │ ├── README.md
│ │ │ └── src/
│ │ │ └── lib.rs
│ │ ├── musig/
│ │ │ ├── Cargo.toml
│ │ │ ├── LICENSE
│ │ │ ├── README.md
│ │ │ └── src/
│ │ │ ├── lib.rs
│ │ │ └── tests.rs
│ │ ├── pedpop/
│ │ │ ├── Cargo.toml
│ │ │ ├── LICENSE
│ │ │ ├── README.md
│ │ │ └── src/
│ │ │ ├── encryption.rs
│ │ │ ├── lib.rs
│ │ │ └── tests.rs
│ │ ├── promote/
│ │ │ ├── Cargo.toml
│ │ │ ├── LICENSE
│ │ │ ├── README.md
│ │ │ └── src/
│ │ │ ├── lib.rs
│ │ │ └── tests.rs
│ │ ├── recovery/
│ │ │ ├── Cargo.toml
│ │ │ ├── LICENSE
│ │ │ ├── README.md
│ │ │ └── src/
│ │ │ └── lib.rs
│ │ └── src/
│ │ └── lib.rs
│ ├── dleq/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ ├── README.md
│ │ └── src/
│ │ ├── cross_group/
│ │ │ ├── aos.rs
│ │ │ ├── bits.rs
│ │ │ ├── mod.rs
│ │ │ ├── scalar.rs
│ │ │ └── schnorr.rs
│ │ ├── lib.rs
│ │ └── tests/
│ │ ├── cross_group/
│ │ │ ├── aos.rs
│ │ │ ├── mod.rs
│ │ │ ├── scalar.rs
│ │ │ └── schnorr.rs
│ │ └── mod.rs
│ ├── ed448/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ ├── README.md
│ │ └── src/
│ │ ├── backend.rs
│ │ ├── ciphersuite.rs
│ │ ├── field.rs
│ │ ├── lib.rs
│ │ ├── point.rs
│ │ └── scalar.rs
│ ├── ff-group-tests/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ ├── README.md
│ │ └── src/
│ │ ├── field.rs
│ │ ├── group.rs
│ │ ├── lib.rs
│ │ └── prime_field.rs
│ ├── frost/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ ├── README.md
│ │ └── src/
│ │ ├── algorithm.rs
│ │ ├── curve/
│ │ │ ├── dalek.rs
│ │ │ ├── ed448.rs
│ │ │ ├── kp256.rs
│ │ │ └── mod.rs
│ │ ├── lib.rs
│ │ ├── nonce.rs
│ │ ├── sign.rs
│ │ └── tests/
│ │ ├── literal/
│ │ │ ├── dalek.rs
│ │ │ ├── ed448.rs
│ │ │ ├── kp256.rs
│ │ │ ├── mod.rs
│ │ │ └── vectors/
│ │ │ ├── frost-ed25519-sha512.json
│ │ │ ├── frost-ed448-shake256.json
│ │ │ ├── frost-p256-sha256.json
│ │ │ ├── frost-ristretto255-sha512.json
│ │ │ └── frost-secp256k1-sha256.json
│ │ ├── mod.rs
│ │ ├── nonces.rs
│ │ └── vectors.rs
│ ├── multiexp/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ ├── README.md
│ │ └── src/
│ │ ├── batch.rs
│ │ ├── lib.rs
│ │ ├── pippenger.rs
│ │ ├── straus.rs
│ │ └── tests/
│ │ ├── batch.rs
│ │ └── mod.rs
│ ├── schnorr/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ ├── README.md
│ │ └── src/
│ │ ├── aggregate.rs
│ │ ├── lib.rs
│ │ └── tests/
│ │ ├── mod.rs
│ │ └── rfc8032.rs
│ ├── schnorrkel/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ ├── README.md
│ │ └── src/
│ │ ├── lib.rs
│ │ └── tests.rs
│ └── transcript/
│ ├── Cargo.toml
│ ├── LICENSE
│ ├── README.md
│ └── src/
│ ├── lib.rs
│ ├── merlin.rs
│ └── tests.rs
├── deny.toml
├── docs/
│ ├── .gitignore
│ ├── .ruby-version
│ ├── Gemfile
│ ├── _config.yml
│ ├── amm/
│ │ └── index.md
│ ├── cross_chain/
│ │ └── index.md
│ ├── economics/
│ │ ├── genesis.md
│ │ ├── index.md
│ │ ├── post.md
│ │ └── pre.md
│ ├── index.md
│ ├── infrastructure/
│ │ ├── coordinator.md
│ │ ├── index.md
│ │ ├── message_queue.md
│ │ ├── processor.md
│ │ └── serai.md
│ ├── integrating/
│ │ └── index.md
│ ├── protocol_changes/
│ │ └── index.md
│ └── validator/
│ └── index.md
├── message-queue/
│ ├── Cargo.toml
│ ├── LICENSE
│ ├── README.md
│ └── src/
│ ├── client.rs
│ ├── lib.rs
│ ├── main.rs
│ ├── messages.rs
│ └── queue.rs
├── mini/
│ ├── Cargo.toml
│ ├── LICENSE
│ ├── README.md
│ └── src/
│ ├── lib.rs
│ └── tests/
│ ├── activation_race/
│ │ └── mod.rs
│ └── mod.rs
├── networks/
│ ├── bitcoin/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ ├── README.md
│ │ ├── src/
│ │ │ ├── crypto.rs
│ │ │ ├── lib.rs
│ │ │ ├── rpc.rs
│ │ │ ├── tests/
│ │ │ │ ├── crypto.rs
│ │ │ │ └── mod.rs
│ │ │ └── wallet/
│ │ │ ├── mod.rs
│ │ │ └── send.rs
│ │ └── tests/
│ │ ├── rpc.rs
│ │ ├── runner.rs
│ │ └── wallet.rs
│ └── ethereum/
│ ├── .gitignore
│ ├── Cargo.toml
│ ├── LICENSE
│ ├── README.md
│ ├── alloy-simple-request-transport/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ ├── README.md
│ │ └── src/
│ │ └── lib.rs
│ ├── build.rs
│ ├── contracts/
│ │ ├── Deployer.sol
│ │ ├── IERC20.sol
│ │ ├── Router.sol
│ │ ├── Sandbox.sol
│ │ └── Schnorr.sol
│ ├── relayer/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ ├── README.md
│ │ └── src/
│ │ └── main.rs
│ └── src/
│ ├── abi/
│ │ └── mod.rs
│ ├── crypto.rs
│ ├── deployer.rs
│ ├── erc20.rs
│ ├── lib.rs
│ ├── machine.rs
│ ├── router.rs
│ └── tests/
│ ├── abi/
│ │ └── mod.rs
│ ├── contracts/
│ │ ├── ERC20.sol
│ │ └── Schnorr.sol
│ ├── crypto.rs
│ ├── mod.rs
│ ├── router.rs
│ └── schnorr.rs
├── orchestration/
│ ├── Cargo.toml
│ ├── README.md
│ ├── dev/
│ │ ├── coordinator/
│ │ │ └── .folder
│ │ ├── message-queue/
│ │ │ └── .folder
│ │ ├── networks/
│ │ │ ├── bitcoin/
│ │ │ │ └── run.sh
│ │ │ ├── ethereum/
│ │ │ │ └── run.sh
│ │ │ ├── ethereum-relayer/
│ │ │ │ └── .folder
│ │ │ ├── monero/
│ │ │ │ ├── hashes-v0.18.3.4.txt
│ │ │ │ └── run.sh
│ │ │ └── monero-wallet-rpc/
│ │ │ └── run.sh
│ │ ├── processor/
│ │ │ ├── bitcoin/
│ │ │ │ └── .folder
│ │ │ ├── ethereum/
│ │ │ │ └── .folder
│ │ │ └── monero/
│ │ │ └── .folder
│ │ └── serai/
│ │ └── run.sh
│ ├── runtime/
│ │ └── Dockerfile
│ ├── src/
│ │ ├── coordinator.rs
│ │ ├── docker.rs
│ │ ├── ethereum_relayer.rs
│ │ ├── main.rs
│ │ ├── message_queue.rs
│ │ ├── mimalloc.rs
│ │ ├── networks/
│ │ │ ├── bitcoin.rs
│ │ │ ├── ethereum/
│ │ │ │ ├── consensus/
│ │ │ │ │ ├── lighthouse.rs
│ │ │ │ │ ├── mod.rs
│ │ │ │ │ └── nimbus.rs
│ │ │ │ ├── execution/
│ │ │ │ │ ├── anvil.rs
│ │ │ │ │ ├── mod.rs
│ │ │ │ │ └── reth.rs
│ │ │ │ └── mod.rs
│ │ │ ├── mod.rs
│ │ │ └── monero.rs
│ │ ├── processor.rs
│ │ └── serai.rs
│ └── testnet/
│ ├── coordinator/
│ │ └── .folder
│ ├── message-queue/
│ │ └── .folder
│ ├── networks/
│ │ ├── bitcoin/
│ │ │ └── run.sh
│ │ ├── ethereum/
│ │ │ ├── consensus/
│ │ │ │ ├── lighthouse/
│ │ │ │ │ └── run.sh
│ │ │ │ └── nimbus/
│ │ │ │ └── run.sh
│ │ │ ├── execution/
│ │ │ │ ├── geth/
│ │ │ │ │ └── run.sh
│ │ │ │ └── reth/
│ │ │ │ └── run.sh
│ │ │ └── run.sh
│ │ ├── ethereum-relayer/
│ │ │ └── .folder
│ │ └── monero/
│ │ ├── hashes-v0.18.3.4.txt
│ │ └── run.sh
│ ├── processor/
│ │ ├── bitcoin/
│ │ │ └── .folder
│ │ ├── ethereum/
│ │ │ └── .folder
│ │ └── monero/
│ │ └── .folder
│ └── serai/
│ └── run.sh
├── patches/
│ ├── directories-next/
│ │ ├── Cargo.toml
│ │ └── src/
│ │ └── lib.rs
│ ├── home/
│ │ ├── Cargo.toml
│ │ └── src/
│ │ └── lib.rs
│ ├── matches/
│ │ ├── Cargo.toml
│ │ └── src/
│ │ └── lib.rs
│ └── option-ext/
│ ├── Cargo.toml
│ └── src/
│ └── lib.rs
├── processor/
│ ├── Cargo.toml
│ ├── LICENSE
│ ├── README.md
│ ├── messages/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ └── src/
│ │ └── lib.rs
│ └── src/
│ ├── additional_key.rs
│ ├── batch_signer.rs
│ ├── coordinator.rs
│ ├── cosigner.rs
│ ├── db.rs
│ ├── key_gen.rs
│ ├── lib.rs
│ ├── main.rs
│ ├── multisigs/
│ │ ├── db.rs
│ │ ├── mod.rs
│ │ ├── scanner.rs
│ │ └── scheduler/
│ │ ├── mod.rs
│ │ ├── smart_contract.rs
│ │ └── utxo.rs
│ ├── networks/
│ │ ├── bitcoin.rs
│ │ ├── ethereum.rs
│ │ ├── mod.rs
│ │ └── monero.rs
│ ├── plan.rs
│ ├── signer.rs
│ ├── slash_report_signer.rs
│ └── tests/
│ ├── addresses.rs
│ ├── batch_signer.rs
│ ├── cosigner.rs
│ ├── key_gen.rs
│ ├── literal/
│ │ └── mod.rs
│ ├── mod.rs
│ ├── scanner.rs
│ ├── signer.rs
│ └── wallet.rs
├── rust-toolchain.toml
├── spec/
│ ├── DKG Exclusions.md
│ ├── Getting Started.md
│ ├── Serai.md
│ ├── coordinator/
│ │ ├── Coordinator.md
│ │ └── Tributary.md
│ ├── cryptography/
│ │ ├── Distributed Key Generation.md
│ │ └── FROST.md
│ ├── integrations/
│ │ ├── Bitcoin.md
│ │ ├── Ethereum.md
│ │ ├── Instructions.md
│ │ └── Monero.md
│ ├── policy/
│ │ └── Canonical Chain.md
│ ├── processor/
│ │ ├── Multisig Rotation.md
│ │ ├── Processor.md
│ │ ├── Scanning.md
│ │ └── UTXO Management.md
│ └── protocol/
│ ├── Constants.md
│ ├── In Instructions.md
│ └── Validator Sets.md
├── substrate/
│ ├── abi/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ └── src/
│ │ ├── babe.rs
│ │ ├── coins.rs
│ │ ├── dex.rs
│ │ ├── economic_security.rs
│ │ ├── emissions.rs
│ │ ├── genesis_liquidity.rs
│ │ ├── grandpa.rs
│ │ ├── in_instructions.rs
│ │ ├── lib.rs
│ │ ├── liquidity_tokens.rs
│ │ ├── signals.rs
│ │ ├── system.rs
│ │ ├── timestamp.rs
│ │ ├── tx.rs
│ │ └── validator_sets.rs
│ ├── client/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ ├── src/
│ │ │ ├── lib.rs
│ │ │ ├── networks/
│ │ │ │ ├── bitcoin.rs
│ │ │ │ ├── mod.rs
│ │ │ │ └── monero.rs
│ │ │ ├── serai/
│ │ │ │ ├── coins.rs
│ │ │ │ ├── dex.rs
│ │ │ │ ├── genesis_liquidity.rs
│ │ │ │ ├── in_instructions.rs
│ │ │ │ ├── liquidity_tokens.rs
│ │ │ │ ├── mod.rs
│ │ │ │ └── validator_sets.rs
│ │ │ └── tests/
│ │ │ ├── mod.rs
│ │ │ └── networks/
│ │ │ ├── bitcoin.rs
│ │ │ ├── mod.rs
│ │ │ └── monero.rs
│ │ └── tests/
│ │ ├── batch.rs
│ │ ├── burn.rs
│ │ ├── common/
│ │ │ ├── dex.rs
│ │ │ ├── genesis_liquidity.rs
│ │ │ ├── in_instructions.rs
│ │ │ ├── mod.rs
│ │ │ ├── tx.rs
│ │ │ └── validator_sets.rs
│ │ ├── dex.rs
│ │ ├── dht.rs
│ │ ├── emissions.rs
│ │ ├── genesis_liquidity.rs
│ │ ├── time.rs
│ │ └── validator_sets.rs
│ ├── coins/
│ │ ├── pallet/
│ │ │ ├── Cargo.toml
│ │ │ ├── LICENSE
│ │ │ └── src/
│ │ │ ├── lib.rs
│ │ │ ├── mock.rs
│ │ │ └── tests.rs
│ │ └── primitives/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ └── src/
│ │ └── lib.rs
│ ├── dex/
│ │ └── pallet/
│ │ ├── Cargo.toml
│ │ ├── LICENSE-AGPL3
│ │ ├── LICENSE-APACHE2
│ │ └── src/
│ │ ├── benchmarking.rs
│ │ ├── lib.rs
│ │ ├── mock.rs
│ │ ├── tests.rs
│ │ ├── types.rs
│ │ └── weights.rs
│ ├── economic-security/
│ │ └── pallet/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ └── src/
│ │ └── lib.rs
│ ├── emissions/
│ │ ├── pallet/
│ │ │ ├── Cargo.toml
│ │ │ ├── LICENSE
│ │ │ └── src/
│ │ │ └── lib.rs
│ │ └── primitives/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ └── src/
│ │ └── lib.rs
│ ├── genesis-liquidity/
│ │ ├── pallet/
│ │ │ ├── Cargo.toml
│ │ │ ├── LICENSE
│ │ │ └── src/
│ │ │ └── lib.rs
│ │ └── primitives/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ └── src/
│ │ └── lib.rs
│ ├── in-instructions/
│ │ ├── pallet/
│ │ │ ├── Cargo.toml
│ │ │ ├── LICENSE
│ │ │ └── src/
│ │ │ └── lib.rs
│ │ └── primitives/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ └── src/
│ │ ├── lib.rs
│ │ └── shorthand.rs
│ ├── node/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ ├── build.rs
│ │ └── src/
│ │ ├── chain_spec.rs
│ │ ├── cli.rs
│ │ ├── command.rs
│ │ ├── keystore.rs
│ │ ├── main.rs
│ │ ├── rpc.rs
│ │ └── service.rs
│ ├── primitives/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ └── src/
│ │ ├── account.rs
│ │ ├── amount.rs
│ │ ├── balance.rs
│ │ ├── block.rs
│ │ ├── constants.rs
│ │ ├── lib.rs
│ │ └── networks.rs
│ ├── runtime/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ ├── build.rs
│ │ └── src/
│ │ ├── abi.rs
│ │ └── lib.rs
│ ├── signals/
│ │ ├── pallet/
│ │ │ ├── Cargo.toml
│ │ │ ├── LICENSE
│ │ │ └── src/
│ │ │ └── lib.rs
│ │ └── primitives/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ └── src/
│ │ └── lib.rs
│ └── validator-sets/
│ ├── pallet/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ └── src/
│ │ └── lib.rs
│ └── primitives/
│ ├── Cargo.toml
│ ├── LICENSE
│ └── src/
│ └── lib.rs
└── tests/
├── coordinator/
│ ├── Cargo.toml
│ ├── LICENSE
│ └── src/
│ ├── lib.rs
│ └── tests/
│ ├── batch.rs
│ ├── key_gen.rs
│ ├── mod.rs
│ ├── rotation.rs
│ └── sign.rs
├── docker/
│ ├── Cargo.toml
│ ├── LICENSE
│ ├── README.md
│ └── src/
│ └── lib.rs
├── full-stack/
│ ├── Cargo.toml
│ ├── LICENSE
│ └── src/
│ ├── lib.rs
│ └── tests/
│ ├── mint_and_burn.rs
│ └── mod.rs
├── message-queue/
│ ├── Cargo.toml
│ ├── LICENSE
│ └── src/
│ └── lib.rs
├── no-std/
│ ├── Cargo.toml
│ ├── LICENSE
│ ├── README.md
│ └── src/
│ └── lib.rs
├── processor/
│ ├── Cargo.toml
│ ├── LICENSE
│ └── src/
│ ├── lib.rs
│ ├── networks.rs
│ └── tests/
│ ├── batch.rs
│ ├── key_gen.rs
│ ├── mod.rs
│ └── send.rs
└── reproducible-runtime/
├── Cargo.toml
├── LICENSE
└── src/
└── lib.rs
================================================
FILE CONTENTS
================================================
================================================
FILE: .gitattributes
================================================
# Auto detect text files and perform LF normalization
* text=auto
* text eol=lf
*.pdf binary
================================================
FILE: .github/LICENSE
================================================
MIT License
Copyright (c) 2022-2023 Luke Parker
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: .github/actions/bitcoin/action.yml
================================================
name: bitcoin-regtest
description: Spawns a regtest Bitcoin daemon
inputs:
version:
description: "Version to download and run"
required: false
default: "27.0"
runs:
using: "composite"
steps:
- name: Bitcoin Daemon Cache
id: cache-bitcoind
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
with:
path: bitcoin.tar.gz
key: bitcoind-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}
- name: Download the Bitcoin Daemon
if: steps.cache-bitcoind.outputs.cache-hit != 'true'
shell: bash
run: |
RUNNER_OS=linux
RUNNER_ARCH=x86_64
FILE=bitcoin-${{ inputs.version }}-$RUNNER_ARCH-$RUNNER_OS-gnu.tar.gz
wget https://bitcoincore.org/bin/bitcoin-core-${{ inputs.version }}/$FILE
mv $FILE bitcoin.tar.gz
- name: Extract the Bitcoin Daemon
shell: bash
run: |
tar xzvf bitcoin.tar.gz
cd bitcoin-${{ inputs.version }}
sudo mv bin/* /bin && sudo mv lib/* /lib
- name: Bitcoin Regtest Daemon
shell: bash
run: PATH=$PATH:/usr/bin ./orchestration/dev/networks/bitcoin/run.sh -daemon
================================================
FILE: .github/actions/build-dependencies/action.yml
================================================
name: build-dependencies
description: Installs build dependencies for Serai
runs:
using: "composite"
steps:
- name: Remove unused packages
shell: bash
run: |
# Ensure the repositories are synced
sudo apt update -y
# Actually perform the removals
sudo apt remove -y "*powershell*" "*nuget*" "*bazel*" "*ansible*" "*terraform*" "*heroku*" "*aws*" azure-cli
sudo apt remove -y "*nodejs*" "*npm*" "*yarn*" "*java*" "*kotlin*" "*golang*" "*swift*" "*julia*" "*fortran*" "*android*"
sudo apt remove -y "*apache2*" "*nginx*" "*firefox*" "*chromium*" "*chrome*" "*edge*"
sudo apt remove -y --allow-remove-essential -f shim-signed "*python3*"
# This removal command requires the prior removals due to unmet dependencies otherwise
sudo apt remove -y "*qemu*" "*sql*" "*texinfo*" "*imagemagick*"
# Reinstall python3 as a general dependency of a functional operating system
sudo apt install -y python3 --fix-missing
if: runner.os == 'Linux'
- name: Remove unused packages
shell: bash
run: |
(gem uninstall -aIx) || (exit 0)
brew uninstall --force "*msbuild*" "*powershell*" "*nuget*" "*bazel*" "*ansible*" "*terraform*" "*heroku*" "*aws*" azure-cli
brew uninstall --force "*nodejs*" "*npm*" "*yarn*" "*java*" "*kotlin*" "*golang*" "*swift*" "*julia*" "*fortran*" "*android*"
brew uninstall --force "*apache2*" "*nginx*" "*firefox*" "*chromium*" "*chrome*" "*edge*"
brew uninstall --force "*qemu*" "*sql*" "*texinfo*" "*imagemagick*"
brew cleanup
if: runner.os == 'macOS'
- name: Install dependencies
shell: bash
run: |
if [ "$RUNNER_OS" == "Linux" ]; then
sudo apt install -y ca-certificates protobuf-compiler libclang-dev
elif [ "$RUNNER_OS" == "Windows" ]; then
choco install protoc
elif [ "$RUNNER_OS" == "macOS" ]; then
brew install protobuf llvm
HOMEBREW_ROOT_PATH=/opt/homebrew # Apple Silicon
if [ $(uname -m) = "x86_64" ]; then HOMEBREW_ROOT_PATH=/usr/local; fi # Intel
ls $HOMEBREW_ROOT_PATH/opt/llvm/lib | grep "libclang.dylib" # Make sure this installed `libclang`
echo "DYLD_LIBRARY_PATH=$HOMEBREW_ROOT_PATH/opt/llvm/lib:$DYLD_LIBRARY_PATH" >> "$GITHUB_ENV"
fi
- name: Install solc
shell: bash
run: |
cargo +1.89 install svm-rs --version =0.5.18
svm install 0.8.26
svm use 0.8.26
- name: Remove preinstalled Docker
shell: bash
run: |
docker system prune -a --volumes
sudo apt remove -y *docker*
# Install uidmap which will be required for the explicitly installed Docker
sudo apt install uidmap
if: runner.os == 'Linux'
- name: Update system dependencies
shell: bash
run: |
sudo apt update -y
sudo apt upgrade -y
sudo apt autoremove -y
sudo apt clean
if: runner.os == 'Linux'
- name: Install rootless Docker
uses: docker/setup-docker-action@b60f85385d03ac8acfca6d9996982511d8620a19
with:
rootless: true
set-host: true
if: runner.os == 'Linux'
# - name: Cache Rust
# uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43
================================================
FILE: .github/actions/monero/action.yml
================================================
name: monero-regtest
description: Spawns a regtest Monero daemon
inputs:
version:
description: "Version to download and run"
required: false
default: v0.18.3.4
runs:
using: "composite"
steps:
- name: Monero Daemon Cache
id: cache-monerod
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
with:
path: /usr/bin/monerod
key: monerod-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}
- name: Download the Monero Daemon
if: steps.cache-monerod.outputs.cache-hit != 'true'
# Calculates OS/ARCH to demonstrate it, yet then locks to linux-x64 due
# to the contained folder not following the same naming scheme and
# requiring further expansion not worth doing right now
shell: bash
run: |
RUNNER_OS=${{ runner.os }}
RUNNER_ARCH=${{ runner.arch }}
RUNNER_OS=${RUNNER_OS,,}
RUNNER_ARCH=${RUNNER_ARCH,,}
RUNNER_OS=linux
RUNNER_ARCH=x64
FILE=monero-$RUNNER_OS-$RUNNER_ARCH-${{ inputs.version }}.tar.bz2
wget https://downloads.getmonero.org/cli/$FILE
tar -xvf $FILE
sudo mv monero-x86_64-linux-gnu-${{ inputs.version }}/monerod /usr/bin/monerod
sudo chmod 777 /usr/bin/monerod
sudo chmod +x /usr/bin/monerod
- name: Monero Regtest Daemon
shell: bash
run: PATH=$PATH:/usr/bin ./orchestration/dev/networks/monero/run.sh --detach
================================================
FILE: .github/actions/monero-wallet-rpc/action.yml
================================================
name: monero-wallet-rpc
description: Spawns a Monero Wallet-RPC.
inputs:
version:
description: "Version to download and run"
required: false
default: v0.18.3.4
runs:
using: "composite"
steps:
- name: Monero Wallet RPC Cache
id: cache-monero-wallet-rpc
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
with:
path: monero-wallet-rpc
key: monero-wallet-rpc-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}
- name: Download the Monero Wallet RPC
if: steps.cache-monero-wallet-rpc.outputs.cache-hit != 'true'
# Calculates OS/ARCH to demonstrate it, yet then locks to linux-x64 due
# to the contained folder not following the same naming scheme and
# requiring further expansion not worth doing right now
shell: bash
run: |
RUNNER_OS=${{ runner.os }}
RUNNER_ARCH=${{ runner.arch }}
RUNNER_OS=${RUNNER_OS,,}
RUNNER_ARCH=${RUNNER_ARCH,,}
RUNNER_OS=linux
RUNNER_ARCH=x64
FILE=monero-$RUNNER_OS-$RUNNER_ARCH-${{ inputs.version }}.tar.bz2
wget https://downloads.getmonero.org/cli/$FILE
tar -xvf $FILE
mv monero-x86_64-linux-gnu-${{ inputs.version }}/monero-wallet-rpc monero-wallet-rpc
- name: Monero Wallet RPC
shell: bash
run: |
./monero-wallet-rpc --allow-mismatched-daemon-version \
--daemon-address 0.0.0.0:18081 --daemon-login serai:seraidex \
--disable-rpc-login --rpc-bind-port 18082 \
--wallet-dir ./ \
--detach
================================================
FILE: .github/actions/test-dependencies/action.yml
================================================
name: test-dependencies
description: Installs test dependencies for Serai
inputs:
monero-version:
description: "Monero version to download and run as a regtest node"
required: false
default: v0.18.3.4
bitcoin-version:
description: "Bitcoin version to download and run as a regtest node"
required: false
default: "27.1"
runs:
using: "composite"
steps:
- name: Install Build Dependencies
uses: ./.github/actions/build-dependencies
- name: Install Foundry
uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773
with:
version: nightly-f625d0fa7c51e65b4bf1e8f7931cd1c6e2e285e9
cache: false
- name: Run a Monero Regtest Node
uses: ./.github/actions/monero
with:
version: ${{ inputs.monero-version }}
- name: Run a Bitcoin Regtest Node
uses: ./.github/actions/bitcoin
with:
version: ${{ inputs.bitcoin-version }}
- name: Run a Monero Wallet-RPC
uses: ./.github/actions/monero-wallet-rpc
================================================
FILE: .github/nightly-version
================================================
nightly-2025-11-01
================================================
FILE: .github/workflows/common-tests.yml
================================================
name: common/ Tests
on:
push:
branches:
- develop
paths:
- "common/**"
pull_request:
paths:
- "common/**"
workflow_dispatch:
jobs:
test-common:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Build Dependencies
uses: ./.github/actions/build-dependencies
- name: Run Tests
run: |
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
-p std-shims \
-p zalloc \
-p patchable-async-sleep \
-p serai-db \
-p serai-env \
-p simple-request
================================================
FILE: .github/workflows/coordinator-tests.yml
================================================
name: Coordinator Tests
on:
push:
branches:
- develop
paths:
- "common/**"
- "crypto/**"
- "networks/**"
- "message-queue/**"
- "coordinator/**"
- "orchestration/**"
- "tests/docker/**"
- "tests/coordinator/**"
pull_request:
paths:
- "common/**"
- "crypto/**"
- "networks/**"
- "message-queue/**"
- "coordinator/**"
- "orchestration/**"
- "tests/docker/**"
- "tests/coordinator/**"
workflow_dispatch:
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Install Build Dependencies
uses: ./.github/actions/build-dependencies
- name: Run coordinator Docker tests
run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-coordinator-tests
================================================
FILE: .github/workflows/crypto-tests.yml
================================================
name: crypto/ Tests
on:
push:
branches:
- develop
paths:
- "common/**"
- "crypto/**"
pull_request:
paths:
- "common/**"
- "crypto/**"
workflow_dispatch:
jobs:
test-crypto:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Build Dependencies
uses: ./.github/actions/build-dependencies
- name: Run Tests
run: |
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
-p flexible-transcript \
-p ff-group-tests \
-p dalek-ff-group \
-p minimal-ed448 \
-p ciphersuite \
-p ciphersuite-kp256 \
-p multiexp \
-p schnorr-signatures \
-p dleq \
-p dkg \
-p dkg-recovery \
-p dkg-dealer \
-p dkg-promote \
-p dkg-musig \
-p dkg-pedpop \
-p modular-frost \
-p frost-schnorrkel
================================================
FILE: .github/workflows/daily-deny.yml
================================================
name: Daily Deny Check
on:
schedule:
- cron: "0 0 * * *"
jobs:
deny:
name: Run cargo deny
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Advisory Cache
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
with:
path: ~/.cargo/advisory-db
key: rust-advisory-db
- name: Install cargo deny
run: cargo +1.89 install cargo-deny --version =0.18.4
- name: Run cargo deny
run: cargo deny -L error --all-features check --hide-inclusion-graph
================================================
FILE: .github/workflows/full-stack-tests.yml
================================================
name: Full Stack Tests
on:
push:
branches:
- develop
pull_request:
workflow_dispatch:
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Install Build Dependencies
uses: ./.github/actions/build-dependencies
- name: Run Full Stack Docker tests
run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-full-stack-tests
================================================
FILE: .github/workflows/lint.yml
================================================
name: Lint
on:
push:
branches:
- develop
pull_request:
workflow_dispatch:
jobs:
clippy:
strategy:
matrix:
os: [ubuntu-latest, macos-15-intel, macos-latest, windows-latest]
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Get nightly version to use
id: nightly
shell: bash
run: echo "version=$(cat .github/nightly-version)" >> $GITHUB_OUTPUT
- name: Build Dependencies
uses: ./.github/actions/build-dependencies
- name: Install nightly rust
run: rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32v1-none -c rust-src -c clippy
- name: Run Clippy
run: cargo +${{ steps.nightly.outputs.version }} clippy --all-features --all-targets -- -D warnings -A clippy::items_after_test_module
# Also verify the lockfile isn't dirty
# This happens when someone edits a Cargo.toml yet doesn't do anything
# which causes the lockfile to be updated
# The above clippy run will cause it to be updated, so checking there's
# no differences present now performs the desired check
- name: Verify lockfile
shell: bash
run: git diff | wc -l | LC_ALL="en_US.utf8" grep -x -e "^[ ]*0"
deny:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Advisory Cache
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
with:
path: ~/.cargo/advisory-db
key: rust-advisory-db
- name: Install cargo deny
run: cargo +1.89 install cargo-deny --version =0.18.4
- name: Run cargo deny
run: cargo deny -L error --all-features check --hide-inclusion-graph
fmt:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Get nightly version to use
id: nightly
shell: bash
run: echo "version=$(cat .github/nightly-version)" >> $GITHUB_OUTPUT
- name: Install nightly rust
run: rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -c rustfmt
- name: Run rustfmt
run: cargo +${{ steps.nightly.outputs.version }} fmt -- --check
machete:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Verify all dependencies are in use
run: |
cargo +1.89 install cargo-machete --version =0.8.0
cargo +1.89 machete
================================================
FILE: .github/workflows/message-queue-tests.yml
================================================
name: Message Queue Tests
on:
push:
branches:
- develop
paths:
- "common/**"
- "crypto/**"
- "message-queue/**"
- "orchestration/**"
- "tests/docker/**"
- "tests/message-queue/**"
pull_request:
paths:
- "common/**"
- "crypto/**"
- "message-queue/**"
- "orchestration/**"
- "tests/docker/**"
- "tests/message-queue/**"
workflow_dispatch:
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Install Build Dependencies
uses: ./.github/actions/build-dependencies
- name: Run message-queue Docker tests
run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-message-queue-tests
================================================
FILE: .github/workflows/mini-tests.yml
================================================
name: mini/ Tests
on:
push:
branches:
- develop
paths:
- "mini/**"
pull_request:
paths:
- "mini/**"
workflow_dispatch:
jobs:
test-common:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Build Dependencies
uses: ./.github/actions/build-dependencies
- name: Run Tests
run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p mini-serai
================================================
FILE: .github/workflows/monthly-nightly-update.yml
================================================
name: Monthly Nightly Update
on:
schedule:
- cron: "0 0 1 * *"
jobs:
update:
name: Update nightly
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
with:
submodules: "recursive"
- name: Write nightly version
run: echo $(date +"nightly-%Y-%m"-01) > .github/nightly-version
- name: Create the commit
run: |
git config user.name "GitHub Actions"
git config user.email "<>"
git checkout -b $(date +"nightly-%Y-%m")
git add .github/nightly-version
git commit -m "Update nightly"
git push -u origin $(date +"nightly-%Y-%m")
- name: Pull Request
uses: actions/github-script@d7906e4ad0b1822421a7e6a35d5ca353c962f410
with:
script: |
const { repo, owner } = context.repo;
const result = await github.rest.pulls.create({
title: (new Date()).toLocaleString(
false,
{ month: "long", year: "numeric" }
) + " - Rust Nightly Update",
owner,
repo,
head: "nightly-" + (new Date()).toISOString().split("-").splice(0, 2).join("-"),
base: "develop",
body: "PR auto-generated by a GitHub workflow."
});
github.rest.issues.addLabels({
owner,
repo,
issue_number: result.data.number,
labels: ["improvement"]
});
================================================
FILE: .github/workflows/networks-tests.yml
================================================
name: networks/ Tests
on:
push:
branches:
- develop
paths:
- "common/**"
- "crypto/**"
- "networks/**"
pull_request:
paths:
- "common/**"
- "crypto/**"
- "networks/**"
workflow_dispatch:
jobs:
test-networks:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Test Dependencies
uses: ./.github/actions/test-dependencies
- name: Run Tests
run: |
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
-p bitcoin-serai \
-p alloy-simple-request-transport \
-p ethereum-serai \
            -p serai-ethereum-relayer
================================================
FILE: .github/workflows/no-std.yml
================================================
name: no-std build
on:
push:
branches:
- develop
paths:
- "common/**"
- "crypto/**"
- "networks/**"
- "tests/no-std/**"
pull_request:
paths:
- "common/**"
- "crypto/**"
- "networks/**"
- "tests/no-std/**"
workflow_dispatch:
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Install Build Dependencies
uses: ./.github/actions/build-dependencies
- name: Install RISC-V Toolchain
run: sudo apt update && sudo apt install -y gcc-riscv64-unknown-elf gcc-multilib && rustup target add riscv32imac-unknown-none-elf
- name: Verify no-std builds
run: CFLAGS=-I/usr/include cargo build --target riscv32imac-unknown-none-elf -p serai-no-std-tests
================================================
FILE: .github/workflows/pages.yml
================================================
# MIT License
#
# Copyright (c) 2022 just-the-docs
# Copyright (c) 2022-2024 Luke Parker
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
name: Deploy Rust docs and Jekyll site to Pages
on:
push:
branches:
- "develop"
workflow_dispatch:
permissions:
contents: read
pages: write
id-token: write
# Only allow one concurrent deployment
concurrency:
group: "pages"
cancel-in-progress: true
jobs:
# Build job
build:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Setup Ruby
uses: ruby/setup-ruby@44511735964dcb71245e7e55f72539531f7bc0eb
with:
bundler-cache: true
cache-version: 0
working-directory: "${{ github.workspace }}/docs"
- name: Setup Pages
id: pages
uses: actions/configure-pages@983d7736d9b0ae728b81ab479565c72886d7745b
- name: Build with Jekyll
run: cd ${{ github.workspace }}/docs && bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}"
env:
JEKYLL_ENV: production
- name: Get nightly version to use
id: nightly
shell: bash
run: echo "version=$(cat .github/nightly-version)" >> $GITHUB_OUTPUT
- name: Build Dependencies
uses: ./.github/actions/build-dependencies
      - name: Build Rust docs
run: |
rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32v1-none -c rust-docs
RUSTDOCFLAGS="--cfg docsrs" cargo +${{ steps.nightly.outputs.version }} doc --workspace --no-deps --all-features
mv target/doc docs/_site/rust
- name: Upload artifact
uses: actions/upload-pages-artifact@7b1f4a764d45c48632c6b24a0339c27f5614fb0b
with:
path: "docs/_site/"
# Deployment job
deploy:
environment:
name: github-pages
url: ${{ steps.deployment.outputs.page_url }}
runs-on: ubuntu-latest
needs: build
steps:
- name: Deploy to GitHub Pages
id: deployment
uses: actions/deploy-pages@d6db90164ac5ed86f2b6aed7e0febac5b3c0c03e
================================================
FILE: .github/workflows/processor-tests.yml
================================================
name: Processor Tests
on:
push:
branches:
- develop
paths:
- "common/**"
- "crypto/**"
- "networks/**"
- "message-queue/**"
- "processor/**"
- "orchestration/**"
- "tests/docker/**"
- "tests/processor/**"
pull_request:
paths:
- "common/**"
- "crypto/**"
- "networks/**"
- "message-queue/**"
- "processor/**"
- "orchestration/**"
- "tests/docker/**"
- "tests/processor/**"
workflow_dispatch:
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Install Build Dependencies
uses: ./.github/actions/build-dependencies
- name: Run processor Docker tests
run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-processor-tests
================================================
FILE: .github/workflows/reproducible-runtime.yml
================================================
name: Reproducible Runtime
on:
push:
branches:
- develop
paths:
- "Cargo.lock"
- "common/**"
- "crypto/**"
- "substrate/**"
- "orchestration/runtime/**"
- "tests/reproducible-runtime/**"
pull_request:
paths:
- "Cargo.lock"
- "common/**"
- "crypto/**"
- "substrate/**"
- "orchestration/runtime/**"
- "tests/reproducible-runtime/**"
workflow_dispatch:
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Install Build Dependencies
uses: ./.github/actions/build-dependencies
- name: Run Reproducible Runtime tests
run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-reproducible-runtime-tests
================================================
FILE: .github/workflows/tests.yml
================================================
name: Tests
on:
push:
branches:
- develop
paths:
- "common/**"
- "crypto/**"
- "networks/**"
- "message-queue/**"
- "processor/**"
- "coordinator/**"
- "substrate/**"
pull_request:
paths:
- "common/**"
- "crypto/**"
- "networks/**"
- "message-queue/**"
- "processor/**"
- "coordinator/**"
- "substrate/**"
workflow_dispatch:
jobs:
test-infra:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Build Dependencies
uses: ./.github/actions/build-dependencies
- name: Run Tests
run: |
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
-p serai-message-queue \
-p serai-processor-messages \
-p serai-processor \
-p tendermint-machine \
-p tributary-chain \
-p serai-coordinator \
-p serai-orchestrator \
-p serai-docker-tests
test-substrate:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Build Dependencies
uses: ./.github/actions/build-dependencies
- name: Run Tests
run: |
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
-p serai-primitives \
-p serai-coins-primitives \
-p serai-coins-pallet \
-p serai-dex-pallet \
-p serai-validator-sets-primitives \
-p serai-validator-sets-pallet \
-p serai-genesis-liquidity-primitives \
-p serai-genesis-liquidity-pallet \
-p serai-emissions-primitives \
-p serai-emissions-pallet \
-p serai-economic-security-pallet \
-p serai-in-instructions-primitives \
-p serai-in-instructions-pallet \
-p serai-signals-primitives \
-p serai-signals-pallet \
-p serai-abi \
-p serai-runtime \
-p serai-node
test-serai-client:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Build Dependencies
uses: ./.github/actions/build-dependencies
- name: Run Tests
run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client
================================================
FILE: .gitignore
================================================
target
# Don't commit any `Cargo.lock` which aren't the workspace's
Cargo.lock
!./Cargo.lock
# Don't commit any `Dockerfile`, as they're auto-generated, except the only one which isn't
Dockerfile
Dockerfile.fast-epoch
!orchestration/runtime/Dockerfile
.test-logs
.vscode
================================================
FILE: .rustfmt.toml
================================================
edition = "2021"
tab_spaces = 2
max_width = 100
# Let the developer decide based on the 100 char line limit
use_small_heuristics = "Max"
error_on_line_overflow = true
error_on_unformatted = true
imports_granularity = "Crate"
reorder_imports = false
reorder_modules = false
unstable_features = true
spaces_around_ranges = true
binop_separator = "Back"
================================================
FILE: AGPL-3.0
================================================
GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU Affero General Public License is a free, copyleft license for
software and other kinds of works, specifically designed to ensure
cooperation with the community in the case of network server software.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
our General Public Licenses are intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
Developers that use our General Public Licenses protect your rights
with two steps: (1) assert copyright on the software, and (2) offer
you this License which gives you legal permission to copy, distribute
and/or modify the software.
A secondary benefit of defending all users' freedom is that
improvements made in alternate versions of the program, if they
receive widespread use, become available for other developers to
incorporate. Many developers of free software are heartened and
encouraged by the resulting cooperation. However, in the case of
software used on network servers, this result may fail to come about.
The GNU General Public License permits making a modified version and
letting the public access it on a server without ever releasing its
source code to the public.
The GNU Affero General Public License is designed specifically to
ensure that, in such cases, the modified source code becomes available
to the community. It requires the operator of a network server to
provide the source code of the modified version running there to the
users of that server. Therefore, public use of a modified version, on
a publicly accessible server, gives the public access to the source
code of the modified version.
An older license, called the Affero General Public License and
published by Affero, was designed to accomplish similar goals. This is
a different license, not a version of the Affero GPL, but Affero has
released a new version of the Affero GPL which permits relicensing under
this license.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU Affero General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Remote Network Interaction; Use with the GNU General Public License.
Notwithstanding any other provision of this License, if you modify the
Program, your modified version must prominently offer all users
interacting with it remotely through a computer network (if your version
supports such interaction) an opportunity to receive the Corresponding
Source of your version by providing access to the Corresponding Source
from a network server at no charge, through some standard or customary
means of facilitating copying of software. This Corresponding Source
shall include the Corresponding Source for any work covered by version 3
of the GNU General Public License that is incorporated pursuant to the
following paragraph.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the work with which it is combined will remain governed by version
3 of the GNU General Public License.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU Affero General Public License from time to time. Such new versions
will be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU Affero General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU Affero General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU Affero General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If your software can interact with users remotely through a computer
network, you should also make sure that it provides a way for users to
get its source. For example, if your program is a web application, its
interface could display a "Source" link that leads users to an archive
of the code. There are many ways you could offer source, and different
solutions will be better for different programs; see section 13 for the
specific requirements.
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU AGPL, see
<https://www.gnu.org/licenses/>.
================================================
FILE: CONTRIBUTING.md
================================================
# Contributing
Contributions come in a variety of forms. Developing Serai, helping document it,
using its libraries in another project, using and testing it, and simply sharing
it are all valuable ways of contributing.
This document will specifically focus on contributions to this repository in the
form of code and documentation.
### Rules
- Stable native Rust, nightly wasm and tools.
- `cargo fmt` must be used.
- `cargo clippy` must pass, except for the ignored rules (`type_complexity` and
`dead_code`).
- The CI must pass.
- Only use uppercase variable names when relevant to cryptography.
- Use a two-space indent when possible.
- Put a space after comment markers.
- Don't use multiple newlines between sections of code.
- Have a newline before EOF.
### Guidelines
- Sort imports as core, std, third party, and then Serai.
- Comment code reasonably.
- Include tests for new features.
- Sign commits.
### Submission
All submissions should be through GitHub. Contributions to a crate will be
licensed according to the crate's existing license, with the crate's copyright
holders (distinct from authors) having the right to re-license the crate via a
unanimous decision.
================================================
FILE: Cargo.toml
================================================
[workspace]
resolver = "2"
members = [
# std patches
"patches/matches",
# Rewrites/redirects
"patches/option-ext",
"patches/directories-next",
"common/std-shims",
"common/zalloc",
"common/patchable-async-sleep",
"common/db",
"common/env",
"common/request",
"crypto/transcript",
"crypto/ff-group-tests",
"crypto/dalek-ff-group",
"crypto/ed448",
"crypto/ciphersuite",
"crypto/ciphersuite/kp256",
"crypto/multiexp",
"crypto/schnorr",
"crypto/dleq",
"crypto/dkg",
"crypto/dkg/recovery",
"crypto/dkg/dealer",
"crypto/dkg/promote",
"crypto/dkg/musig",
"crypto/dkg/pedpop",
"crypto/frost",
"crypto/schnorrkel",
"networks/bitcoin",
"networks/ethereum/alloy-simple-request-transport",
"networks/ethereum",
"networks/ethereum/relayer",
"message-queue",
"processor/messages",
"processor",
"coordinator/tributary/tendermint",
"coordinator/tributary",
"coordinator",
"substrate/primitives",
"substrate/coins/primitives",
"substrate/coins/pallet",
"substrate/dex/pallet",
"substrate/validator-sets/primitives",
"substrate/validator-sets/pallet",
"substrate/genesis-liquidity/primitives",
"substrate/genesis-liquidity/pallet",
"substrate/emissions/primitives",
"substrate/emissions/pallet",
"substrate/economic-security/pallet",
"substrate/in-instructions/primitives",
"substrate/in-instructions/pallet",
"substrate/signals/primitives",
"substrate/signals/pallet",
"substrate/abi",
"substrate/runtime",
"substrate/node",
"substrate/client",
"orchestration",
"mini",
"tests/no-std",
"tests/docker",
"tests/message-queue",
"tests/processor",
"tests/coordinator",
"tests/full-stack",
"tests/reproducible-runtime",
]
# Always compile Monero (and a variety of dependencies) with optimizations due
# to the extensive operations required for Bulletproofs
[profile.dev.package]
subtle = { opt-level = 3 }
curve25519-dalek = { opt-level = 3 }
ff = { opt-level = 3 }
group = { opt-level = 3 }
crypto-bigint = { opt-level = 3 }
dalek-ff-group = { opt-level = 3 }
minimal-ed448 = { opt-level = 3 }
multiexp = { opt-level = 3 }
monero-oxide = { opt-level = 3 }
[profile.release]
panic = "unwind"
overflow-checks = true
[patch.crates-io]
# Dependencies from monero-oxide which originate from within our own tree
std-shims = { path = "common/std-shims" }
simple-request = { path = "common/request" }
dalek-ff-group = { path = "crypto/dalek-ff-group" }
flexible-transcript = { path = "crypto/transcript" }
modular-frost = { path = "crypto/frost" }
# https://github.com/rust-lang-nursery/lazy-static.rs/issues/201
lazy_static = { git = "https://github.com/rust-lang-nursery/lazy-static.rs", rev = "5735630d46572f1e5377c8f2ba0f79d18f53b10c" }
# These have `std` alternatives
matches = { path = "patches/matches" }
home = { path = "patches/home" }
# directories-next was created because directories was unmaintained
# directories-next is now unmaintained while directories is maintained
# The directories author pulls in ridiculously pointless crates and prefers
# copyleft licenses
# The following two patches resolve everything
option-ext = { path = "patches/option-ext" }
directories-next = { path = "patches/directories-next" }
[workspace.lints.clippy]
uninlined_format_args = "allow" # TODO
unwrap_or_default = "allow"
manual_is_multiple_of = "allow"
incompatible_msrv = "allow" # Manually verified with a GitHub workflow
borrow_as_ptr = "deny"
cast_lossless = "deny"
cast_possible_truncation = "deny"
cast_possible_wrap = "deny"
cast_precision_loss = "deny"
cast_ptr_alignment = "deny"
cast_sign_loss = "deny"
checked_conversions = "deny"
cloned_instead_of_copied = "deny"
enum_glob_use = "deny"
expl_impl_clone_on_copy = "deny"
explicit_into_iter_loop = "deny"
explicit_iter_loop = "deny"
flat_map_option = "deny"
float_cmp = "deny"
fn_params_excessive_bools = "deny"
ignored_unit_patterns = "deny"
implicit_clone = "deny"
inefficient_to_string = "deny"
invalid_upcast_comparisons = "deny"
large_stack_arrays = "deny"
linkedlist = "deny"
macro_use_imports = "deny"
manual_instant_elapsed = "deny"
# TODO manual_let_else = "deny"
manual_ok_or = "deny"
manual_string_new = "deny"
map_unwrap_or = "deny"
match_bool = "deny"
match_same_arms = "deny"
missing_fields_in_debug = "deny"
# TODO needless_continue = "deny"
needless_pass_by_value = "deny"
ptr_cast_constness = "deny"
range_minus_one = "deny"
range_plus_one = "deny"
redundant_closure_for_method_calls = "deny"
redundant_else = "deny"
string_add_assign = "deny"
unchecked_time_subtraction = "deny"
unnecessary_box_returns = "deny"
unnecessary_join = "deny"
unnecessary_wraps = "deny"
unnested_or_patterns = "deny"
unused_async = "deny"
unused_self = "deny"
zero_sized_map_values = "deny"
# TODO: These were incurred when updating Rust as necessary for compilation, yet aren't being fixed
# at this time due to the impacts it'd have throughout the repository (when this isn't actively the
# primary branch, `next` is)
needless_continue = "allow"
needless_lifetimes = "allow"
useless_conversion = "allow"
empty_line_after_doc_comments = "allow"
manual_div_ceil = "allow"
manual_let_else = "allow"
unnecessary_map_or = "allow"
result_large_err = "allow"
unneeded_struct_pattern = "allow"
[workspace.lints.rust]
unused = "allow" # TODO: https://github.com/rust-lang/rust/issues/147648
mismatched_lifetime_syntaxes = "allow"
unused_attributes = "allow"
unused_parens = "allow"
================================================
FILE: LICENSE
================================================
Serai crates are licensed under one of two licenses, either MIT or AGPL-3.0,
depending on the crate in question. Each crate declares their license in their
`Cargo.toml` and includes a `LICENSE` file detailing its status. Additionally,
a full copy of the AGPL-3.0 License is included in the root of this repository
as a reference text. This copy should be provided with any distribution of a
crate licensed under the AGPL-3.0, as per its terms.
The GitHub actions/workflows (`.github`) are licensed under the MIT license.
================================================
FILE: README.md
================================================
# Serai
Serai is a new DEX, built from the ground up, initially planning on listing
Bitcoin, Ethereum, DAI, and Monero, offering a liquidity-pool-based trading
experience. Funds are stored in an economically secured threshold-multisig
wallet.
[Getting Started](spec/Getting%20Started.md)
### Layout
- `audits`: Audits for various parts of Serai.
- `spec`: The specification of the Serai protocol, both internally and as
networked.
- `docs`: User-facing documentation on the Serai protocol.
- `common`: Crates containing utilities common to a variety of areas under
Serai, none neatly fitting under another category.
- `crypto`: A series of composable cryptographic libraries built around the
`ff`/`group` APIs, achieving a variety of tasks. These range from generic
infrastructure, to our IETF-compliant FROST implementation, to a DLEq proof as
needed for Bitcoin-Monero atomic swaps.
- `networks`: Various libraries intended for usage in Serai yet also by the
wider community. This means they will always support the functionality Serai
needs, yet won't disadvantage other use cases when possible.
- `message-queue`: An ordered message server so services can talk to each other,
even when the other is offline.
- `processor`: A generic chain processor to process data for Serai and process
events from Serai, executing transactions as expected and needed.
- `coordinator`: A service to manage processors and communicate over a P2P
network with other validators.
- `substrate`: Substrate crates used to instantiate the Serai network.
- `orchestration`: Dockerfiles and scripts to deploy a Serai node/test
environment.
- `tests`: Tests for various crates. Generally, `crate/src/tests` is used, or
`crate/tests`, yet any tests requiring crates' binaries are placed here.
### Security
Serai hosts a bug bounty program via
[Immunefi](https://immunefi.com/bounty/serai/). For in-scope critical
vulnerabilities, we will reward whitehats with up to $30,000.
Anything not in-scope should still be submitted through Immunefi, with rewards
issued at the discretion of the Immunefi program managers.
### Links
- [Website](https://serai.exchange/): https://serai.exchange/
- [Immunefi](https://immunefi.com/bounty/serai/): https://immunefi.com/bounty/serai/
- [Twitter](https://twitter.com/SeraiDEX): https://twitter.com/SeraiDEX
- [Discord](https://discord.gg/mpEUtJR3vz): https://discord.gg/mpEUtJR3vz
- [Matrix](https://matrix.to/#/#serai:matrix.org): https://matrix.to/#/#serai:matrix.org
- [Reddit](https://www.reddit.com/r/SeraiDEX/): https://www.reddit.com/r/SeraiDEX/
- [Telegram](https://t.me/SeraiDEX): https://t.me/SeraiDEX
================================================
FILE: audits/Cypher Stack crypto March 2023/LICENSE
================================================
MIT License
Copyright (c) 2023 Cypher Stack
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: audits/Cypher Stack crypto March 2023/README.md
================================================
# Cypher Stack /crypto Audit, March 2023
This audit was over the /crypto folder, excluding the ed448 crate, the `Ed448`
ciphersuite in the ciphersuite crate, and the `dleq/experimental` feature. It is
encompassing up to commit 669d2dbffc1dafb82a09d9419ea182667115df06.
Please see https://github.com/cypherstack/serai-audit for provenance.
================================================
FILE: audits/Cypher Stack networks bitcoin August 2023/LICENSE
================================================
MIT License
Copyright (c) 2023 Cypher Stack
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: audits/Cypher Stack networks bitcoin August 2023/README.md
================================================
# Cypher Stack /networks/bitcoin Audit, August 2023
This audit was over the `/networks/bitcoin` folder (at the time located at
`/coins/bitcoin`). It is encompassing up to commit
5121ca75199dff7bd34230880a1fdd793012068c.
Please see https://github.com/cypherstack/serai-btc-audit for provenance.
================================================
FILE: common/db/Cargo.toml
================================================
[package]
name = "serai-db"
version = "0.1.0"
description = "A simple database trait and backends for it"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/common/db"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
rust-version = "1.65"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[lints]
workspace = true
[dependencies]
parity-db = { version = "0.4", default-features = false, optional = true }
rocksdb = { version = "0.24", default-features = false, features = ["zstd"], optional = true }
[features]
parity-db = ["dep:parity-db"]
rocksdb = ["dep:rocksdb"]
================================================
FILE: common/db/LICENSE
================================================
MIT License
Copyright (c) 2022-2023 Luke Parker
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: common/db/src/create_db.rs
================================================
#[doc(hidden)]
pub fn serai_db_key(
  db_dst: &'static [u8],
  item_dst: &'static [u8],
  key: impl AsRef<[u8]>,
) -> Vec<u8> {
  // Length-prefix both destinations so distinct (db, item) namespace pairs can never produce
  // colliding keys. Panics if either destination exceeds 255 bytes.
  let key = key.as_ref();
  let mut res = Vec::with_capacity(2 + db_dst.len() + item_dst.len() + key.len());
  res.push(u8::try_from(db_dst.len()).unwrap());
  res.extend_from_slice(db_dst);
  res.push(u8::try_from(item_dst.len()).unwrap());
  res.extend_from_slice(item_dst);
  res.extend_from_slice(key);
  res
}
/// Creates a series of structs which provide namespacing for keys
///
/// # Description
///
/// Creates a unit struct and a default implementation for the `key`, `get`, and `set`. The macro
/// uses a syntax similar to defining a function. Parameters are concatenated to produce a key,
/// they must be `scale` encodable. The return type is used to auto encode and decode the database
/// value bytes using `borsh`.
///
/// # Arguments
///
/// * `db_name` - A database name
/// * `field_name` - An item name
/// * `args` - Comma separated list of key arguments
/// * `field_type` - The return type
///
/// # Example
///
/// ```ignore
/// create_db!(
///   TributariesDb {
///     AttemptsDb: (key_bytes: &[u8], attempt_id: u32) -> u64,
///     ExpiredDb: (genesis: [u8; 32]) -> Vec<u8>
///   }
/// )
/// ```
#[macro_export]
macro_rules! create_db {
  ($db_name: ident {
    $($field_name: ident: ($($arg: ident: $arg_type: ty),*) -> $field_type: ty$(,)?)*
  }) => {
    $(
      // One unit struct per item, solely used as a namespace for the accessors below
      #[derive(Clone, Debug)]
      pub(crate) struct $field_name;
      impl $field_name {
        // Derives this item's key: the (db, item) namespacing prefix followed by the
        // SCALE-encoded argument tuple
        pub(crate) fn key($($arg: $arg_type),*) -> Vec<u8> {
          use scale::Encode;
          $crate::serai_db_key(
            stringify!($db_name).as_bytes(),
            stringify!($field_name).as_bytes(),
            ($($arg),*).encode()
          )
        }
        // Writes the Borsh serialization of `data` under the derived key
        pub(crate) fn set(txn: &mut impl DbTxn $(, $arg: $arg_type)*, data: &$field_type) {
          let key = $field_name::key($($arg),*);
          txn.put(&key, borsh::to_vec(data).unwrap());
        }
        // Reads and Borsh-deserializes the value at the derived key, if one is set
        // Panics if the stored bytes don't decode to `$field_type`
        pub(crate) fn get(getter: &impl Get, $($arg: $arg_type),*) -> Option<$field_type> {
          getter.get($field_name::key($($arg),*)).map(|data| {
            borsh::from_slice(data.as_ref()).unwrap()
          })
        }
        // Queues deletion of the value at the derived key
        #[allow(dead_code)]
        pub(crate) fn del(txn: &mut impl DbTxn $(, $arg: $arg_type)*) {
          txn.del(&$field_name::key($($arg),*))
        }
      }
    )*
  };
}
/// Creates a database-backed, ordered message channel per key-argument tuple.
///
/// Uses the same syntax as `create_db!`, appending an `index: u32` key argument internally.
/// Index 0 stores the count of messages sent, index 1 the count of messages received, and the
/// messages themselves are stored at indices 2 and above.
#[macro_export]
macro_rules! db_channel {
  ($db_name: ident {
    $($field_name: ident: ($($arg: ident: $arg_type: ty),*) -> $field_type: ty$(,)?)*
  }) => {
    $(
      // Define the underlying storage item, with the channel index as a trailing key argument
      create_db! {
        $db_name {
          $field_name: ($($arg: $arg_type,)* index: u32) -> $field_type,
        }
      }
      impl $field_name {
        pub(crate) fn send(txn: &mut impl DbTxn $(, $arg: $arg_type)*, value: &$field_type) {
          // Use index 0 to store the amount of messages
          let messages_sent_key = $field_name::key($($arg),*, 0);
          let messages_sent = txn.get(&messages_sent_key).map(|counter| {
            u32::from_le_bytes(counter.try_into().unwrap())
          }).unwrap_or(0);
          txn.put(&messages_sent_key, (messages_sent + 1).to_le_bytes());
          // + 2 as index 1 is used for the amount of messages read
          // Using distinct counters enables send to be called without mutating anything recv may
          // at the same time
          let index_to_use = messages_sent + 2;
          $field_name::set(txn, $($arg),*, index_to_use, value);
        }
        pub(crate) fn try_recv(txn: &mut impl DbTxn $(, $arg: $arg_type)*) -> Option<$field_type> {
          // Index 1 stores the amount of messages read so far
          let messages_recvd_key = $field_name::key($($arg),*, 1);
          let messages_recvd = txn.get(&messages_recvd_key).map(|counter| {
            u32::from_le_bytes(counter.try_into().unwrap())
          }).unwrap_or(0);
          // The next unread message, if any, lives at this index
          let index_to_read = messages_recvd + 2;
          let res = $field_name::get(txn, $($arg),*, index_to_read);
          // Only consume the message and advance the read counter if a message was present
          if res.is_some() {
            $field_name::del(txn, $($arg),*, index_to_read);
            txn.put(&messages_recvd_key, (messages_recvd + 1).to_le_bytes());
          }
          res
        }
      }
    )*
  };
}
================================================
FILE: common/db/src/lib.rs
================================================
mod create_db;
pub use create_db::*;
mod mem;
pub use mem::*;
#[cfg(feature = "rocksdb")]
mod rocks;
#[cfg(feature = "rocksdb")]
pub use rocks::{RocksDB, new_rocksdb};
#[cfg(feature = "parity-db")]
mod parity_db;
#[cfg(feature = "parity-db")]
pub use parity_db::{ParityDb, new_parity_db};
/// An object implementing get.
pub trait Get {
  /// Retrieve the value currently associated with this key, if one is set.
  fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>>;
}
/// An atomic database operation.
///
/// Reads via `Get` observe this transaction's own pending operations.
#[must_use]
pub trait DbTxn: Send + Get {
  /// Queue setting this key to the specified value.
  fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>);
  /// Queue deletion of this key.
  fn del(&mut self, key: impl AsRef<[u8]>);
  /// Atomically apply all queued operations. Dropping without calling this discards them.
  fn commit(self);
}
/// A database supporting atomic operations.
pub trait Db: 'static + Send + Sync + Clone + Get {
  type Transaction<'a>: DbTxn;
  /// Derive a key from the database/item namespacing destinations and the item-specific key.
  ///
  /// Each destination is length-prefixed so distinct (db, item) pairs cannot collide.
  /// Panics if either destination exceeds 255 bytes.
  // NOTE(review): this duplicates the free function `serai_db_key` in create_db.rs — the two
  // must stay in sync
  fn key(db_dst: &'static [u8], item_dst: &'static [u8], key: impl AsRef<[u8]>) -> Vec<u8> {
    let key = key.as_ref();
    let mut res = Vec::with_capacity(2 + db_dst.len() + item_dst.len() + key.len());
    res.push(u8::try_from(db_dst.len()).unwrap());
    res.extend_from_slice(db_dst);
    res.push(u8::try_from(item_dst.len()).unwrap());
    res.extend_from_slice(item_dst);
    res.extend_from_slice(key);
    res
  }
  /// Open a new transaction over this database.
  fn txn(&mut self) -> Self::Transaction<'_>;
}
================================================
FILE: common/db/src/mem.rs
================================================
use core::fmt::Debug;
use std::{
sync::{Arc, RwLock},
collections::{HashSet, HashMap},
};
use crate::*;
/// An atomic operation for the in-memory database.
///
/// Holds a reference to the parent database, the map of pending writes, and the set of pending
/// deletions, applied on `commit`.
#[must_use]
#[derive(PartialEq, Eq, Debug)]
pub struct MemDbTxn<'a>(&'a MemDb, HashMap<Vec<u8>, Vec<u8>>, HashSet<Vec<u8>>);
impl<'a> Get for MemDbTxn<'a> {
  // Reads observe this transaction's pending operations layered over the backing database
  fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
    let key = key.as_ref();
    if self.2.contains(key) {
      // A pending deletion masks any committed value
      None
    } else if let Some(value) = self.1.get(key) {
      // A pending write takes precedence over the committed state
      Some(value.clone())
    } else {
      // Fall back to the underlying database
      self.0 .0.read().unwrap().get(key).cloned()
    }
  }
}
impl<'a> DbTxn for MemDbTxn<'a> {
  fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) {
    let key = key.as_ref();
    // A write cancels any pending deletion of the same key
    self.2.remove(key);
    self.1.insert(key.to_vec(), value.as_ref().to_vec());
  }
  fn del(&mut self, key: impl AsRef<[u8]>) {
    let key = key.as_ref();
    // A deletion cancels any pending write of the same key
    self.1.remove(key);
    self.2.insert(key.to_vec());
  }
  fn commit(mut self) {
    // Apply pending writes, then pending deletions, under a single write lock.
    // put/del keep the two sets disjoint, so the relative order is immaterial.
    let mut db = self.0 .0.write().unwrap();
    db.extend(self.1.drain());
    for key in &self.2 {
      db.remove(key);
    }
  }
}
/// An in-memory database.
#[derive(Clone, Debug)]
pub struct MemDb(Arc<RwLock<HashMap<Vec<u8>, Vec<u8>>>>);
impl PartialEq for MemDb {
  fn eq(&self, other: &MemDb) -> bool {
    // Equality is defined over the committed key-value contents
    *self.0.read().unwrap() == *other.0.read().unwrap()
  }
}
impl Eq for MemDb {}
impl Default for MemDb {
  fn default() -> MemDb {
    MemDb(Arc::new(RwLock::new(HashMap::new())))
  }
}
impl MemDb {
  /// Create a new in-memory database.
  pub fn new() -> MemDb {
    MemDb::default()
  }
}
impl Get for MemDb {
  fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
    // Reads outside a transaction observe only committed values
    self.0.read().unwrap().get(key.as_ref()).cloned()
  }
}
impl Db for MemDb {
  type Transaction<'a> = MemDbTxn<'a>;
  fn txn(&mut self) -> MemDbTxn<'_> {
    // Transactions start empty, layering their writes/deletions over `self`
    MemDbTxn(self, HashMap::new(), HashSet::new())
  }
}
================================================
FILE: common/db/src/parity_db.rs
================================================
use std::sync::Arc;
pub use ::parity_db::{Options, Db as ParityDb};
use crate::*;
// A pending transaction: the database plus an ordered log of
// (column, key, Some(value) for put / None for delete) operations.
#[must_use]
pub struct Transaction<'a>(&'a Arc<ParityDb>, Vec<(u8, Vec<u8>, Option<Vec<u8>>)>);
impl Get for Transaction<'_> {
  fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
    // The most recent pending operation on this key, if any, shadows the
    // committed database state (a pending deletion yields `None`)
    for (_, pending_key, pending_value) in self.1.iter().rev() {
      if pending_key == key.as_ref() {
        return pending_value.clone();
      }
    }
    self.0.get(&key)
  }
}
impl DbTxn for Transaction<'_> {
  fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) {
    // Column 0 is the sole column; `Some` marks an insertion/overwrite
    let op = (0, key.as_ref().to_vec(), Some(value.as_ref().to_vec()));
    self.1.push(op)
  }
  fn del(&mut self, key: impl AsRef<[u8]>) {
    // `None` marks a deletion
    let op = (0, key.as_ref().to_vec(), None);
    self.1.push(op)
  }
  fn commit(self) {
    // Hand the accumulated operations to parity-db as one atomic commit
    self.0.commit(self.1).unwrap()
  }
}
impl Get for Arc<ParityDb> {
  fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
    // All data lives in column 0
    ParityDb::get(self, 0, key.as_ref()).unwrap()
  }
}
impl Db for Arc<ParityDb> {
  type Transaction<'a> = Transaction<'a>;
  fn txn(&mut self) -> Self::Transaction<'_> {
    // Transactions start with an empty operation log
    Transaction(self, vec![])
  }
}
/// Open (or create) a single-column parity-db database at the given path.
pub fn new_parity_db(path: &str) -> Arc<ParityDb> {
  let options = Options::with_columns(std::path::Path::new(path), 1);
  Arc::new(ParityDb::open_or_create(&options).unwrap())
}
================================================
FILE: common/db/src/rocks.rs
================================================
use std::sync::Arc;
use rocksdb::{
DBCompressionType, ThreadMode, SingleThreaded, LogLevel, WriteOptions,
Transaction as RocksTransaction, Options, OptimisticTransactionDB,
};
use crate::*;
// An optimistic RocksDB transaction, bundled with a handle to its database so
// `commit` can flush after committing.
#[must_use]
pub struct Transaction<'a, T: ThreadMode>(
  RocksTransaction<'a, OptimisticTransactionDB<T>>,
  &'a OptimisticTransactionDB<T>,
);
impl<T: ThreadMode> Get for Transaction<'_, T> {
  fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
    // Database errors are treated as fatal
    self.0.get(key).expect("couldn't read from RocksDB via transaction")
  }
}
impl<T: ThreadMode> DbTxn for Transaction<'_, T> {
  fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) {
    self.0.put(key, value).expect("couldn't write to RocksDB via transaction")
  }
  fn del(&mut self, key: impl AsRef<[u8]>) {
    self.0.delete(key).expect("couldn't delete from RocksDB via transaction")
  }
  fn commit(self) {
    self.0.commit().expect("couldn't commit to RocksDB via transaction");
    // Explicitly flush the WAL and the memtables after committing
    self.1.flush_wal(true).expect("couldn't flush RocksDB WAL");
    self.1.flush().expect("couldn't flush RocksDB");
  }
}
impl<T: ThreadMode> Get for Arc<OptimisticTransactionDB<T>> {
  fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
    OptimisticTransactionDB::get(self, key).expect("couldn't read from RocksDB")
  }
}
impl<T: Send + ThreadMode + 'static> Db for Arc<OptimisticTransactionDB<T>> {
  type Transaction<'a> = Transaction<'a, T>;
  fn txn(&mut self) -> Self::Transaction<'_> {
    // Request synchronous writes for this transaction's commit
    let mut opts = WriteOptions::default();
    opts.set_sync(true);
    Transaction(self.transaction_opt(&opts, &Default::default()), &**self)
  }
}
/// The RocksDB database type used throughout: a single-threaded optimistic
/// transaction DB, wrapped in `Arc` so it's cheaply cloneable.
pub type RocksDB = Arc<OptimisticTransactionDB<SingleThreaded>>;
/// Open (or create) a RocksDB database at the given path.
pub fn new_rocksdb(path: &str) -> RocksDB {
  let mut options = Options::default();
  options.create_if_missing(true);
  // Compress both the data files and the WAL with Zstd
  options.set_compression_type(DBCompressionType::Zstd);
  options.set_wal_compression_type(DBCompressionType::Zstd);
  // 10 MB
  options.set_max_total_wal_size(10 * 1024 * 1024);
  options.set_wal_size_limit_mb(10);
  // Bound the info-log's verbosity and disk usage
  options.set_log_level(LogLevel::Warn);
  // 1 MB
  options.set_max_log_file_size(1024 * 1024);
  options.set_recycle_log_file_num(1);
  Arc::new(OptimisticTransactionDB::open(&options, path).unwrap())
}
================================================
FILE: common/env/Cargo.toml
================================================
[package]
name = "serai-env"
version = "0.1.0"
description = "A common library for Serai apps to access environment variables"
license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/common/env"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
rust-version = "1.60"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[lints]
workspace = true
================================================
FILE: common/env/LICENSE
================================================
AGPL-3.0-only license
Copyright (c) 2023 Luke Parker
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License Version 3 as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
================================================
FILE: common/env/src/lib.rs
================================================
#![cfg_attr(docsrs, feature(doc_cfg))]
/// Obtain a variable from the Serai environment/secret store.
///
/// Currently reads from the process environment, returning `None` if the
/// variable is unset (or isn't valid Unicode).
// This was a plain `//` comment, invisible to rustdoc; it's now a doc comment.
pub fn var(variable: &str) -> Option<String> {
  // TODO: Move this to a proper secret store
  // TODO: Unset this variable
  std::env::var(variable).ok()
}
================================================
FILE: common/patchable-async-sleep/Cargo.toml
================================================
[package]
name = "patchable-async-sleep"
version = "0.1.0"
description = "An async sleep function, patchable to the preferred runtime"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/common/patchable-async-sleep"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["async", "sleep", "tokio", "smol", "async-std"]
edition = "2021"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[lints]
workspace = true
[dependencies]
tokio = { version = "1", default-features = false, features = [ "time"] }
================================================
FILE: common/patchable-async-sleep/LICENSE
================================================
MIT License
Copyright (c) 2024 Luke Parker
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: common/patchable-async-sleep/README.md
================================================
# Patchable Async Sleep
An async sleep function, patchable to the preferred runtime.
This crate is `tokio`-backed. Applications which don't want to use `tokio`
should patch this crate to one which works with their preferred runtime. The
point of it is to have a minimal API surface to trivially facilitate such work.
================================================
FILE: common/patchable-async-sleep/src/lib.rs
================================================
#![cfg_attr(docsrs, feature(doc_cfg))]
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]
use core::time::Duration;
/// Sleep for the specified duration.
///
/// The returned future is lazy: it does nothing until awaited. This
/// implementation delegates to `tokio::time::sleep`, so it must be awaited
/// within a Tokio runtime context.
pub fn sleep(duration: Duration) -> impl core::future::Future<Output = ()> {
  tokio::time::sleep(duration)
}
================================================
FILE: common/request/Cargo.toml
================================================
[package]
name = "simple-request"
version = "0.1.0"
description = "A simple HTTP(S) request library"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/common/request"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["http", "https", "async", "request", "ssl"]
edition = "2021"
rust-version = "1.70"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[lints]
workspace = true
[dependencies]
tower-service = { version = "0.3", default-features = false }
hyper = { version = "1", default-features = false, features = ["http1", "client"] }
hyper-util = { version = "0.1", default-features = false, features = ["http1", "client-legacy", "tokio"] }
http-body-util = { version = "0.1", default-features = false }
tokio = { version = "1", default-features = false }
hyper-rustls = { version = "0.27", default-features = false, features = ["http1", "ring", "rustls-native-certs", "native-tokio"], optional = true }
zeroize = { version = "1", optional = true }
base64ct = { version = "1", features = ["alloc"], optional = true }
[features]
tls = ["hyper-rustls"]
basic-auth = ["zeroize", "base64ct"]
default = ["tls"]
================================================
FILE: common/request/LICENSE
================================================
MIT License
Copyright (c) 2023 Luke Parker
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: common/request/README.md
================================================
# Simple Request
A simple alternative to reqwest, supporting HTTPS, intended to support a
majority of use cases with a fraction of the dependency tree.
This library is built directly around `hyper`, `hyper-rustls`, and does require
`tokio`. Support for `async-std` would be welcome.
================================================
FILE: common/request/src/lib.rs
================================================
#![cfg_attr(docsrs, feature(doc_cfg))]
#![doc = include_str!("../README.md")]
use std::sync::Arc;
use tokio::sync::Mutex;
use tower_service::Service as TowerService;
#[cfg(feature = "tls")]
use hyper_rustls::{HttpsConnectorBuilder, HttpsConnector};
use hyper::{Uri, header::HeaderValue, body::Bytes, client::conn::http1::SendRequest};
use hyper_util::{
rt::tokio::TokioExecutor,
client::legacy::{Client as HyperClient, connect::HttpConnector},
};
pub use hyper;
mod request;
pub use request::*;
mod response;
pub use response::*;
/// An error when making an HTTP(S) request.
#[derive(Debug)]
pub enum Error {
  /// The URI couldn't be parsed or contained an invalid component.
  InvalidUri,
  /// Neither the request nor the client specified a host.
  MissingHost,
  /// The request's host disagrees with the host this client is bound to.
  InconsistentHost,
  /// Establishing the underlying connection failed.
  ConnectionError(Box<dyn Send + Sync + std::error::Error>),
  /// An error from `hyper`.
  Hyper(hyper::Error),
  /// An error from `hyper-util` (used for the connection-pool client).
  HyperUtil(hyper_util::client::legacy::Error),
}
// Without TLS, connect over plain HTTP; with TLS, over HTTP or HTTPS
#[cfg(not(feature = "tls"))]
type Connector = HttpConnector;
#[cfg(feature = "tls")]
type Connector = HttpsConnector<HttpConnector>;
#[derive(Clone, Debug)]
enum Connection {
  // A pool of connections, managed by hyper-util, usable for arbitrary hosts
  ConnectionPool(HyperClient<Connector, Full<Bytes>>),
  // A single lazily-established connection to one fixed host
  Connection {
    connector: Connector,
    host: Uri,
    connection: Arc<Mutex<Option<SendRequest<Full<Bytes>>>>>,
  },
}
/// A simple HTTP(S) client.
#[derive(Clone, Debug)]
pub struct Client {
  connection: Connection,
}
impl Client {
  // Build the connector shared by both client modes
  fn connector() -> Connector {
    let mut res = HttpConnector::new();
    res.set_keepalive(Some(core::time::Duration::from_secs(60)));
    res.set_nodelay(true);
    res.set_reuse_address(true);
    // With TLS, allow non-HTTP URIs here so the HTTPS wrapper can handle them
    #[cfg(feature = "tls")]
    res.enforce_http(false);
    #[cfg(feature = "tls")]
    let res = HttpsConnectorBuilder::new()
      .with_native_roots()
      .expect("couldn't fetch system's SSL roots")
      .https_or_http()
      .enable_http1()
      .wrap_connector(res);
    res
  }
  /// Create a client backed by a connection pool, able to request any host.
  pub fn with_connection_pool() -> Client {
    Client {
      connection: Connection::ConnectionPool(
        HyperClient::builder(TokioExecutor::new())
          .pool_idle_timeout(core::time::Duration::from_secs(60))
          .build(Self::connector()),
      ),
    }
  }
  /// Create a client bound to a single host, using one lazily-established
  /// connection.
  ///
  /// Errors if `host` isn't a valid URI or doesn't specify a host.
  pub fn without_connection_pool(host: &str) -> Result<Client, Error> {
    Ok(Client {
      connection: Connection::Connection {
        connector: Self::connector(),
        host: {
          let uri: Uri = host.parse().map_err(|_| Error::InvalidUri)?;
          if uri.host().is_none() {
            Err(Error::MissingHost)?;
          };
          uri
        },
        connection: Arc::new(Mutex::new(None)),
      },
    })
  }
  /// Make the provided request, returning the response.
  ///
  /// Validates any existing `Host` header against the client's bound host (if
  /// any), inserting a derived `Host` header when the request lacks one.
  pub async fn request<R: Into<Request>>(&self, request: R) -> Result<Response<'_>, Error> {
    let request: Request = request.into();
    let mut request = request.0;
    if let Some(header_host) = request.headers().get(hyper::header::HOST) {
      match &self.connection {
        Connection::ConnectionPool(_) => {}
        Connection::Connection { host, .. } => {
          // A bound-host client rejects requests addressed to another host
          if header_host.to_str().map_err(|_| Error::InvalidUri)? != host.host().unwrap() {
            Err(Error::InconsistentHost)?;
          }
        }
      }
    } else {
      // No Host header was set, so derive one from the request URI/bound host
      let host = match &self.connection {
        Connection::ConnectionPool(_) => {
          request.uri().host().ok_or(Error::MissingHost)?.to_string()
        }
        Connection::Connection { host, .. } => {
          let host_str = host.host().unwrap();
          // If the request's URI names a host, it must match the bound host
          if let Some(uri_host) = request.uri().host() {
            if host_str != uri_host {
              Err(Error::InconsistentHost)?;
            }
          }
          host_str.to_string()
        }
      };
      request
        .headers_mut()
        .insert(hyper::header::HOST, HeaderValue::from_str(&host).map_err(|_| Error::InvalidUri)?);
    }
    let response = match &self.connection {
      Connection::ConnectionPool(client) => {
        client.request(request).await.map_err(Error::HyperUtil)?
      }
      Connection::Connection { connector, host, connection } => {
        let mut connection_lock = connection.lock().await;
        // If there's not a connection...
        if connection_lock.is_none() {
          let call_res = connector.clone().call(host.clone()).await;
          #[cfg(not(feature = "tls"))]
          let call_res = call_res.map_err(|e| Error::ConnectionError(format!("{e:?}").into()));
          #[cfg(feature = "tls")]
          let call_res = call_res.map_err(Error::ConnectionError);
          let (requester, connection) =
            hyper::client::conn::http1::handshake(call_res?).await.map_err(Error::Hyper)?;
          // This will die when we drop the requester, so we don't need to track an AbortHandle
          // for it
          tokio::spawn(connection);
          *connection_lock = Some(requester);
        }
        let connection = connection_lock.as_mut().unwrap();
        let mut err = connection.ready().await.err();
        if err.is_none() {
          // Send the request
          let res = connection.send_request(request).await;
          if let Ok(res) = res {
            return Ok(Response(res, self));
          }
          err = res.err();
        }
        // Since this connection has been put into an error state, drop it
        *connection_lock = None;
        Err(Error::Hyper(err.unwrap()))?
      }
    };
    Ok(Response(response, self))
  }
}
================================================
FILE: common/request/src/request.rs
================================================
use hyper::body::Bytes;
#[cfg(feature = "basic-auth")]
use hyper::header::HeaderValue;
pub use http_body_util::Full;
#[cfg(feature = "basic-auth")]
use crate::Error;
/// An HTTP request, wrapping `hyper`'s request type.
#[derive(Debug)]
pub struct Request(pub(crate) hyper::Request<Full<Bytes>>);
impl Request {
  /// Extract the username and password from this request's URI's userinfo.
  ///
  /// Per RFC 3986, the username is everything before the first `:` of the
  /// userinfo and the password is everything after it (the password may
  /// itself contain `:`). Values are used verbatim; no percent-decoding is
  /// performed.
  ///
  /// Returns `Error::InvalidUri` if the URI has no authority or no userinfo.
  #[cfg(feature = "basic-auth")]
  fn username_password_from_uri(&self) -> Result<(String, String), Error> {
    if let Some(authority) = self.0.uri().authority() {
      let authority = authority.as_str();
      if authority.contains('@') {
        // Decode the username and password from the URI
        let mut userpass = authority.split('@').next().unwrap().to_string();
        // Split on the first ':' only. The prior `split(':')` truncated
        // passwords containing ':' at the second colon.
        let (username, password) = match userpass.split_once(':') {
          Some((username, password)) => (username.to_string(), password.to_string()),
          None => (userpass.clone(), String::new()),
        };
        zeroize::Zeroize::zeroize(&mut userpass);
        return Ok((username, password));
      }
    }
    Err(Error::InvalidUri)
  }
  /// Set the `Authorization` header for HTTP Basic Authentication.
  ///
  /// Intermediary buffers holding the credentials are zeroized.
  #[cfg(feature = "basic-auth")]
  pub fn basic_auth(&mut self, username: &str, password: &str) {
    use zeroize::Zeroize;
    use base64ct::{Encoding, Base64};
    let mut formatted = format!("{username}:{password}");
    let mut encoded = Base64::encode_string(formatted.as_bytes());
    formatted.zeroize();
    self.0.headers_mut().insert(
      hyper::header::AUTHORIZATION,
      HeaderValue::from_str(&format!("Basic {encoded}")).unwrap(),
    );
    encoded.zeroize();
  }
  /// Set HTTP Basic Authentication from the credentials in this request's
  /// URI, erroring if the URI contains none.
  #[cfg(feature = "basic-auth")]
  pub fn basic_auth_from_uri(&mut self) -> Result<(), Error> {
    let (mut username, mut password) = self.username_password_from_uri()?;
    self.basic_auth(&username, &password);
    use zeroize::Zeroize;
    username.zeroize();
    password.zeroize();
    Ok(())
  }
  /// Set HTTP Basic Authentication from this request's URI, silently doing
  /// nothing if the URI contains no credentials.
  #[cfg(feature = "basic-auth")]
  pub fn with_basic_auth(&mut self) {
    let _ = self.basic_auth_from_uri();
  }
}
impl From<hyper::Request<Full<Bytes>>> for Request {
  // Wrap a raw hyper request without modification
  fn from(request: hyper::Request<Full<Bytes>>) -> Request {
    Request(request)
  }
}
================================================
FILE: common/request/src/response.rs
================================================
use hyper::{
StatusCode,
header::{HeaderValue, HeaderMap},
body::{Buf, Incoming},
};
use http_body_util::BodyExt;
use crate::{Client, Error};
// Borrows the client so its async task lives as long as this response exists.
#[allow(dead_code)]
#[derive(Debug)]
pub struct Response<'a>(pub(crate) hyper::Response<Incoming>, pub(crate) &'a Client);
impl<'a> Response<'a> {
  /// The status code of this response.
  pub fn status(&self) -> StatusCode {
    self.0.status()
  }
  /// The headers of this response.
  pub fn headers(&self) -> &HeaderMap<HeaderValue> {
    self.0.headers()
  }
  /// Collect the entire body into memory, returning a reader over it.
  pub async fn body(self) -> Result<impl std::io::Read, Error> {
    Ok(self.0.into_body().collect().await.map_err(Error::Hyper)?.aggregate().reader())
  }
}
================================================
FILE: common/std-shims/Cargo.toml
================================================
[package]
name = "std-shims"
version = "0.1.4"
description = "A series of std shims to make alloc more feasible"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/common/std-shims"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["nostd", "no_std", "alloc", "io"]
edition = "2021"
rust-version = "1.64"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[lints]
workspace = true
[dependencies]
rustversion = { version = "1", default-features = false }
spin = { version = "0.10", default-features = false, features = ["use_ticket_mutex", "once", "lazy"] }
hashbrown = { version = "0.14", default-features = false, features = ["ahash", "inline-more"] }
[features]
std = []
default = ["std"]
================================================
FILE: common/std-shims/LICENSE
================================================
MIT License
Copyright (c) 2023 Luke Parker
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: common/std-shims/README.md
================================================
# std shims
A crate which passes through to std when the default `std` feature is enabled,
yet provides a series of shims when it isn't.
No guarantee of one-to-one parity is provided. The shims provided aim to be sufficient for the
average case.
`HashSet` and `HashMap` are provided via `hashbrown`. Synchronization primitives are provided via
`spin` (avoiding a requirement on `critical-section`).
Shimmed types are not guaranteed to be exact, drop-in replacements for their
`std` counterparts.
================================================
FILE: common/std-shims/src/collections.rs
================================================
#[cfg(feature = "std")]
pub use std::collections::*;
#[cfg(not(feature = "std"))]
pub use alloc::collections::*;
#[cfg(not(feature = "std"))]
pub use hashbrown::{HashSet, HashMap};
================================================
FILE: common/std-shims/src/io.rs
================================================
#[cfg(feature = "std")]
pub use std::io::*;
#[cfg(not(feature = "std"))]
mod shims {
  use core::fmt::{Debug, Formatter};
  use alloc::{boxed::Box, vec::Vec};
  /// A minimal mirror of `std::io::ErrorKind`.
  #[derive(Clone, Copy, PartialEq, Eq, Debug)]
  pub enum ErrorKind {
    UnexpectedEof,
    Other,
  }
  /// A minimal mirror of `std::io::Error`: a kind plus an opaque payload.
  pub struct Error {
    kind: ErrorKind,
    error: Box<dyn Send + Sync>,
  }
  impl Debug for Error {
    fn fmt(&self, fmt: &mut Formatter<'_>) -> core::result::Result<(), core::fmt::Error> {
      // The payload isn't bound to be `Debug`, so only the kind is printed
      fmt.debug_struct("Error").field("kind", &self.kind).finish_non_exhaustive()
    }
  }
  impl Error {
    /// Create a new error of the specified kind.
    pub fn new<E: 'static + Send + Sync>(kind: ErrorKind, error: E) -> Error {
      Error { kind, error: Box::new(error) }
    }
    /// Create a new error of kind `Other`.
    pub fn other<E: 'static + Send + Sync>(error: E) -> Error {
      Error { kind: ErrorKind::Other, error: Box::new(error) }
    }
    /// The kind of this error.
    pub fn kind(&self) -> ErrorKind {
      self.kind
    }
    /// Consume this error, returning its payload (always `Some` in this shim).
    pub fn into_inner(self) -> Option<Box<dyn Send + Sync>> {
      Some(self.error)
    }
  }
  pub type Result<T> = core::result::Result<T, Error>;
  /// A minimal mirror of `std::io::Read`.
  pub trait Read {
    fn read(&mut self, buf: &mut [u8]) -> Result<usize>;
    // NOTE(review): unlike std, this issues a single `read` call rather than
    // looping, so implementors which return partial reads will error here
    fn read_exact(&mut self, buf: &mut [u8]) -> Result<()> {
      let read = self.read(buf)?;
      if read != buf.len() {
        Err(Error::new(ErrorKind::UnexpectedEof, "reader ran out of bytes"))?;
      }
      Ok(())
    }
  }
  impl Read for &[u8] {
    fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
      // Copy as much as fits, then advance this slice past the bytes read
      let read = buf.len().min(self.len());
      buf[.. read].copy_from_slice(&self[.. read]);
      *self = &self[read ..];
      Ok(read)
    }
  }
  /// A minimal mirror of `std::io::BufRead`.
  pub trait BufRead: Read {
    fn fill_buf(&mut self) -> Result<&[u8]>;
    fn consume(&mut self, amt: usize);
  }
  impl BufRead for &[u8] {
    fn fill_buf(&mut self) -> Result<&[u8]> {
      Ok(*self)
    }
    fn consume(&mut self, amt: usize) {
      *self = &self[amt ..];
    }
  }
  /// A minimal mirror of `std::io::Write`.
  pub trait Write {
    fn write(&mut self, buf: &[u8]) -> Result<usize>;
    // NOTE(review): a single `write` call, not a loop as in std
    fn write_all(&mut self, buf: &[u8]) -> Result<()> {
      if self.write(buf)? != buf.len() {
        Err(Error::new(ErrorKind::UnexpectedEof, "writer ran out of bytes"))?;
      }
      Ok(())
    }
  }
  impl Write for Vec<u8> {
    fn write(&mut self, buf: &[u8]) -> Result<usize> {
      // Writes to a Vec always succeed in full
      self.extend(buf);
      Ok(buf.len())
    }
  }
}
#[cfg(not(feature = "std"))]
pub use shims::*;
================================================
FILE: common/std-shims/src/lib.rs
================================================
#![cfg_attr(docsrs, feature(doc_cfg))]
#![doc = include_str!("../README.md")]
#![cfg_attr(not(feature = "std"), no_std)]
pub extern crate alloc;
pub mod sync;
pub mod collections;
pub mod io;
pub use alloc::vec;
pub use alloc::str;
pub use alloc::string;
pub mod prelude {
  /// A polyfill of `div_ceil` for Rust versions predating its stabilization
  /// (1.73).
  #[rustversion::before(1.73)]
  #[doc(hidden)]
  pub trait StdShimsDivCeil {
    fn div_ceil(self, rhs: Self) -> Self;
  }
  #[rustversion::before(1.73)]
  mod impl_divceil {
    use super::StdShimsDivCeil;
    // Implemented via quotient/remainder, matching std. The prior
    // `(self + (rhs - 1)) / rhs` overflowed when `self` was within
    // `rhs - 1` of the type's maximum.
    macro_rules! impl_div_ceil {
      ($($ty:ty),*) => {$(
        impl StdShimsDivCeil for $ty {
          fn div_ceil(self, rhs: Self) -> Self {
            let quotient = self / rhs;
            if (self % rhs) != 0 {
              quotient + 1
            } else {
              quotient
            }
          }
        }
      )*};
    }
    impl_div_ceil!(u8, u16, u32, u64, u128, usize);
  }
  /// A polyfill of `std::io::Error::other` for Rust versions predating it
  /// (1.74).
  #[cfg(feature = "std")]
  #[rustversion::before(1.74)]
  #[doc(hidden)]
  pub trait StdShimsIoErrorOther {
    fn other<E>(error: E) -> Self
    where
      E: Into<Box<dyn std::error::Error + Send + Sync>>;
  }
  #[cfg(feature = "std")]
  #[rustversion::before(1.74)]
  impl StdShimsIoErrorOther for std::io::Error {
    fn other<E>(error: E) -> Self
    where
      E: Into<Box<dyn std::error::Error + Send + Sync>>,
    {
      std::io::Error::new(std::io::ErrorKind::Other, error)
    }
  }
}
================================================
FILE: common/std-shims/src/sync.rs
================================================
pub use core::sync::*;
pub use alloc::sync::*;
mod mutex_shim {
  #[cfg(feature = "std")]
  pub use std::sync::*;
  #[cfg(not(feature = "std"))]
  pub use spin::*;
  /// A `Mutex` with an identical API with and without std.
  #[derive(Default, Debug)]
  pub struct ShimMutex<T>(Mutex<T>);
  impl<T> ShimMutex<T> {
    /// Create a new mutex wrapping `value`.
    pub const fn new(value: T) -> Self {
      Self(Mutex::new(value))
    }
    /// Lock the mutex, returning a guard.
    pub fn lock(&self) -> MutexGuard<'_, T> {
      // std's Mutex is poisonable (poisoning is treated as fatal via the
      // unwrap); spin's lock is infallible
      #[cfg(feature = "std")]
      let res = self.0.lock().unwrap();
      #[cfg(not(feature = "std"))]
      let res = self.0.lock();
      res
    }
  }
}
pub use mutex_shim::{ShimMutex as Mutex, MutexGuard};
#[cfg(not(feature = "std"))]
pub use spin::Lazy as LazyLock;
#[rustversion::before(1.80)]
#[cfg(feature = "std")]
pub use spin::Lazy as LazyLock;
#[rustversion::since(1.80)]
#[cfg(feature = "std")]
pub use std::sync::LazyLock;
================================================
FILE: common/zalloc/Cargo.toml
================================================
[package]
name = "zalloc"
version = "0.1.0"
description = "An allocator wrapper which zeroizes memory on dealloc"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/common/zalloc"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
rust-version = "1.77"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[lints]
workspace = true
[dependencies]
zeroize = { version = "^1.5", default-features = false }
[build-dependencies]
rustversion = { version = "1", default-features = false }
[features]
std = ["zeroize/std"]
default = ["std"]
allocator = []
================================================
FILE: common/zalloc/LICENSE
================================================
MIT License
Copyright (c) 2022-2023 Luke Parker
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: common/zalloc/build.rs
================================================
// On nightly, declare the `zalloc_rustc_nightly` cfg flag and set it
#[rustversion::nightly]
fn main() {
  println!("cargo::rustc-check-cfg=cfg(zalloc_rustc_nightly)");
  println!("cargo::rustc-cfg=zalloc_rustc_nightly");
}
// On stable/beta, only declare the cfg flag (so cfg checks don't warn)
#[rustversion::not(nightly)]
fn main() {
  println!("cargo::rustc-check-cfg=cfg(zalloc_rustc_nightly)");
}
================================================
FILE: common/zalloc/src/lib.rs
================================================
// The docsrs cfg_attr was previously duplicated verbatim; deduplicated here.
#![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(all(zalloc_rustc_nightly, feature = "allocator"), feature(allocator_api))]
//! Implementation of a Zeroizing Allocator, enabling zeroizing memory on deallocation.
//! This can either be used with Box (requires nightly and the "allocator" feature) to provide the
//! functionality of zeroize on types which don't implement zeroize, or used as a wrapper around
//! the global allocator to ensure *all* memory is zeroized.
use core::{
slice,
alloc::{Layout, GlobalAlloc},
};
use zeroize::Zeroize;
/// An allocator wrapper which zeroizes its memory on dealloc.
///
/// `T` is the underlying allocator being wrapped.
pub struct ZeroizingAlloc<T>(pub T);
#[cfg(all(zalloc_rustc_nightly, feature = "allocator"))]
use core::{
ptr::NonNull,
alloc::{AllocError, Allocator},
};
#[cfg(all(zalloc_rustc_nightly, feature = "allocator"))]
unsafe impl<T: Allocator> Allocator for ZeroizingAlloc<T> {
  fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
    // Allocation is delegated untouched; only deallocation is intercepted
    self.0.allocate(layout)
  }
  unsafe fn deallocate(&self, mut ptr: NonNull<u8>, layout: Layout) {
    // Zeroize the allocation's bytes (per its layout) before freeing it
    slice::from_raw_parts_mut(ptr.as_mut(), layout.size()).zeroize();
    self.0.deallocate(ptr, layout);
  }
}
unsafe impl<T: GlobalAlloc> GlobalAlloc for ZeroizingAlloc<T> {
  /// Allocate by delegating to the wrapped allocator, unmodified.
  unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
    self.0.alloc(layout)
  }

  /// Zeroize the allocation's bytes, then delegate deallocation to the wrapped allocator.
  unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
    // SAFETY: per the `GlobalAlloc::dealloc` contract the caller upholds, `ptr` is a live
    // allocation of `layout.size()` bytes from this allocator, so forming a mutable byte slice
    // over it is sound. Zeroize before handing the memory back.
    slice::from_raw_parts_mut(ptr, layout.size()).zeroize();
    self.0.dealloc(ptr, layout);
  }
}
================================================
FILE: coordinator/Cargo.toml
================================================
[package]
name = "serai-coordinator"
version = "0.1.0"
description = "Serai coordinator to prepare batches and sign transactions"
license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/coordinator"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
publish = false
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[lints]
workspace = true
[dependencies]
async-trait = { version = "0.1", default-features = false }
zeroize = { version = "^1.5", default-features = false, features = ["std"] }
rand_core = { version = "0.6", default-features = false, features = ["std"] }
blake2 = { version = "0.10", default-features = false, features = ["std"] }
transcript = { package = "flexible-transcript", path = "../crypto/transcript", default-features = false, features = ["std", "recommended"] }
dalek-ff-group = { path = "../crypto/dalek-ff-group", default-features = false, features = ["std"] }
ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std"] }
schnorr = { package = "schnorr-signatures", path = "../crypto/schnorr", default-features = false, features = ["std", "aggregate"] }
dkg-musig = { path = "../crypto/dkg/musig", default-features = false, features = ["std"] }
frost = { package = "modular-frost", path = "../crypto/frost" }
frost-schnorrkel = { path = "../crypto/schnorrkel" }
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }
zalloc = { path = "../common/zalloc" }
serai-db = { path = "../common/db" }
serai-env = { path = "../common/env" }
processor-messages = { package = "serai-processor-messages", path = "../processor/messages" }
message-queue = { package = "serai-message-queue", path = "../message-queue" }
tributary = { package = "tributary-chain", path = "./tributary" }
sp-application-crypto = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false, features = ["std"] }
serai-client = { path = "../substrate/client", default-features = false, features = ["serai", "borsh"] }
hex = { version = "0.4", default-features = false, features = ["std"] }
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
log = { version = "0.4", default-features = false, features = ["std"] }
env_logger = { version = "0.10", default-features = false, features = ["humantime"] }
futures-util = { version = "0.3", default-features = false, features = ["std"] }
tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] }
libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "request-response", "gossipsub", "macros"] }
[dev-dependencies]
tributary = { package = "tributary-chain", path = "./tributary", features = ["tests"] }
sp-application-crypto = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false, features = ["std"] }
sp-runtime = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false, features = ["std"] }
[features]
longer-reattempts = []
parity-db = ["serai-db/parity-db"]
rocksdb = ["serai-db/rocksdb"]
================================================
FILE: coordinator/LICENSE
================================================
AGPL-3.0-only license
Copyright (c) 2023 Luke Parker
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License Version 3 as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
================================================
FILE: coordinator/README.md
================================================
# Coordinator
The Serai coordinator communicates with other coordinators to prepare batches
for Serai and sign transactions.
In order to achieve consensus over gossiped messages, and to order certain events, a
micro-blockchain is instantiated.
================================================
FILE: coordinator/src/cosign_evaluator.rs
================================================
use core::time::Duration;
use std::{
sync::Arc,
collections::{HashSet, HashMap},
};
use tokio::{
sync::{mpsc, Mutex, RwLock},
time::sleep,
};
use borsh::BorshSerialize;
use sp_application_crypto::RuntimePublic;
use serai_client::{
primitives::{ExternalNetworkId, EXTERNAL_NETWORKS},
validator_sets::primitives::{ExternalValidatorSet, Session},
Serai, SeraiError, TemporalSerai,
};
use serai_db::{Get, DbTxn, Db, create_db};
use processor_messages::coordinator::cosign_block_msg;
use crate::{
p2p::{CosignedBlock, GossipMessageKind, P2p},
substrate::LatestCosignedBlock,
};
// Databases used by the cosign evaluator.
create_db! {
  CosignDb {
    // Every cosign received, keyed by the signing set and the cosigned block's hash
    ReceivedCosign: (set: ExternalValidatorSet, block: [u8; 32]) -> CosignedBlock,
    // The latest cosign received from each external network
    LatestCosign: (network: ExternalNetworkId) -> CosignedBlock,
    // Marker for validator sets observed cosigning a block distinct from our chain's
    DistinctChain: (set: ExternalValidatorSet) -> (),
  }
}

/// Evaluates cosigns received over the P2P network, tracking the latest cosigned block and
/// detecting when validator sets cosign a chain distinct from ours.
pub struct CosignEvaluator<D: Db> {
  // Wrapped in a Mutex as it's written to from multiple spawned tasks
  db: Mutex<D>,
  serai: Arc<Serai>,
  // Total allocated stake per network; None until the first successful fetch from Serai
  stakes: RwLock<Option<HashMap<ExternalNetworkId, u64>>>,
  // The highest-numbered cosign received from each network
  latest_cosigns: RwLock<HashMap<ExternalNetworkId, CosignedBlock>>,
}
impl<D: Db> CosignEvaluator<D> {
  /// Re-evaluate the latest cosigned block from the cosigns received so far, persisting it if it
  /// advanced.
  ///
  /// A block number is accepted once the networks whose latest cosign is at (or past) it hold
  /// sufficient stake, per the supermajority computation below.
  async fn update_latest_cosign(&self) {
    let stakes_lock = self.stakes.read().await;
    // If we haven't gotten the stake data yet, return
    let Some(stakes) = stakes_lock.as_ref() else { return };

    let total_stake = stakes.values().copied().sum::<u64>();

    let latest_cosigns = self.latest_cosigns.read().await;

    let mut highest_block = 0;
    for cosign in latest_cosigns.values() {
      // Collect the networks whose latest cosign is for this block, or a later one
      let mut networks = HashSet::new();
      for (network, sub_cosign) in &*latest_cosigns {
        if sub_cosign.block_number >= cosign.block_number {
          networks.insert(network);
        }
      }
      // Sum the stake of those networks (networks without known stake count as 0)
      let sum_stake =
        networks.into_iter().map(|network| stakes.get(network).unwrap_or(&0)).sum::<u64>();
      let needed_stake = ((total_stake * 2) / 3) + 1;
      if (total_stake == 0) || (sum_stake > needed_stake) {
        highest_block = highest_block.max(cosign.block_number);
      }
    }

    let mut db_lock = self.db.lock().await;
    let mut txn = db_lock.txn();
    // Only advance, never regress, the latest cosigned block
    if highest_block > LatestCosignedBlock::latest_cosigned_block(&txn) {
      log::info!("setting latest cosigned block to {}", highest_block);
      LatestCosignedBlock::set(&mut txn, &highest_block);
    }
    txn.commit();
  }

  /// Fetch the total allocated stake for every external network which has published a Batch,
  /// then re-evaluate the latest cosigned block with the fresh stake data.
  ///
  /// Returns Err (signaling a retry) if the Serai node couldn't be queried.
  async fn update_stakes(&self) -> Result<(), SeraiError> {
    let serai = self.serai.as_of_latest_finalized_block().await?;

    let mut stakes = HashMap::new();
    for network in EXTERNAL_NETWORKS {
      // Use whether this network has published a Batch as a short-circuit for whether they've
      // ever set a key
      let set_key = serai.in_instructions().last_batch_for_network(network).await?.is_some();
      if set_key {
        stakes.insert(
          network,
          serai
            .validator_sets()
            .total_allocated_stake(network.into())
            .await?
            .expect("network which published a batch didn't have a stake set")
            .0,
        );
      }
    }

    // Since we've successfully built stakes, set it
    *self.stakes.write().await = Some(stakes);

    self.update_latest_cosign().await;
    Ok(())
  }

  /// Validate and process a single cosign received over the P2P network.
  ///
  /// Uses Err to signify a message should be retried (returned on Serai node query failures).
  /// Invalid, stale, or unverifiable cosigns are dropped with Ok(()).
  async fn handle_new_cosign(&self, cosign: CosignedBlock) -> Result<(), SeraiError> {
    // If we already have this cosign or a newer cosign, return
    if let Some(latest) = self.latest_cosigns.read().await.get(&cosign.network) {
      if latest.block_number >= cosign.block_number {
        return Ok(());
      }
    }

    // If this is an old cosign (older than roughly a day, assuming 6-second blocks), drop it
    let latest_block = self.serai.latest_finalized_block().await?;
    if (cosign.block_number + (24 * 60 * 60 / 6)) < latest_block.number() {
      log::debug!("received old cosign supposedly signed by {:?}", cosign.network);
      return Ok(());
    }

    let Some(block) = self.serai.finalized_block_by_number(cosign.block_number).await? else {
      log::warn!("received cosign with a block number which doesn't map to a block");
      return Ok(());
    };

    // Resolve the set whose keys are expected to have signed: the prior session if its keys are
    // still set, else the latest session
    async fn set_with_keys_fn(
      serai: &TemporalSerai<'_>,
      network: ExternalNetworkId,
    ) -> Result<Option<ExternalValidatorSet>, SeraiError> {
      let Some(latest_session) = serai.validator_sets().session(network.into()).await? else {
        log::warn!("received cosign from {:?}, which doesn't yet have a session", network);
        return Ok(None);
      };
      let prior_session = Session(latest_session.0.saturating_sub(1));
      Ok(Some(
        if serai
          .validator_sets()
          .keys(ExternalValidatorSet { network, session: prior_session })
          .await?
          .is_some()
        {
          ExternalValidatorSet { network, session: prior_session }
        } else {
          ExternalValidatorSet { network, session: latest_session }
        },
      ))
    }

    // Get the key for this network as of the prior block
    // If we have two chains, this value may be different across chains depending on if one chain
    // included the set_keys and one didn't
    // Because set_keys will force a cosign, it will force detection of distinct blocks
    // re: set_keys using keys prior to set_keys (assumed amenable to all)
    let serai = self.serai.as_of(block.header.parent_hash.into());

    let Some(set_with_keys) = set_with_keys_fn(&serai, cosign.network).await? else {
      return Ok(());
    };
    let Some(keys) = serai.validator_sets().keys(set_with_keys).await? else {
      log::warn!("received cosign for a block we didn't have keys for");
      return Ok(());
    };

    // Verify the cosign's signature against the resolved set's key
    if !keys
      .0
      .verify(&cosign_block_msg(cosign.block_number, cosign.block), &cosign.signature.into())
    {
      log::warn!("received cosigned block with an invalid signature");
      return Ok(());
    }

    log::info!(
      "received cosign for block {} ({}) by {:?}",
      block.number(),
      hex::encode(cosign.block),
      cosign.network
    );

    // Save this cosign to the DB
    {
      let mut db = self.db.lock().await;
      let mut txn = db.txn();
      ReceivedCosign::set(&mut txn, set_with_keys, cosign.block, &cosign);
      LatestCosign::set(&mut txn, set_with_keys.network, &(cosign));
      txn.commit();
    }

    if cosign.block != block.hash() {
      // This set validly cosigned a block distinct from the one on our chain
      log::error!(
        "received cosign for a distinct block at {}. we have {}. cosign had {}",
        cosign.block_number,
        hex::encode(block.hash()),
        hex::encode(cosign.block)
      );

      let serai = self.serai.as_of(latest_block.hash());

      let mut db = self.db.lock().await;
      // Save this set as being on a different chain
      let mut txn = db.txn();
      DistinctChain::set(&mut txn, set_with_keys, &());
      txn.commit();

      // Tally, across all networks, how much stake has been observed on a distinct chain
      let mut total_stake = 0;
      let mut total_on_distinct_chain = 0;
      for network in EXTERNAL_NETWORKS {
        // Get the current set for this network, retrying until the node query succeeds
        let set_with_keys = {
          let mut res;
          while {
            res = set_with_keys_fn(&serai, network).await;
            res.is_err()
          } {
            log::error!(
              "couldn't get the set with keys when checking for a distinct chain: {:?}",
              res
            );
            tokio::time::sleep(core::time::Duration::from_secs(3)).await;
          }
          res.unwrap()
        };

        // Get its stake
        // Doesn't use the stakes inside self to prevent deadlocks re: multi-lock acquisition
        if let Some(set_with_keys) = set_with_keys {
          let stake = {
            let mut res;
            while {
              res =
                serai.validator_sets().total_allocated_stake(set_with_keys.network.into()).await;
              res.is_err()
            } {
              log::error!(
                "couldn't get total allocated stake when checking for a distinct chain: {:?}",
                res
              );
              tokio::time::sleep(core::time::Duration::from_secs(3)).await;
            }
            res.unwrap()
          };

          if let Some(stake) = stake {
            total_stake += stake.0;
            if DistinctChain::get(&*db, set_with_keys).is_some() {
              total_on_distinct_chain += stake.0;
            }
          }
        }
      }

      // See https://github.com/serai-dex/serai/issues/339 for the reasoning on 17%
      if (total_stake * 17 / 100) <= total_on_distinct_chain {
        panic!("17% of validator sets (by stake) have co-signed a distinct chain");
      }
    } else {
      // The cosign matches our chain; accept it and re-evaluate the latest cosigned block
      {
        let mut latest_cosigns = self.latest_cosigns.write().await;
        latest_cosigns.insert(cosign.network, cosign);
      }
      self.update_latest_cosign().await;
    }

    Ok(())
  }

  /// Spawn the cosign evaluator's background tasks, returning the channel to feed received
  /// cosigns into.
  ///
  /// Spawns three tasks: one refreshing stake data every 10 minutes, one handling cosigns sent
  /// over the returned channel (retrying failures), and one rebroadcasting our latest cosigns
  /// over the P2P network every minute.
  #[allow(clippy::new_ret_no_self)]
  pub fn new<P: P2p>(db: D, p2p: P, serai: Arc<Serai>) -> mpsc::UnboundedSender<CosignedBlock> {
    // Rehydrate the latest cosigns from the DB
    let mut latest_cosigns = HashMap::new();
    for network in EXTERNAL_NETWORKS {
      if let Some(cosign) = LatestCosign::get(&db, network) {
        latest_cosigns.insert(network, cosign);
      }
    }

    let evaluator = Arc::new(Self {
      db: Mutex::new(db),
      serai,
      stakes: RwLock::new(None),
      latest_cosigns: RwLock::new(latest_cosigns),
    });

    // Spawn a task to update stakes regularly
    tokio::spawn({
      let evaluator = evaluator.clone();
      async move {
        loop {
          // Run this until it passes
          while evaluator.update_stakes().await.is_err() {
            log::warn!("couldn't update stakes in the cosign evaluator");
            // Try again in 10 seconds
            sleep(Duration::from_secs(10)).await;
          }
          // Run it every 10 minutes as we don't need the exact stake data for this to be valid
          sleep(Duration::from_secs(10 * 60)).await;
        }
      }
    });

    // Spawn a task to receive cosigns and handle them
    let (send, mut recv) = mpsc::unbounded_channel();
    tokio::spawn({
      let evaluator = evaluator.clone();
      async move {
        while let Some(msg) = recv.recv().await {
          while evaluator.handle_new_cosign(msg).await.is_err() {
            // Try again in 10 seconds
            sleep(Duration::from_secs(10)).await;
          }
        }
      }
    });

    // Spawn a task to rebroadcast the most recent cosigns
    tokio::spawn({
      async move {
        loop {
          let cosigns = evaluator.latest_cosigns.read().await.values().copied().collect::<Vec<_>>();
          for cosign in cosigns {
            let mut buf = vec![];
            cosign.serialize(&mut buf).unwrap();
            P2p::broadcast(&p2p, GossipMessageKind::CosignedBlock, buf).await;
          }
          sleep(Duration::from_secs(60)).await;
        }
      }
    });

    // Return the channel to send cosigns
    send
  }
}
================================================
FILE: coordinator/src/db.rs
================================================
use blake2::{
digest::{consts::U32, Digest},
Blake2b,
};
use scale::Encode;
use borsh::{BorshSerialize, BorshDeserialize};
use serai_client::{
in_instructions::primitives::{Batch, SignedBatch},
primitives::ExternalNetworkId,
validator_sets::primitives::{ExternalValidatorSet, Session},
};
pub use serai_db::*;
use ::tributary::ReadWrite;
use crate::tributary::{TributarySpec, Transaction, scanner::RecognizedIdType};
// Databases used by the coordinator's main loop.
create_db!(
  MainDb {
    // ID of the last processor message handled, per network
    HandledMessageDb: (network: ExternalNetworkId) -> u64,
    // Concatenated serializations of the specs of all Tributaries we're participating in
    ActiveTributaryDb: () -> Vec<u8>,
    // Marker for validator sets whose Tributaries have been retired
    RetiredTributaryDb: (set: ExternalValidatorSet) -> (),
    // The first preprocesses observed for a recognized ID
    FirstPreprocessDb: (
      network: ExternalNetworkId,
      id_type: RecognizedIdType,
      id: &[u8]
    ) -> Vec<Vec<u8>>,
    // ID of the last Batch received from each network's processor
    LastReceivedBatchDb: (network: ExternalNetworkId) -> u32,
    // Hash of the expected instructions for a Batch, by network and Batch ID
    ExpectedBatchDb: (network: ExternalNetworkId, id: u32) -> [u8; 32],
    // Signed Batches awaiting publication to Serai
    BatchDb: (network: ExternalNetworkId, id: u32) -> SignedBatch,
    // ID of the last Batch verified, per network
    LastVerifiedBatchDb: (network: ExternalNetworkId) -> u32,
    // The handover Batch for a validator set, and the reverse lookup from (network, batch)
    HandoverBatchDb: (set: ExternalValidatorSet) -> u32,
    LookupHandoverBatchDb: (network: ExternalNetworkId, batch: u32) -> Session,
    // Concatenated serializations of Transactions queued for a validator set
    QueuedBatchesDb: (set: ExternalValidatorSet) -> Vec<u8>
  }
);
impl ActiveTributaryDb {
  /// Read every active Tributary's spec, returning the raw serialization alongside the decoded
  /// specs.
  pub fn active_tributaries<G: Get>(getter: &G) -> (Vec<u8>, Vec<TributarySpec>) {
    let serialized = Self::get(getter).unwrap_or_default();

    let mut specs = vec![];
    let mut reader: &[u8] = serialized.as_ref();
    while !reader.is_empty() {
      specs.push(TributarySpec::deserialize_reader(&mut reader).unwrap());
    }

    (serialized, specs)
  }

  /// Append this spec to the active Tributaries, unless it's already present.
  pub fn add_participating_in_tributary(txn: &mut impl DbTxn, spec: &TributarySpec) {
    let (mut serialized, existing) = ActiveTributaryDb::active_tributaries(txn);
    // Already tracked; nothing to do
    if existing.iter().any(|existing_spec| existing_spec == spec) {
      return;
    }

    spec.serialize(&mut serialized).unwrap();
    ActiveTributaryDb::set(txn, &serialized);
  }

  /// Remove this set's spec from the active Tributaries and mark the set as retired.
  pub fn retire_tributary(txn: &mut impl DbTxn, set: ExternalValidatorSet) {
    let mut active = Self::active_tributaries(txn).1;
    // Drop the (first) spec for this set, if present
    if let Some(pos) = active.iter().position(|spec| spec.set() == set) {
      active.remove(pos);
    }

    // Re-serialize the remaining specs
    let mut serialized = vec![];
    for spec in &active {
      spec.serialize(&mut serialized).unwrap();
    }
    Self::set(txn, &serialized);

    RetiredTributaryDb::set(txn, set, &());
  }
}
impl FirstPreprocessDb {
  /// Record the first preprocesses observed for this ID.
  ///
  /// If preprocesses were already saved for this ID, they must match; a divergence is a fatal
  /// invariant violation.
  pub fn save_first_preprocess(
    txn: &mut impl DbTxn,
    network: ExternalNetworkId,
    id_type: RecognizedIdType,
    id: &[u8],
    preprocess: &Vec<Vec<u8>>,
  ) {
    match FirstPreprocessDb::get(txn, network, id_type, id) {
      // Already saved: verify consistency rather than overwriting
      Some(existing) => assert_eq!(&existing, preprocess, "saved a distinct first preprocess"),
      None => FirstPreprocessDb::set(txn, network, id_type, id, preprocess),
    }
  }
}
impl ExpectedBatchDb {
  /// Record the hash of this Batch's instructions as the expected Batch for its ID, updating the
  /// last-received Batch ID for the network.
  pub fn save_expected_batch(txn: &mut impl DbTxn, batch: &Batch) {
    // Commit to the instructions by their Blake2b-256 hash
    let instructions_hash: [u8; 32] = Blake2b::<U32>::digest(batch.instructions.encode()).into();
    LastReceivedBatchDb::set(txn, batch.network, &batch.id);
    Self::set(txn, batch.network, batch.id, &instructions_hash);
  }
}
impl HandoverBatchDb {
  /// Record `batch` as the handover Batch for `set`, indexing it both ways.
  pub fn set_handover_batch(txn: &mut impl DbTxn, set: ExternalValidatorSet, batch: u32) {
    Self::set(txn, set, &batch);
    // Reverse lookup: from (network, batch ID) back to the session handing over
    LookupHandoverBatchDb::set(txn, set.network, batch, &set.session);
  }
}
impl QueuedBatchesDb {
  /// Append a Batch Transaction to this set's queue.
  pub fn queue(txn: &mut impl DbTxn, set: ExternalValidatorSet, batch: &Transaction) {
    let mut queued = Self::get(txn, set).unwrap_or_default();
    batch.write(&mut queued).unwrap();
    Self::set(txn, set, &queued);
  }

  /// Drain this set's queue, returning every queued Transaction.
  pub fn take(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Vec<Transaction> {
    let serialized = Self::get(txn, set).unwrap_or_default();
    // Clear the queue now that we've read it
    txn.del(Self::key(set));

    let mut transactions = vec![];
    let mut reader: &[u8] = &serialized;
    while !reader.is_empty() {
      transactions.push(Transaction::read(&mut reader).unwrap());
    }
    transactions
  }
}
================================================
FILE: coordinator/src/main.rs
================================================
#![expect(clippy::cast_possible_truncation)]
use core::ops::Deref;
use std::{
sync::{OnceLock, Arc},
time::Duration,
collections::{VecDeque, HashSet, HashMap},
};
use zeroize::{Zeroize, Zeroizing};
use rand_core::OsRng;
use dalek_ff_group::Ristretto;
use ciphersuite::{
group::{
ff::{Field, PrimeField},
GroupEncoding,
},
Ciphersuite,
};
use schnorr::SchnorrSignature;
use frost::Participant;
use serai_db::{DbTxn, Db};
use scale::Encode;
use borsh::BorshSerialize;
use serai_client::{
primitives::ExternalNetworkId,
validator_sets::primitives::{ExternalValidatorSet, KeyPair, Session},
Public, Serai, SeraiInInstructions,
};
use message_queue::{Service, client::MessageQueue};
use tokio::{
sync::{Mutex, RwLock, mpsc, broadcast},
time::sleep,
};
use ::tributary::{ProvidedError, TransactionKind, TransactionTrait, Block, Tributary};
mod tributary;
use crate::tributary::{
TributarySpec, Label, SignData, Transaction, scanner::RecognizedIdType, PlanIds,
};
mod db;
use db::*;
mod p2p;
pub use p2p::*;
use processor_messages::{
key_gen, sign,
coordinator::{self, SubstrateSignableId},
ProcessorMessage,
};
pub mod processors;
use processors::Processors;
mod substrate;
use substrate::CosignTransactions;
mod cosign_evaluator;
use cosign_evaluator::CosignEvaluator;
#[cfg(test)]
pub mod tests;
// Zeroize all heap allocations on free, as this process handles key material.
#[global_allocator]
static ALLOCATOR: zalloc::ZeroizingAlloc<std::alloc::System> =
  zalloc::ZeroizingAlloc(std::alloc::System);
/// A Tributary we're actively participating in, alongside the spec it was created from.
#[derive(Clone)]
pub struct ActiveTributary<D: Db, P: P2p> {
  pub spec: TributarySpec,
  pub tributary: Arc<Tributary<D, Transaction, P>>,
}
/// An event regarding the set of Tributaries we're participating in.
#[derive(Clone)]
pub enum TributaryEvent<D: Db, P: P2p> {
  /// A new Tributary was created.
  NewTributary(ActiveTributary<D, P>),
  /// The Tributary for this validator set was retired.
  TributaryRetired(ExternalValidatorSet),
}
// Creates a new tributary and sends it to all listeners.
//
// Triggers the DKG for the new Tributary's set, then broadcasts a NewTributary event. No-ops if
// the set has already been retired.
async fn add_tributary<D: Db, Pro: Processors, P: P2p>(
  db: D,
  key: Zeroizing<<Ristretto as Ciphersuite>::F>,
  processors: &Pro,
  p2p: P,
  tributaries: &broadcast::Sender<TributaryEvent<D, P>>,
  spec: TributarySpec,
) {
  // Don't create Tributaries for retired sets. The `return` here is required: without it, the
  // "not adding" log would be immediately contradicted by adding the Tributary anyway.
  if RetiredTributaryDb::get(&db, spec.set()).is_some() {
    log::info!("not adding tributary {:?} since it's been retired", spec.set());
    return;
  }

  log::info!("adding tributary {:?}", spec.set());

  let tributary = Tributary::<_, Transaction, _>::new(
    // TODO2: Use a db on a distinct volume to protect against DoS attacks
    // TODO2: Delete said db once the Tributary is dropped
    db,
    spec.genesis(),
    spec.start_time(),
    key.clone(),
    spec.validators(),
    p2p,
  )
  .await
  .unwrap();

  // Trigger a DKG for the newly added Tributary
  // If we're rebooting, we'll re-fire this message
  // This is safe due to the message-queue deduplicating based off the intent system
  let set = spec.set();
  let our_i = spec
    .i(&[], Ristretto::generator() * key.deref())
    .expect("adding a tributary for a set we aren't in set for");
  processors
    .send(
      set.network,
      processor_messages::key_gen::CoordinatorMessage::GenerateKey {
        id: processor_messages::key_gen::KeyGenId { session: set.session, attempt: 0 },
        params: frost::ThresholdParams::new(spec.t(), spec.n(&[]), our_i.start).unwrap(),
        shares: u16::from(our_i.end) - u16::from(our_i.start),
      },
    )
    .await;

  tributaries
    .send(TributaryEvent::NewTributary(ActiveTributary { spec, tributary: Arc::new(tributary) }))
    .map_err(|_| "all ActiveTributary recipients closed")
    .unwrap();
}
// TODO: Find a better pattern for this
// NOTE(review): acquired at the top of handle_processor_message; appears to serialize processor
// message handling against handover/batch-verification bookkeeping — confirm against other users
// of this lock.
static HANDOVER_VERIFY_QUEUE_LOCK: OnceLock<Mutex<()>> = OnceLock::new();
#[allow(clippy::too_many_arguments)]
async fn handle_processor_message<D: Db, P: P2p>(
db: &mut D,
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
serai: &Serai,
p2p: &P,
cosign_channel: &mpsc::UnboundedSender<CosignedBlock>,
tributaries: &HashMap<Session, ActiveTributary<D, P>>,
network: ExternalNetworkId,
msg: &processors::Message,
) -> bool {
#[allow(clippy::nonminimal_bool)]
if let Some(already_handled) = HandledMessageDb::get(db, msg.network) {
assert!(!(already_handled > msg.id));
assert!((already_handled == msg.id) || (already_handled == msg.id - 1));
if already_handled == msg.id {
return true;
}
} else {
assert_eq!(msg.id, 0);
}
let _hvq_lock = HANDOVER_VERIFY_QUEUE_LOCK.get_or_init(|| Mutex::new(())).lock().await;
let mut txn = db.txn();
let mut relevant_tributary = match &msg.msg {
// We'll only receive these if we fired GenerateKey, which we'll only do if if we're
// in-set, making the Tributary relevant
ProcessorMessage::KeyGen(inner_msg) => match inner_msg {
key_gen::ProcessorMessage::Commitments { id, .. } |
key_gen::ProcessorMessage::InvalidCommitments { id, .. } |
key_gen::ProcessorMessage::Shares { id, .. } |
key_gen::ProcessorMessage::InvalidShare { id, .. } |
key_gen::ProcessorMessage::GeneratedKeyPair { id, .. } |
key_gen::ProcessorMessage::Blame { id, .. } => Some(id.session),
},
ProcessorMessage::Sign(inner_msg) => match inner_msg {
// We'll only receive InvalidParticipant/Preprocess/Share if we're actively signing
sign::ProcessorMessage::InvalidParticipant { id, .. } |
sign::ProcessorMessage::Preprocess { id, .. } |
sign::ProcessorMessage::Share { id, .. } => Some(id.session),
// While the Processor's Scanner will always emit Completed, that's routed through the
// Signer and only becomes a ProcessorMessage::Completed if the Signer is present and
// confirms it
sign::ProcessorMessage::Completed { session, .. } => Some(*session),
},
ProcessorMessage::Coordinator(inner_msg) => match inner_msg {
// This is a special case as it's relevant to *all* Tributaries for this network we're
// signing in
// It doesn't return a Tributary to become `relevant_tributary` though
coordinator::ProcessorMessage::SubstrateBlockAck { block, plans } => {
// Get the sessions for these keys
let sessions = plans
.iter()
.map(|plan| plan.session)
.filter(|session| {
RetiredTributaryDb::get(&txn, ExternalValidatorSet { network, session: *session })
.is_none()
})
.collect::<HashSet<_>>();
// Ensure we have the Tributaries
for session in &sessions {
if !tributaries.contains_key(session) {
return false;
}
}
for session in sessions {
let tributary = &tributaries[&session];
let plans = plans
.iter()
.filter_map(|plan| Some(plan.id).filter(|_| plan.session == session))
.collect::<Vec<_>>();
PlanIds::set(&mut txn, &tributary.spec.genesis(), *block, &plans);
let tx = Transaction::SubstrateBlock(*block);
log::trace!(
"processor message effected transaction {} {:?}",
hex::encode(tx.hash()),
&tx
);
log::trace!("providing transaction {}", hex::encode(tx.hash()));
let res = tributary.tributary.provide_transaction(tx).await;
if !(res.is_ok() || (res == Err(ProvidedError::AlreadyProvided))) {
if res == Err(ProvidedError::LocalMismatchesOnChain) {
// Spin, since this is a crit for this Tributary
loop {
log::error!(
"{}. tributary: {}, provided: SubstrateBlock({})",
"tributary added distinct provided to delayed locally provided TX",
hex::encode(tributary.spec.genesis()),
block,
);
sleep(Duration::from_secs(60)).await;
}
}
panic!("provided an invalid transaction: {res:?}");
}
}
None
}
// We'll only fire these if we are the Substrate signer, making the Tributary relevant
coordinator::ProcessorMessage::InvalidParticipant { id, .. } |
coordinator::ProcessorMessage::CosignPreprocess { id, .. } |
coordinator::ProcessorMessage::BatchPreprocess { id, .. } |
coordinator::ProcessorMessage::SlashReportPreprocess { id, .. } |
coordinator::ProcessorMessage::SubstrateShare { id, .. } => Some(id.session),
// This causes an action on our P2P net yet not on any Tributary
coordinator::ProcessorMessage::CosignedBlock { block_number, block, signature } => {
let cosigned_block = CosignedBlock {
network,
block_number: *block_number,
block: *block,
signature: {
let mut arr = [0; 64];
arr.copy_from_slice(signature);
arr
},
};
cosign_channel.send(cosigned_block).unwrap();
let mut buf = vec![];
cosigned_block.serialize(&mut buf).unwrap();
P2p::broadcast(p2p, GossipMessageKind::CosignedBlock, buf).await;
None
}
// This causes an action on Substrate yet not on any Tributary
coordinator::ProcessorMessage::SignedSlashReport { session, signature } => {
let set = ExternalValidatorSet { network, session: *session };
let signature: &[u8] = signature.as_ref();
let signature = <[u8; 64]>::try_from(signature).unwrap();
let signature: serai_client::Signature = signature.into();
let slashes = crate::tributary::SlashReport::get(&txn, set)
.expect("signed slash report despite not having slash report locally");
let slashes_pubs = slashes
.iter()
.map(|(address, points)| (Public::from(*address), *points))
.collect::<Vec<_>>();
let tx = serai_client::SeraiValidatorSets::report_slashes(
network,
slashes
.into_iter()
.map(|(address, points)| (serai_client::SeraiAddress(address), points))
.collect::<Vec<_>>()
.try_into()
.unwrap(),
signature,
);
loop {
if serai.publish(&tx).await.is_ok() {
break None;
}
// Check if the slashes shouldn't still be reported. If not, break.
let Ok(serai) = serai.as_of_latest_finalized_block().await else {
tokio::time::sleep(core::time::Duration::from_secs(5)).await;
continue;
};
let Ok(key) = serai.validator_sets().key_pending_slash_report(network).await else {
tokio::time::sleep(core::time::Duration::from_secs(5)).await;
continue;
};
let Some(key) = key else {
break None;
};
// If this is the key for this slash report, then this will verify
use sp_application_crypto::RuntimePublic;
if !key.verify(
&serai_client::validator_sets::primitives::report_slashes_message(&set, &slashes_pubs),
&signature,
) {
break None;
}
}
}
},
// These don't return a relevant Tributary as there's no Tributary with action expected
ProcessorMessage::Substrate(inner_msg) => match inner_msg {
processor_messages::substrate::ProcessorMessage::Batch { batch } => {
assert_eq!(
batch.network, msg.network,
"processor sent us a batch for a different network than it was for",
);
ExpectedBatchDb::save_expected_batch(&mut txn, batch);
None
}
// If this is a new Batch, immediately publish it (if we can)
processor_messages::substrate::ProcessorMessage::SignedBatch { batch } => {
assert_eq!(
batch.batch.network, msg.network,
"processor sent us a signed batch for a different network than it was for",
);
log::debug!("received batch {:?} {}", batch.batch.network, batch.batch.id);
// Save this batch to the disk
BatchDb::set(&mut txn, batch.batch.network, batch.batch.id, &batch.clone());
// Get the next-to-execute batch ID
let Ok(mut next) = substrate::expected_next_batch(serai, network).await else {
return false;
};
// Since we have a new batch, publish all batches yet to be published to Serai
// This handles the edge-case where batch n+1 is signed before batch n is
let mut batches = VecDeque::new();
while let Some(batch) = BatchDb::get(&txn, network, next) {
batches.push_back(batch);
next += 1;
}
while let Some(batch) = batches.pop_front() {
// If this Batch should no longer be published, continue
let Ok(expected_next_batch) = substrate::expected_next_batch(serai, network).await else {
return false;
};
if expected_next_batch > batch.batch.id {
continue;
}
let tx = SeraiInInstructions::execute_batch(batch.clone());
log::debug!("attempting to publish batch {:?} {}", batch.batch.network, batch.batch.id,);
// This publish may fail if this transactions already exists in the mempool, which is
// possible, or if this batch was already executed on-chain
// Either case will have eventual resolution and be handled by the above check on if
// this batch should execute
let res = serai.publish(&tx).await;
if res.is_ok() {
log::info!(
"published batch {network:?} {} (block {})",
batch.batch.id,
hex::encode(batch.batch.block),
);
} else {
log::debug!(
"couldn't publish batch {:?} {}: {:?}",
batch.batch.network,
batch.batch.id,
res,
);
// If we failed to publish it, restore it
batches.push_front(batch);
// Sleep for a few seconds before retrying to prevent hammering the node
sleep(Duration::from_secs(5)).await;
}
}
None
}
},
};
// If we have a relevant Tributary, check it's actually still relevant and has yet to be retired
if let Some(relevant_tributary_value) = relevant_tributary {
if RetiredTributaryDb::get(
&txn,
ExternalValidatorSet { network: msg.network, session: relevant_tributary_value },
)
.is_some()
{
relevant_tributary = None;
}
}
// If there's a relevant Tributary...
if let Some(relevant_tributary) = relevant_tributary {
// Make sure we have it
// Per the reasoning above, we only return a Tributary as relevant if we're a participant
// Accordingly, we do *need* to have this Tributary now to handle it UNLESS the Tributary has
// already completed and this is simply an old message (which we prior checked)
let Some(ActiveTributary { spec, tributary }) = tributaries.get(&relevant_tributary) else {
// Since we don't, sleep for a fraction of a second and return false, signaling we didn't
// handle this message
// At the start of the loop which calls this function, we'll check for new tributaries,
// making this eventually resolve
sleep(Duration::from_millis(100)).await;
return false;
};
let genesis = spec.genesis();
let pub_key = Ristretto::generator() * key.deref();
let txs = match msg.msg.clone() {
ProcessorMessage::KeyGen(inner_msg) => match inner_msg {
key_gen::ProcessorMessage::Commitments { id, commitments } => {
vec![Transaction::DkgCommitments {
attempt: id.attempt,
commitments,
signed: Transaction::empty_signed(),
}]
}
key_gen::ProcessorMessage::InvalidCommitments { id, faulty } => {
// This doesn't have guaranteed timing
//
// While the party *should* be fatally slashed and not included in future attempts,
// they'll actually be fatally slashed (assuming liveness before the Tributary retires)
// and not included in future attempts *which begin after the latency window completes*
let participant = spec
.reverse_lookup_i(
&crate::tributary::removed_as_of_dkg_attempt(&txn, spec.genesis(), id.attempt)
.expect("participating in DKG attempt yet we didn't save who was removed"),
faulty,
)
.unwrap();
vec![Transaction::RemoveParticipantDueToDkg {
participant,
signed: Transaction::empty_signed(),
}]
}
key_gen::ProcessorMessage::Shares { id, mut shares } => {
// Create a MuSig-based machine to inform Substrate of this key generation
let nonces = crate::tributary::dkg_confirmation_nonces(key, spec, &mut txn, id.attempt);
let removed = crate::tributary::removed_as_of_dkg_attempt(&txn, genesis, id.attempt)
.expect("participating in a DKG attempt yet we didn't track who was removed yet?");
let our_i = spec
.i(&removed, pub_key)
.expect("processor message to DKG for an attempt we aren't a validator in");
// `tx_shares` needs to be done here as while it can be serialized from the HashMap
// without further context, it can't be deserialized without context
let mut tx_shares = Vec::with_capacity(shares.len());
for shares in &mut shares {
tx_shares.push(vec![]);
for i in 1 ..= spec.n(&removed) {
let i = Participant::new(i).unwrap();
if our_i.contains(&i) {
if shares.contains_key(&i) {
panic!("processor sent us our own shares");
}
continue;
}
tx_shares.last_mut().unwrap().push(
shares.remove(&i).expect("processor didn't send share for another validator"),
);
}
}
vec![Transaction::DkgShares {
attempt: id.attempt,
shares: tx_shares,
confirmation_nonces: nonces,
signed: Transaction::empty_signed(),
}]
}
key_gen::ProcessorMessage::InvalidShare { id, accuser, faulty, blame } => {
vec![Transaction::InvalidDkgShare {
attempt: id.attempt,
accuser,
faulty,
blame,
signed: Transaction::empty_signed(),
}]
}
key_gen::ProcessorMessage::GeneratedKeyPair { id, substrate_key, network_key } => {
// TODO2: Check the KeyGenId fields
// Tell the Tributary the key pair, get back the share for the MuSig signature
let share = crate::tributary::generated_key_pair::<D>(
&mut txn,
key,
spec,
&KeyPair(Public::from(substrate_key), network_key.try_into().unwrap()),
id.attempt,
);
// TODO: Move this into generated_key_pair?
match share {
Ok(share) => {
vec![Transaction::DkgConfirmed {
attempt: id.attempt,
confirmation_share: share,
signed: Transaction::empty_signed(),
}]
}
Err(p) => {
let participant = spec
.reverse_lookup_i(
&crate::tributary::removed_as_of_dkg_attempt(&txn, spec.genesis(), id.attempt)
.expect("participating in DKG attempt yet we didn't save who was removed"),
p,
)
.unwrap();
vec![Transaction::RemoveParticipantDueToDkg {
participant,
signed: Transaction::empty_signed(),
}]
}
}
}
key_gen::ProcessorMessage::Blame { id, participant } => {
let participant = spec
.reverse_lookup_i(
&crate::tributary::removed_as_of_dkg_attempt(&txn, spec.genesis(), id.attempt)
.expect("participating in DKG attempt yet we didn't save who was removed"),
participant,
)
.unwrap();
vec![Transaction::RemoveParticipantDueToDkg {
participant,
signed: Transaction::empty_signed(),
}]
}
},
ProcessorMessage::Sign(msg) => match msg {
sign::ProcessorMessage::InvalidParticipant { .. } => {
// TODO: Locally increase slash points to maximum (distinct from an explicitly fatal
// slash) and censor transactions (yet don't explicitly ban)
vec![]
}
sign::ProcessorMessage::Preprocess { id, preprocesses } => {
if id.attempt == 0 {
FirstPreprocessDb::save_first_preprocess(
&mut txn,
network,
RecognizedIdType::Plan,
&id.id,
&preprocesses,
);
vec![]
} else {
vec![Transaction::Sign(SignData {
plan: id.id,
attempt: id.attempt,
label: Label::Preprocess,
data: preprocesses,
signed: Transaction::empty_signed(),
})]
}
}
sign::ProcessorMessage::Share { id, shares } => {
vec![Transaction::Sign(SignData {
plan: id.id,
attempt: id.attempt,
label: Label::Share,
data: shares,
signed: Transaction::empty_signed(),
})]
}
sign::ProcessorMessage::Completed { session: _, id, tx } => {
let r = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));
#[allow(non_snake_case)]
let R = <Ristretto as Ciphersuite>::generator() * r.deref();
let mut tx = Transaction::SignCompleted {
plan: id,
tx_hash: tx,
first_signer: pub_key,
signature: SchnorrSignature { R, s: <Ristretto as Ciphersuite>::F::ZERO },
};
let signed = SchnorrSignature::sign(key, r, tx.sign_completed_challenge());
match &mut tx {
Transaction::SignCompleted { signature, .. } => {
*signature = signed;
}
_ => unreachable!(),
}
vec![tx]
}
},
ProcessorMessage::Coordinator(inner_msg) => match inner_msg {
coordinator::ProcessorMessage::SubstrateBlockAck { .. } => unreachable!(),
coordinator::ProcessorMessage::InvalidParticipant { .. } => {
// TODO: Locally increase slash points to maximum (distinct from an explicitly fatal
// slash) and censor transactions (yet don't explicitly ban)
vec![]
}
coordinator::ProcessorMessage::CosignPreprocess { id, preprocesses } |
coordinator::ProcessorMessage::SlashReportPreprocess { id, preprocesses } => {
vec![Transaction::SubstrateSign(SignData {
plan: id.id,
attempt: id.attempt,
label: Label::Preprocess,
data: preprocesses.into_iter().map(Into::into).collect(),
signed: Transaction::empty_signed(),
})]
}
coordinator::ProcessorMessage::BatchPreprocess { id, block, preprocesses } => {
log::info!(
"informed of batch (sign ID {}, attempt {}) for block {}",
hex::encode(id.id.encode()),
id.attempt,
hex::encode(block),
);
// If this is the first attempt instance, wait until we synchronize around the batch
// first
if id.attempt == 0 {
FirstPreprocessDb::save_first_preprocess(
&mut txn,
spec.set().network,
RecognizedIdType::Batch,
&{
let SubstrateSignableId::Batch(id) = id.id else {
panic!("BatchPreprocess SubstrateSignableId wasn't Batch")
};
id.to_le_bytes()
},
&preprocesses.into_iter().map(Into::into).collect::<Vec<_>>(),
);
let intended = Transaction::Batch {
block: block.0,
batch: match id.id {
SubstrateSignableId::Batch(id) => id,
_ => panic!("BatchPreprocess did not contain Batch ID"),
},
};
// If this is the new key's first Batch, only create this TX once we verify all
// all prior published `Batch`s
// TODO: This assumes BatchPreprocess is immediately after Batch
// Ensure that assumption
let last_received = LastReceivedBatchDb::get(&txn, msg.network).unwrap();
let handover_batch = HandoverBatchDb::get(&txn, spec.set());
let mut queue = false;
if let Some(handover_batch) = handover_batch {
// There is a race condition here. We may verify all `Batch`s from the prior set,
// start signing the handover `Batch` `n`, start signing `n+1`, have `n+1` signed
// before `n` (or at the same time), yet then the prior set forges a malicious
// `Batch` `n`.
//
// The malicious `Batch` `n` would be publishable to Serai, as Serai can't
// distinguish what's intended to be a handover `Batch`, yet then anyone could
// publish the new set's `n+1`, causing their acceptance of the handover.
//
// To fix this, if this is after the handover `Batch` and we have yet to verify
// publication of the handover `Batch`, don't yet yield the provided.
if last_received > handover_batch {
if let Some(last_verified) = LastVerifiedBatchDb::get(&txn, msg.network) {
if last_verified < handover_batch {
queue = true;
}
} else {
queue = true;
}
}
} else {
HandoverBatchDb::set_handover_batch(&mut txn, spec.set(), last_received);
// If this isn't the first batch, meaning we do have to verify all prior batches, and
// the prior Batch hasn't been verified yet...
if (last_received != 0) &&
LastVerifiedBatchDb::get(&txn, msg.network)
.map_or(true, |last_verified| last_verified < (last_received - 1))
{
// Withhold this TX until we verify all prior `Batch`s
queue = true;
}
}
if queue {
QueuedBatchesDb::queue(&mut txn, spec.set(), &intended);
vec![]
} else {
// Because this is post-verification of the handover batch, take all queued `Batch`s
// now to ensure we don't provide this before an already queued Batch
// This *may* be an unreachable case due to how last_verified_batch is set, yet it
// doesn't hurt to have as a defensive pattern
let mut res = QueuedBatchesDb::take(&mut txn, spec.set());
res.push(intended);
res
}
} else {
vec![Transaction::SubstrateSign(SignData {
plan: id.id,
attempt: id.attempt,
label: Label::Preprocess,
data: preprocesses.into_iter().map(Into::into).collect(),
signed: Transaction::empty_signed(),
})]
}
}
coordinator::ProcessorMessage::SubstrateShare { id, shares } => {
vec![Transaction::SubstrateSign(SignData {
plan: id.id,
attempt: id.attempt,
label: Label::Share,
data: shares.into_iter().map(|share| share.to_vec()).collect(),
signed: Transaction::empty_signed(),
})]
}
#[allow(clippy::match_same_arms)] // Allowed to preserve layout
coordinator::ProcessorMessage::CosignedBlock { .. } => unreachable!(),
#[allow(clippy::match_same_arms)]
coordinator::ProcessorMessage::SignedSlashReport { .. } => unreachable!(),
},
ProcessorMessage::Substrate(inner_msg) => match inner_msg {
processor_messages::substrate::ProcessorMessage::Batch { .. } |
processor_messages::substrate::ProcessorMessage::SignedBatch { .. } => unreachable!(),
},
};
// If this created transactions, publish them
for mut tx in txs {
log::trace!("processor message effected transaction {} {:?}", hex::encode(tx.hash()), &tx);
match tx.kind() {
TransactionKind::Provided(_) => {
log::trace!("providing transaction {}", hex::encode(tx.hash()));
let res = tributary.provide_transaction(tx.clone()).await;
if !(res.is_ok() || (res == Err(ProvidedError::AlreadyProvided))) {
if res == Err(ProvidedError::LocalMismatchesOnChain) {
// Spin, since this is a crit for this Tributary
loop {
log::error!(
"{}. tributary: {}, provided: {:?}",
"tributary added distinct provided to delayed locally provided TX",
hex::encode(spec.genesis()),
&tx,
);
sleep(Duration::from_secs(60)).await;
}
}
panic!("provided an invalid transaction: {res:?}");
}
}
TransactionKind::Unsigned => {
log::trace!("publishing unsigned transaction {}", hex::encode(tx.hash()));
match tributary.add_transaction(tx.clone()).await {
Ok(_) => {}
Err(e) => panic!("created an invalid unsigned transaction: {e:?}"),
}
}
TransactionKind::Signed(_, _) => {
tx.sign(&mut OsRng, genesis, key);
tributary::publish_signed_transaction(&mut txn, tributary, tx).await;
}
}
}
}
HandledMessageDb::set(&mut txn, msg.network, &msg.id);
txn.commit();
true
}
/// Drive handling of messages from the processor for a single external network.
///
/// Keeps a local map of this network's active Tributaries (fed by `tributary_event`) and, for
/// each message received from the processor, attempts to handle it, only acking the message to
/// the message queue once it was successfully handled.
#[allow(clippy::too_many_arguments)]
async fn handle_processor_messages<D: Db, Pro: Processors, P: P2p>(
  mut db: D,
  key: Zeroizing<<Ristretto as Ciphersuite>::F>,
  serai: Arc<Serai>,
  processors: Pro,
  p2p: P,
  cosign_channel: mpsc::UnboundedSender<CosignedBlock>,
  network: ExternalNetworkId,
  mut tributary_event: mpsc::UnboundedReceiver<TributaryEvent<D, P>>,
) {
  let mut active_tributaries = HashMap::new();
  loop {
    // Drain at most one pending Tributary event per iteration, without blocking
    match tributary_event.try_recv() {
      Ok(TributaryEvent::NewTributary(tributary)) => {
        let set = tributary.spec.set();
        assert_eq!(set.network, network);
        active_tributaries.insert(set.session, tributary);
      }
      Ok(TributaryEvent::TributaryRetired(set)) => {
        active_tributaries.remove(&set.session);
      }
      Err(mpsc::error::TryRecvError::Empty) => {}
      Err(mpsc::error::TryRecvError::Disconnected) => {
        panic!("handle_processor_messages tributary_event sender closed")
      }
    }

    // TODO: Check this ID is sane (last handled ID or expected next ID)
    // Bound the wait so we regularly loop back around and pick up new Tributaries
    let recv = tokio::time::timeout(Duration::from_secs(1), processors.recv(network)).await;
    let Ok(msg) = recv else { continue };

    log::trace!("entering handle_processor_message for {:?}", network);
    let handled = handle_processor_message(
      &mut db,
      &key,
      &serai,
      &p2p,
      &cosign_channel,
      &active_tributaries,
      network,
      &msg,
    )
    .await;
    // Only ack the message (removing it from the queue) if we actually handled it
    if handled {
      processors.ack(msg).await;
    }
    log::trace!("exited handle_processor_message for {:?}", network);
  }
}
#[allow(clippy::too_many_arguments)]
async fn handle_cosigns_and_batch_publication<D: Db, P: P2p>(
mut db: D,
network: ExternalNetworkId,
mut tributary_event: mpsc::UnboundedReceiver<TributaryEvent<D, P>>,
) {
let mut tributaries = HashMap::new();
'outer: loop {
// TODO: Create a better async flow for this
tokio::time::sleep(core::time::Duration::from_millis(100)).await;
match tributary_event.try_recv() {
Ok(event) => match event {
TributaryEvent::NewTributary(tributary) => {
let set = tributary.spec.set();
assert_eq!(set.network, network);
tributaries.insert(set.session, tributary);
}
TributaryEvent::TributaryRetired(set) => {
tributaries.remove(&set.session);
}
},
Err(mpsc::error::TryRecvError::Empty) => {}
Err(mpsc::error::TryRecvError::Disconnected) => {
panic!("handle_processor_messages tributary_event sender closed")
}
}
// Handle pending cosigns
{
let mut txn = db.txn();
while let Some((session, block, hash)) = CosignTransactions::try_recv(&mut txn, network) {
let Some(ActiveTributary { spec, tributary }) = tributaries.get(&session) else {
log::warn!("didn't yet have tributary we're supposed to cosign with");
break;
};
log::info!(
"{network:?} {session:?} cosigning block #{block} (hash {}...)",
hex::encode(&hash[.. 8])
);
let tx = Transaction::CosignSubstrateBlock(hash);
let res = tributary.provide_transaction(tx.clone()).await;
if !(res.is_ok() || (res == Err(ProvidedError::AlreadyProvided))) {
if res == Err(ProvidedError::LocalMismatchesOnChain) {
// Spin, since this is a crit for this Tributary
loop {
log::error!(
"{}. tributary: {}, provided: {:?}",
"tributary added distinct CosignSubstrateBlock",
hex::encode(spec.genesis()),
&tx,
);
sleep(Duration::from_secs(60)).await;
}
}
panic!("provided an invalid CosignSubstrateBlock: {res:?}");
}
}
txn.commit();
}
// Verify any publifshed `Batch`s
{
let _hvq_lock = HANDOVER_VERIFY_QUEUE_LOCK.get_or_init(|| Mutex::new(())).lock().await;
let mut txn = db.txn();
let mut to_publish = vec![];
let start_id =
LastVerifiedBatchDb::get(&txn, network).map_or(0, |already_verified| already_verified + 1);
if let Some(last_id) =
substrate::verify_published_batches::<D>(&mut txn, network, u32::MAX).await
{
// Check if any of these `Batch`s were a handover `Batch` or the `Batch` before a handover
// `Batch`
// If so, we need to publish queued provided `Batch` transactions
for batch in start_id ..= last_id {
let is_pre_handover = LookupHandoverBatchDb::get(&txn, network, batch + 1);
if let Some(session) = is_pre_handover {
let set = ExternalValidatorSet { network, session };
let mut queued = QueuedBatchesDb::take(&mut txn, set);
// is_handover_batch is only set for handover `Batch`s we're participating in, making
// this safe
if queued.is_empty() {
panic!("knew the next Batch was a handover yet didn't queue it");
}
// Only publish the handover Batch
to_publish.push((set.session, queued.remove(0)));
// Re-queue the remaining batches
for remaining in queued {
QueuedBatchesDb::queue(&mut txn, set, &remaining);
}
}
let is_handover = LookupHandoverBatchDb::get(&txn, network, batch);
if let Some(session) = is_handover {
for queued in QueuedBatchesDb::take(&mut txn, ExternalValidatorSet { network, session })
{
to_publish.push((session, queued));
}
}
}
}
for (session, tx) in to_publish {
let Some(ActiveTributary { spec, tributary }) = tributaries.get(&session) else {
log::warn!("didn't yet have tributary we're supposed to provide a queued Batch for");
// Safe since this will drop the txn updating the most recently queued batch
continue 'outer;
};
log::debug!("providing Batch transaction {:?}", &tx);
let res = tributary.provide_transaction(tx.clone()).await;
if !(res.is_ok() || (res == Err(ProvidedError::AlreadyProvided))) {
if res == Err(ProvidedError::LocalMismatchesOnChain) {
// Spin, since this is a crit for this Tributary
loop {
log::error!(
"{}. tributary: {}, provided: {:?}",
"tributary added distinct Batch",
hex::encode(spec.genesis()),
&tx,
);
sleep(Duration::from_secs(60)).await;
}
}
panic!("provided an invalid Batch: {res:?}");
}
}
txn.commit();
}
}
}
/// Spawn per-network tasks for processor messages and cosign/batch publication, then fan
/// Tributary events from the broadcast channel out to every network's tasks.
pub async fn handle_processors<D: Db, Pro: Processors, P: P2p>(
  db: D,
  key: Zeroizing<<Ristretto as Ciphersuite>::F>,
  serai: Arc<Serai>,
  processors: Pro,
  p2p: P,
  cosign_channel: mpsc::UnboundedSender<CosignedBlock>,
  mut tributary_event: broadcast::Receiver<TributaryEvent<D, P>>,
) {
  // One (processor, cosign) channel pair per external network
  let mut channels = HashMap::new();
  for network in serai_client::primitives::EXTERNAL_NETWORKS {
    let (processor_send, processor_recv) = mpsc::unbounded_channel();
    tokio::spawn(handle_processor_messages(
      db.clone(),
      key.clone(),
      serai.clone(),
      processors.clone(),
      p2p.clone(),
      cosign_channel.clone(),
      network,
      processor_recv,
    ));
    let (cosign_send, cosign_recv) = mpsc::unbounded_channel();
    tokio::spawn(handle_cosigns_and_batch_publication(db.clone(), network, cosign_recv));
    channels.insert(network, (processor_send, cosign_send));
  }

  // Forward every Tributary event to both of the relevant network's tasks
  loop {
    let event = tributary_event.recv().await.unwrap();
    let network = match &event {
      TributaryEvent::NewTributary(tributary) => tributary.spec.set().network,
      TributaryEvent::TributaryRetired(set) => set.network,
    };
    let (to_processor_task, to_cosign_task) = &channels[&network];
    match event {
      TributaryEvent::NewTributary(tributary) => {
        to_processor_task.send(TributaryEvent::NewTributary(tributary.clone())).unwrap();
        to_cosign_task.send(TributaryEvent::NewTributary(tributary)).unwrap();
      }
      TributaryEvent::TributaryRetired(set) => {
        to_processor_task.send(TributaryEvent::TributaryRetired(set)).unwrap();
        to_cosign_task.send(TributaryEvent::TributaryRetired(set)).unwrap();
      }
    }
  }
}
/// Run the coordinator.
///
/// Spawns every long-lived task (Substrate scanning, Tributary creation/scanning, heartbeats,
/// cosign evaluation, P2P handling) and then blocks on handling processor messages.
pub async fn run<D: Db, Pro: Processors, P: P2p>(
  raw_db: D,
  key: Zeroizing<<Ristretto as Ciphersuite>::F>,
  p2p: P,
  processors: Pro,
  serai: Arc<Serai>,
) {
  let (new_tributary_spec_send, mut new_tributary_spec_recv) = mpsc::unbounded_channel();
  // Reload active tributaries from the database
  for spec in ActiveTributaryDb::active_tributaries(&raw_db).1 {
    new_tributary_spec_send.send(spec).unwrap();
  }
  let (perform_slash_report_send, mut perform_slash_report_recv) = mpsc::unbounded_channel();
  let (tributary_retired_send, mut tributary_retired_recv) = mpsc::unbounded_channel();
  // Handle new Substrate blocks
  tokio::spawn(crate::substrate::scan_task(
    raw_db.clone(),
    key.clone(),
    processors.clone(),
    serai.clone(),
    new_tributary_spec_send,
    perform_slash_report_send,
    tributary_retired_send,
  ));
  // Handle the Tributaries
  // This channel capacity should be large enough for an entire rotation of all tributaries
  // If it's too small, the coordinator will fail to boot, which is a decent sanity check
  let (tributary_event, mut tributary_event_listener_1) = broadcast::channel(32);
  let tributary_event_listener_2 = tributary_event.subscribe();
  let tributary_event_listener_3 = tributary_event.subscribe();
  let tributary_event_listener_4 = tributary_event.subscribe();
  let tributary_event_listener_5 = tributary_event.subscribe();
  // Emit TributaryEvent::TributaryRetired by forwarding from the retired-set mpsc channel onto
  // the broadcast channel
  tokio::spawn({
    let tributary_event = tributary_event.clone();
    async move {
      loop {
        let retired = tributary_retired_recv.recv().await.unwrap();
        tributary_event.send(TributaryEvent::TributaryRetired(retired)).map_err(|_| ()).unwrap();
      }
    }
  });
  // Spawn a task to further add Tributaries as needed
  tokio::spawn({
    let raw_db = raw_db.clone();
    let key = key.clone();
    let processors = processors.clone();
    let p2p = p2p.clone();
    async move {
      loop {
        let spec = new_tributary_spec_recv.recv().await.unwrap();
        // Uses an inner task as Tributary::new may take several seconds
        tokio::spawn({
          let raw_db = raw_db.clone();
          let key = key.clone();
          let processors = processors.clone();
          let p2p = p2p.clone();
          let tributary_event = tributary_event.clone();
          async move {
            add_tributary(raw_db, key, &processors, p2p, &tributary_event, spec).await;
          }
        });
      }
    }
  });
  // When we reach synchrony on an event requiring signing, send our preprocess for it
  // TODO: Properly place this into the Tributary scanner, as it's a mess out here
  let recognized_id = {
    let raw_db = raw_db.clone();
    let key = key.clone();
    // Local views of the Tributary specs and handles, keyed by set and genesis respectively
    let specs = Arc::new(RwLock::new(HashMap::new()));
    let tributaries = Arc::new(RwLock::new(HashMap::new()));
    // Spawn a task to maintain a local view of the tributaries for whenever recognized_id is
    // called
    tokio::spawn({
      let specs = specs.clone();
      let tributaries = tributaries.clone();
      let mut set_to_genesis = HashMap::new();
      async move {
        loop {
          match tributary_event_listener_1.recv().await {
            Ok(TributaryEvent::NewTributary(tributary)) => {
              set_to_genesis.insert(tributary.spec.set(), tributary.spec.genesis());
              tributaries.write().await.insert(tributary.spec.genesis(), tributary.tributary);
              specs.write().await.insert(tributary.spec.set(), tributary.spec);
            }
            Ok(TributaryEvent::TributaryRetired(set)) => {
              if let Some(genesis) = set_to_genesis.remove(&set) {
                specs.write().await.remove(&set);
                tributaries.write().await.remove(&genesis);
              }
            }
            Err(broadcast::error::RecvError::Lagged(_)) => {
              panic!("recognized_id lagged to handle tributary_event")
            }
            Err(broadcast::error::RecvError::Closed) => panic!("tributary_event sender closed"),
          }
        }
      }
    });
    // Also spawn a task to handle slash reports, as this needs such a view of tributaries
    tokio::spawn({
      let mut raw_db = raw_db.clone();
      let key = key.clone();
      let tributaries = tributaries.clone();
      async move {
        'task_loop: loop {
          match perform_slash_report_recv.recv().await {
            Some(set) => {
              // Wait until we have this Tributary's spec (or learn it's retired)
              let (genesis, validators) = loop {
                let specs = specs.read().await;
                let Some(spec) = specs.get(&set) else {
                  // If we don't have this Tributary because it's retired, break and move on
                  if RetiredTributaryDb::get(&raw_db, set).is_some() {
                    continue 'task_loop;
                  }
                  // This may happen if the task above is simply slow
                  log::warn!("tributary we don't have yet is supposed to perform a slash report");
                  continue;
                };
                break (spec.genesis(), spec.validators());
              };
              // Collect the slash points for every validator other than ourselves
              let mut slashes = vec![];
              for (validator, _) in validators {
                if validator == (<Ristretto as Ciphersuite>::generator() * key.deref()) {
                  continue;
                }
                let validator = validator.to_bytes();
                let fatally = tributary::FatallySlashed::get(&raw_db, genesis, validator).is_some();
                // TODO: Properly type this
                // Fatal slashes are represented with the maximum point value
                let points = if fatally {
                  u32::MAX
                } else {
                  tributary::SlashPoints::get(&raw_db, genesis, validator).unwrap_or(0)
                };
                slashes.push(points);
              }
              let mut tx = Transaction::SlashReport(slashes, Transaction::empty_signed());
              tx.sign(&mut OsRng, genesis, &key);
              // Retry until the Tributary is available (or known-retired)
              let mut first = true;
              loop {
                if !first {
                  sleep(Duration::from_millis(100)).await;
                }
                first = false;
                let tributaries = tributaries.read().await;
                let Some(tributary) = tributaries.get(&genesis) else {
                  // If we don't have this Tributary because it's retired, break and move on
                  if RetiredTributaryDb::get(&raw_db, set).is_some() {
                    break;
                  }
                  // This may happen if the task above is simply slow
                  log::warn!("tributary we don't have yet is supposed to perform a slash report");
                  continue;
                };
                // This is safe to perform multiple times and solely needs atomicity with regards
                // to itself
                // TODO: Should this not take a txn accordingly? It's best practice to take a txn,
                // yet taking a txn fails to declare its achieved independence
                let mut txn = raw_db.txn();
                tributary::publish_signed_transaction(&mut txn, tributary, tx).await;
                txn.commit();
                break;
              }
            }
            None => panic!("perform slash report sender closed"),
          }
        }
      }
    });
    // The closure handed to the Tributary scanner, invoked when an ID requiring signing reaches
    // synchrony; it publishes our first preprocess for that ID
    move |set: ExternalValidatorSet, genesis, id_type, id: Vec<u8>| {
      log::debug!("recognized ID {:?} {}", id_type, hex::encode(&id));
      let mut raw_db = raw_db.clone();
      let key = key.clone();
      let tributaries = tributaries.clone();
      async move {
        // The transactions for these are fired before the preprocesses are actually
        // received/saved, creating a race between Tributary ack and the availability of all
        // Preprocesses
        // This waits until the necessary preprocess is available
        let get_preprocess = |raw_db, id_type, id| async move {
          loop {
            let Some(preprocess) = FirstPreprocessDb::get(raw_db, set.network, id_type, id) else {
              log::warn!("waiting for preprocess for recognized ID");
              sleep(Duration::from_millis(100)).await;
              continue;
            };
            return preprocess;
          }
        };
        let mut tx = match id_type {
          RecognizedIdType::Batch => Transaction::SubstrateSign(SignData {
            data: get_preprocess(&raw_db, id_type, &id).await,
            plan: SubstrateSignableId::Batch(u32::from_le_bytes(id.try_into().unwrap())),
            label: Label::Preprocess,
            attempt: 0,
            signed: Transaction::empty_signed(),
          }),
          RecognizedIdType::Plan => Transaction::Sign(SignData {
            data: get_preprocess(&raw_db, id_type, &id).await,
            plan: id.try_into().unwrap(),
            label: Label::Preprocess,
            attempt: 0,
            signed: Transaction::empty_signed(),
          }),
        };
        tx.sign(&mut OsRng, genesis, &key);
        // Retry until the Tributary is available (or known-retired)
        let mut first = true;
        loop {
          if !first {
            sleep(Duration::from_millis(100)).await;
          }
          first = false;
          let tributaries = tributaries.read().await;
          let Some(tributary) = tributaries.get(&genesis) else {
            // If we don't have this Tributary because it's retired, break and move on
            if RetiredTributaryDb::get(&raw_db, set).is_some() {
              break;
            }
            // This may happen if the task above is simply slow
            log::warn!("tributary we don't have yet came to consensus on an Batch");
            continue;
          };
          // This is safe to perform multiple times and solely needs atomicity with regards to
          // itself
          // TODO: Should this not take a txn accordingly? It's best practice to take a txn, yet
          // taking a txn fails to declare its achieved independence
          let mut txn = raw_db.txn();
          tributary::publish_signed_transaction(&mut txn, tributary, tx).await;
          txn.commit();
          break;
        }
      }
    }
  };
  // Handle new blocks for each Tributary
  {
    let raw_db = raw_db.clone();
    tokio::spawn(tributary::scanner::scan_tributaries_task(
      raw_db,
      key.clone(),
      recognized_id,
      processors.clone(),
      serai.clone(),
      tributary_event_listener_2,
    ));
  }
  // Spawn the heartbeat task, which will trigger syncing if there hasn't been a Tributary block
  // in a while (presumably because we're behind)
  tokio::spawn(p2p::heartbeat_tributaries_task(p2p.clone(), tributary_event_listener_3));
  // Create the Cosign evaluator
  let cosign_channel = CosignEvaluator::new(raw_db.clone(), p2p.clone(), serai.clone());
  // Handle P2P messages
  tokio::spawn(p2p::handle_p2p_task(
    p2p.clone(),
    cosign_channel.clone(),
    tributary_event_listener_4,
  ));
  // Handle all messages from processors; this never returns
  handle_processors(
    raw_db,
    key,
    serai,
    processors,
    p2p,
    cosign_channel,
    tributary_event_listener_5,
  )
  .await;
}
/// Coordinator entry point: set up logging, the DB, the Serai key, the message queue, the Serai
/// client connection, and libp2p, then run the coordinator forever.
#[tokio::main]
async fn main() {
  // Override the panic handler with one which will panic if any tokio task panics
  {
    let existing = std::panic::take_hook();
    std::panic::set_hook(Box::new(move |panic| {
      existing(panic);
      const MSG: &str = "exiting the process due to a task panicking";
      println!("{MSG}");
      log::error!("{MSG}");
      std::process::exit(1);
    }));
  }
  // Default RUST_LOG (from the Serai env, falling back to "info") if the process env doesn't set
  // it
  if std::env::var("RUST_LOG").is_err() {
    std::env::set_var("RUST_LOG", serai_env::var("RUST_LOG").unwrap_or_else(|| "info".to_string()));
  }
  env_logger::init();
  log::info!("starting coordinator service...");
  // Select the DB backend at compile time via features; exactly one of parity-db/rocksdb must be
  // enabled
  #[allow(unused_variables, unreachable_code)]
  let db = {
    #[cfg(all(feature = "parity-db", feature = "rocksdb"))]
    panic!("built with parity-db and rocksdb");
    #[cfg(all(feature = "parity-db", not(feature = "rocksdb")))]
    let db =
      serai_db::new_parity_db(&serai_env::var("DB_PATH").expect("path to DB wasn't specified"));
    #[cfg(feature = "rocksdb")]
    let db =
      serai_db::new_rocksdb(&serai_env::var("DB_PATH").expect("path to DB wasn't specified"));
    db
  };
  // Read the Serai key from the env, zeroizing every intermediate copy of the key material
  let key = {
    let mut key_hex = serai_env::var("SERAI_KEY").expect("Serai key wasn't provided");
    // NOTE(review): the map_err discards the hex error — presumably so the panic message can't
    // echo key material; confirm before "simplifying" this
    let mut key_vec = hex::decode(&key_hex).map_err(|_| ()).expect("Serai key wasn't hex-encoded");
    key_hex.zeroize();
    if key_vec.len() != 32 {
      key_vec.zeroize();
      panic!("Serai key had an invalid length");
    }
    let mut key_bytes = [0; 32];
    key_bytes.copy_from_slice(&key_vec);
    key_vec.zeroize();
    let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::from_repr(key_bytes).unwrap());
    key_bytes.zeroize();
    key
  };
  let processors = Arc::new(MessageQueue::from_env(Service::Coordinator));
  // Retry connecting to the Serai node every 5 seconds until it succeeds
  let serai = (async {
    loop {
      let Ok(serai) = Serai::new(format!(
        "http://{}:9944",
        serai_env::var("SERAI_HOSTNAME").expect("Serai hostname wasn't provided")
      ))
      .await
      else {
        log::error!("couldn't connect to the Serai node");
        sleep(Duration::from_secs(5)).await;
        continue;
      };
      log::info!("made initial connection to Serai node");
      return Arc::new(serai);
    }
  })
  .await;
  let p2p = LibP2p::new(serai.clone());
  run(db, key, p2p, processors, serai).await
}
================================================
FILE: coordinator/src/p2p.rs
================================================
use core::{time::Duration, fmt};
use std::{
sync::Arc,
io::{self, Read},
collections::{HashSet, HashMap},
time::{SystemTime, Instant},
};
use async_trait::async_trait;
use rand_core::{RngCore, OsRng};
use scale::{Decode, Encode};
use borsh::{BorshSerialize, BorshDeserialize};
use serai_client::{
primitives::ExternalNetworkId, validator_sets::primitives::ExternalValidatorSet, Serai,
};
use serai_db::Db;
use futures_util::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, StreamExt};
use tokio::{
sync::{Mutex, RwLock, mpsc, broadcast},
time::sleep,
};
use libp2p::{
core::multiaddr::{Protocol, Multiaddr},
identity::Keypair,
PeerId,
tcp::Config as TcpConfig,
noise, yamux,
request_response::{
Codec as RrCodecTrait, Message as RrMessage, Event as RrEvent, Config as RrConfig,
Behaviour as RrBehavior, ProtocolSupport,
},
gossipsub::{
IdentTopic, FastMessageId, MessageId, MessageAuthenticity, ValidationMode, ConfigBuilder,
IdentityTransform, AllowAllSubscriptionFilter, Event as GsEvent, PublishError,
Behaviour as GsBehavior,
},
swarm::{NetworkBehaviour, SwarmEvent},
SwarmBuilder,
};
pub(crate) use tributary::{ReadWrite, P2p as TributaryP2p};
use crate::{Transaction, Block, Tributary, ActiveTributary, TributaryEvent};
// Block size limit + 1 KB of space for signatures/metadata
const MAX_LIBP2P_GOSSIP_MESSAGE_SIZE: usize = tributary::BLOCK_SIZE_LIMIT + 1024;
// A request-response message may carry a full batch of blocks (heartbeat sync), plus 1 KB of
// space for signatures/metadata
const MAX_LIBP2P_REQRES_MESSAGE_SIZE: usize =
  (tributary::BLOCK_SIZE_LIMIT * BLOCKS_PER_BATCH) + 1024;
// The larger of the two limits above
const MAX_LIBP2P_MESSAGE_SIZE: usize = {
  // Manual `max` since `max` isn't a const fn
  if MAX_LIBP2P_GOSSIP_MESSAGE_SIZE > MAX_LIBP2P_REQRES_MESSAGE_SIZE {
    MAX_LIBP2P_GOSSIP_MESSAGE_SIZE
  } else {
    MAX_LIBP2P_REQRES_MESSAGE_SIZE
  }
};
// Gossipsub topic used by the coordinator
const LIBP2P_TOPIC: &str = "serai-coordinator";
// Amount of blocks in a minute
// NOTE(review): assumes TARGET_BLOCK_TIME is in milliseconds — confirm against tendermint crate
const BLOCKS_PER_MINUTE: usize = (60 / (tributary::tendermint::TARGET_BLOCK_TIME / 1000)) as usize;
// Maximum amount of blocks to send in a batch
const BLOCKS_PER_BATCH: usize = BLOCKS_PER_MINUTE + 1;
/// A cosigned Substrate block, as broadcast over the P2P network.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, BorshSerialize, BorshDeserialize)]
pub struct CosignedBlock {
  // The external network whose validator set produced this cosign
  pub network: ExternalNetworkId,
  // The number of the cosigned Substrate block
  pub block_number: u64,
  // The 32-byte identifier of the cosigned block — presumably its hash (TODO confirm)
  pub block: [u8; 32],
  // 64-byte signature over the cosign — scheme not evident here; verify against the signer
  pub signature: [u8; 64],
}
/// The kind of a message sent over the request-response protocol.
///
/// Serialized as a single tag byte (0/1/2), followed by a 32-byte Tributary genesis for the
/// `Heartbeat` and `Block` variants.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub enum ReqResMessageKind {
  KeepAlive,
  Heartbeat([u8; 32]),
  Block([u8; 32]),
}

impl ReqResMessageKind {
  /// Read a message kind from a reader, returning None on truncated or unrecognized input.
  pub fn read<R: Read>(reader: &mut R) -> Option<ReqResMessageKind> {
    let mut tag = [0; 1];
    reader.read_exact(&mut tag).ok()?;
    // Tag 0 carries no payload
    if tag[0] == 0 {
      return Some(ReqResMessageKind::KeepAlive);
    }
    // Only tags 1 and 2 remain valid, both followed by a 32-byte genesis
    if (tag[0] != 1) && (tag[0] != 2) {
      return None;
    }
    let mut genesis = [0; 32];
    reader.read_exact(&mut genesis).ok()?;
    Some(if tag[0] == 1 {
      ReqResMessageKind::Heartbeat(genesis)
    } else {
      ReqResMessageKind::Block(genesis)
    })
  }

  /// Serialize this message kind to its tag byte, followed by its payload (if any).
  pub fn serialize(&self) -> Vec<u8> {
    let (tag, genesis) = match self {
      ReqResMessageKind::KeepAlive => (0u8, None),
      ReqResMessageKind::Heartbeat(genesis) => (1, Some(genesis)),
      ReqResMessageKind::Block(genesis) => (2, Some(genesis)),
    };
    let mut res = vec![tag];
    if let Some(genesis) = genesis {
      res.extend(genesis);
    }
    res
  }
}
/// The kind of a message sent over the gossip protocol.
///
/// Serialized as a single tag byte (0/1), with the `Tributary` variant followed by its 32-byte
/// genesis.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub enum GossipMessageKind {
  Tributary([u8; 32]),
  CosignedBlock,
}

impl GossipMessageKind {
  /// Read a message kind from a reader, returning None on truncated or unrecognized input.
  pub fn read<R: Read>(reader: &mut R) -> Option<GossipMessageKind> {
    let mut tag = [0; 1];
    reader.read_exact(&mut tag).ok()?;
    match tag[0] {
      // Tag 0 is followed by a 32-byte genesis
      0 => {
        let mut genesis = [0; 32];
        reader.read_exact(&mut genesis).ok()?;
        Some(GossipMessageKind::Tributary(genesis))
      }
      1 => Some(GossipMessageKind::CosignedBlock),
      _ => None,
    }
  }

  /// Serialize this message kind to its tag byte, followed by its payload (if any).
  pub fn serialize(&self) -> Vec<u8> {
    match *self {
      GossipMessageKind::Tributary(genesis) => {
        let mut res = Vec::with_capacity(33);
        res.push(0);
        res.extend(genesis);
        res
      }
      GossipMessageKind::CosignedBlock => vec![1],
    }
  }
}
/// The kind of any P2P message, unifying the request-response and gossip protocols.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub enum P2pMessageKind {
  // A message sent over the request-response protocol
  ReqRes(ReqResMessageKind),
  // A message sent over the gossip protocol
  Gossip(GossipMessageKind),
}
impl P2pMessageKind {
  /// The Tributary genesis this message kind is scoped to, if any.
  ///
  /// `KeepAlive` and `CosignedBlock` are network-wide and return None.
  fn genesis(&self) -> Option<[u8; 32]> {
    match self {
      P2pMessageKind::ReqRes(kind) => match kind {
        ReqResMessageKind::KeepAlive => None,
        ReqResMessageKind::Heartbeat(genesis) | ReqResMessageKind::Block(genesis) => Some(*genesis),
      },
      P2pMessageKind::Gossip(kind) => match kind {
        GossipMessageKind::CosignedBlock => None,
        GossipMessageKind::Tributary(genesis) => Some(*genesis),
      },
    }
  }
}
impl From<ReqResMessageKind> for P2pMessageKind {
fn from(kind: ReqResMessageKind) -> P2pMessageKind {
P2pMessageKind::ReqRes(kind)
}
}
impl From<GossipMessageKind> for P2pMessageKind {
  // Wrap a gossip message kind into the unified kind.
  fn from(kind: GossipMessageKind) -> P2pMessageKind {
    P2pMessageKind::Gossip(kind)
  }
}
/// A message received over the P2P network.
#[derive(Clone, Debug)]
pub struct Message<P: P2p> {
  /// The peer which sent this message.
  pub sender: P::Id,
  /// The kind of message, parsed from the payload's prefix.
  pub kind: P2pMessageKind,
  /// The message body (the bytes following the serialized kind).
  pub msg: Vec<u8>,
}
/// A block and the commit (signature data) for it, as sent in heartbeat
/// responses.
#[derive(Clone, Debug, Encode, Decode)]
pub struct BlockCommit {
  // Serialized block. The exact encoding is defined by the Tributary crate;
  // this struct treats it as opaque bytes.
  pub block: Vec<u8>,
  // Serialized commit for the block, likewise opaque here.
  pub commit: Vec<u8>,
}
/// A batch of blocks (with their commits) sent in response to a heartbeat.
#[derive(Clone, Debug, Encode, Decode)]
pub struct HeartbeatBatch {
  /// The blocks and their commits, in order.
  pub blocks: Vec<BlockCommit>,
  // Timestamp associated with the batch. Presumably seconds since the Unix
  // epoch — TODO(review): confirm units against the sender.
  pub timestamp: u64,
}
#[async_trait]
pub trait P2p: Send + Sync + Clone + fmt::Debug + TributaryP2p {
type Id: Send + Sync + Clone + Copy + fmt::Debug;
async fn subscribe(&self, set: ExternalValidatorSet, genesis: [u8; 32]);
async fn unsubscribe(&self, set: ExternalValidatorSet, genesis: [u8; 32]);
async fn send_raw(&self, to: Self::Id, msg: Vec<u8>);
async fn broadcast_raw(&self, kind: P2pMessageKind, msg: Vec<u8>);
async fn receive(&self) -> Message<Self>;
/// Send a request/response message to a specific peer, prefixing the body with
/// the serialized message kind.
async fn send(&self, to: Self::Id, kind: ReqResMessageKind, msg: Vec<u8>) {
  let payload: Vec<u8> = kind.serialize().into_iter().chain(msg).collect();
  self.send_raw(to, payload).await;
}
async fn broadcast(&self, kind: impl Send + Into<P2pMessageKind>, msg: Vec<u8>) {
let kind = kind.into();
let mut actual_msg = match kind {
P2pMessageKind::ReqRes(kind) => kind.serialize(),
P2pMessageKind::Gossip(kind) => kind.serialize(),
};
actual_msg.extend(msg);
/*
log::trace!(
"broadcasting p2p message (kind {})",
match kind {
P2pMessageKind::KeepAlive => "KeepAlive".to_string(),
P2pMessageKind::Tributary(genesis) => format!("Tributary({})", hex::encode(genesis)),
P2pMessageKind::Heartbeat(genesis) => format!("Heartbeat({})", hex::encode(genesis)),
P2pMessageKind::Block(genesis) => format!("Block({})", hex::encode(genesis)),
P2pMessageKind::Cosigned
gitextract_j2hgunrb/
├── .gitattributes
├── .github/
│ ├── LICENSE
│ ├── actions/
│ │ ├── bitcoin/
│ │ │ └── action.yml
│ │ ├── build-dependencies/
│ │ │ └── action.yml
│ │ ├── monero/
│ │ │ └── action.yml
│ │ ├── monero-wallet-rpc/
│ │ │ └── action.yml
│ │ └── test-dependencies/
│ │ └── action.yml
│ ├── nightly-version
│ └── workflows/
│ ├── common-tests.yml
│ ├── coordinator-tests.yml
│ ├── crypto-tests.yml
│ ├── daily-deny.yml
│ ├── full-stack-tests.yml
│ ├── lint.yml
│ ├── message-queue-tests.yml
│ ├── mini-tests.yml
│ ├── monthly-nightly-update.yml
│ ├── networks-tests.yml
│ ├── no-std.yml
│ ├── pages.yml
│ ├── processor-tests.yml
│ ├── reproducible-runtime.yml
│ └── tests.yml
├── .gitignore
├── .rustfmt.toml
├── AGPL-3.0
├── CONTRIBUTING.md
├── Cargo.toml
├── LICENSE
├── README.md
├── audits/
│ ├── Cypher Stack crypto March 2023/
│ │ ├── LICENSE
│ │ └── README.md
│ └── Cypher Stack networks bitcoin August 2023/
│ ├── LICENSE
│ └── README.md
├── common/
│ ├── db/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ └── src/
│ │ ├── create_db.rs
│ │ ├── lib.rs
│ │ ├── mem.rs
│ │ ├── parity_db.rs
│ │ └── rocks.rs
│ ├── env/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ └── src/
│ │ └── lib.rs
│ ├── patchable-async-sleep/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ ├── README.md
│ │ └── src/
│ │ └── lib.rs
│ ├── request/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ ├── README.md
│ │ └── src/
│ │ ├── lib.rs
│ │ ├── request.rs
│ │ └── response.rs
│ ├── std-shims/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ ├── README.md
│ │ └── src/
│ │ ├── collections.rs
│ │ ├── io.rs
│ │ ├── lib.rs
│ │ └── sync.rs
│ └── zalloc/
│ ├── Cargo.toml
│ ├── LICENSE
│ ├── build.rs
│ └── src/
│ └── lib.rs
├── coordinator/
│ ├── Cargo.toml
│ ├── LICENSE
│ ├── README.md
│ ├── src/
│ │ ├── cosign_evaluator.rs
│ │ ├── db.rs
│ │ ├── main.rs
│ │ ├── p2p.rs
│ │ ├── processors.rs
│ │ ├── substrate/
│ │ │ ├── cosign.rs
│ │ │ ├── db.rs
│ │ │ └── mod.rs
│ │ ├── tests/
│ │ │ ├── mod.rs
│ │ │ └── tributary/
│ │ │ ├── chain.rs
│ │ │ ├── dkg.rs
│ │ │ ├── handle_p2p.rs
│ │ │ ├── mod.rs
│ │ │ ├── sync.rs
│ │ │ └── tx.rs
│ │ └── tributary/
│ │ ├── db.rs
│ │ ├── handle.rs
│ │ ├── mod.rs
│ │ ├── scanner.rs
│ │ ├── signing_protocol.rs
│ │ ├── spec.rs
│ │ └── transaction.rs
│ └── tributary/
│ ├── Cargo.toml
│ ├── LICENSE
│ ├── README.md
│ ├── src/
│ │ ├── block.rs
│ │ ├── blockchain.rs
│ │ ├── lib.rs
│ │ ├── mempool.rs
│ │ ├── merkle.rs
│ │ ├── provided.rs
│ │ ├── tendermint/
│ │ │ ├── mod.rs
│ │ │ └── tx.rs
│ │ ├── tests/
│ │ │ ├── block.rs
│ │ │ ├── blockchain.rs
│ │ │ ├── mempool.rs
│ │ │ ├── merkle.rs
│ │ │ ├── mod.rs
│ │ │ ├── p2p.rs
│ │ │ ├── tendermint.rs
│ │ │ └── transaction/
│ │ │ ├── mod.rs
│ │ │ ├── signed.rs
│ │ │ └── tendermint.rs
│ │ └── transaction.rs
│ └── tendermint/
│ ├── Cargo.toml
│ ├── LICENSE
│ ├── README.md
│ ├── src/
│ │ ├── block.rs
│ │ ├── ext.rs
│ │ ├── lib.rs
│ │ ├── message_log.rs
│ │ ├── round.rs
│ │ └── time.rs
│ └── tests/
│ └── ext.rs
├── crypto/
│ ├── ciphersuite/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ ├── README.md
│ │ ├── kp256/
│ │ │ ├── Cargo.toml
│ │ │ ├── LICENSE
│ │ │ ├── README.md
│ │ │ └── src/
│ │ │ └── lib.rs
│ │ └── src/
│ │ ├── lib.md
│ │ └── lib.rs
│ ├── dalek-ff-group/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ ├── README.md
│ │ └── src/
│ │ ├── ciphersuite.rs
│ │ ├── field.rs
│ │ └── lib.rs
│ ├── dkg/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ ├── README.md
│ │ ├── dealer/
│ │ │ ├── Cargo.toml
│ │ │ ├── LICENSE
│ │ │ ├── README.md
│ │ │ └── src/
│ │ │ └── lib.rs
│ │ ├── musig/
│ │ │ ├── Cargo.toml
│ │ │ ├── LICENSE
│ │ │ ├── README.md
│ │ │ └── src/
│ │ │ ├── lib.rs
│ │ │ └── tests.rs
│ │ ├── pedpop/
│ │ │ ├── Cargo.toml
│ │ │ ├── LICENSE
│ │ │ ├── README.md
│ │ │ └── src/
│ │ │ ├── encryption.rs
│ │ │ ├── lib.rs
│ │ │ └── tests.rs
│ │ ├── promote/
│ │ │ ├── Cargo.toml
│ │ │ ├── LICENSE
│ │ │ ├── README.md
│ │ │ └── src/
│ │ │ ├── lib.rs
│ │ │ └── tests.rs
│ │ ├── recovery/
│ │ │ ├── Cargo.toml
│ │ │ ├── LICENSE
│ │ │ ├── README.md
│ │ │ └── src/
│ │ │ └── lib.rs
│ │ └── src/
│ │ └── lib.rs
│ ├── dleq/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ ├── README.md
│ │ └── src/
│ │ ├── cross_group/
│ │ │ ├── aos.rs
│ │ │ ├── bits.rs
│ │ │ ├── mod.rs
│ │ │ ├── scalar.rs
│ │ │ └── schnorr.rs
│ │ ├── lib.rs
│ │ └── tests/
│ │ ├── cross_group/
│ │ │ ├── aos.rs
│ │ │ ├── mod.rs
│ │ │ ├── scalar.rs
│ │ │ └── schnorr.rs
│ │ └── mod.rs
│ ├── ed448/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ ├── README.md
│ │ └── src/
│ │ ├── backend.rs
│ │ ├── ciphersuite.rs
│ │ ├── field.rs
│ │ ├── lib.rs
│ │ ├── point.rs
│ │ └── scalar.rs
│ ├── ff-group-tests/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ ├── README.md
│ │ └── src/
│ │ ├── field.rs
│ │ ├── group.rs
│ │ ├── lib.rs
│ │ └── prime_field.rs
│ ├── frost/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ ├── README.md
│ │ └── src/
│ │ ├── algorithm.rs
│ │ ├── curve/
│ │ │ ├── dalek.rs
│ │ │ ├── ed448.rs
│ │ │ ├── kp256.rs
│ │ │ └── mod.rs
│ │ ├── lib.rs
│ │ ├── nonce.rs
│ │ ├── sign.rs
│ │ └── tests/
│ │ ├── literal/
│ │ │ ├── dalek.rs
│ │ │ ├── ed448.rs
│ │ │ ├── kp256.rs
│ │ │ ├── mod.rs
│ │ │ └── vectors/
│ │ │ ├── frost-ed25519-sha512.json
│ │ │ ├── frost-ed448-shake256.json
│ │ │ ├── frost-p256-sha256.json
│ │ │ ├── frost-ristretto255-sha512.json
│ │ │ └── frost-secp256k1-sha256.json
│ │ ├── mod.rs
│ │ ├── nonces.rs
│ │ └── vectors.rs
│ ├── multiexp/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ ├── README.md
│ │ └── src/
│ │ ├── batch.rs
│ │ ├── lib.rs
│ │ ├── pippenger.rs
│ │ ├── straus.rs
│ │ └── tests/
│ │ ├── batch.rs
│ │ └── mod.rs
│ ├── schnorr/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ ├── README.md
│ │ └── src/
│ │ ├── aggregate.rs
│ │ ├── lib.rs
│ │ └── tests/
│ │ ├── mod.rs
│ │ └── rfc8032.rs
│ ├── schnorrkel/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ ├── README.md
│ │ └── src/
│ │ ├── lib.rs
│ │ └── tests.rs
│ └── transcript/
│ ├── Cargo.toml
│ ├── LICENSE
│ ├── README.md
│ └── src/
│ ├── lib.rs
│ ├── merlin.rs
│ └── tests.rs
├── deny.toml
├── docs/
│ ├── .gitignore
│ ├── .ruby-version
│ ├── Gemfile
│ ├── _config.yml
│ ├── amm/
│ │ └── index.md
│ ├── cross_chain/
│ │ └── index.md
│ ├── economics/
│ │ ├── genesis.md
│ │ ├── index.md
│ │ ├── post.md
│ │ └── pre.md
│ ├── index.md
│ ├── infrastructure/
│ │ ├── coordinator.md
│ │ ├── index.md
│ │ ├── message_queue.md
│ │ ├── processor.md
│ │ └── serai.md
│ ├── integrating/
│ │ └── index.md
│ ├── protocol_changes/
│ │ └── index.md
│ └── validator/
│ └── index.md
├── message-queue/
│ ├── Cargo.toml
│ ├── LICENSE
│ ├── README.md
│ └── src/
│ ├── client.rs
│ ├── lib.rs
│ ├── main.rs
│ ├── messages.rs
│ └── queue.rs
├── mini/
│ ├── Cargo.toml
│ ├── LICENSE
│ ├── README.md
│ └── src/
│ ├── lib.rs
│ └── tests/
│ ├── activation_race/
│ │ └── mod.rs
│ └── mod.rs
├── networks/
│ ├── bitcoin/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ ├── README.md
│ │ ├── src/
│ │ │ ├── crypto.rs
│ │ │ ├── lib.rs
│ │ │ ├── rpc.rs
│ │ │ ├── tests/
│ │ │ │ ├── crypto.rs
│ │ │ │ └── mod.rs
│ │ │ └── wallet/
│ │ │ ├── mod.rs
│ │ │ └── send.rs
│ │ └── tests/
│ │ ├── rpc.rs
│ │ ├── runner.rs
│ │ └── wallet.rs
│ └── ethereum/
│ ├── .gitignore
│ ├── Cargo.toml
│ ├── LICENSE
│ ├── README.md
│ ├── alloy-simple-request-transport/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ ├── README.md
│ │ └── src/
│ │ └── lib.rs
│ ├── build.rs
│ ├── contracts/
│ │ ├── Deployer.sol
│ │ ├── IERC20.sol
│ │ ├── Router.sol
│ │ ├── Sandbox.sol
│ │ └── Schnorr.sol
│ ├── relayer/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ ├── README.md
│ │ └── src/
│ │ └── main.rs
│ └── src/
│ ├── abi/
│ │ └── mod.rs
│ ├── crypto.rs
│ ├── deployer.rs
│ ├── erc20.rs
│ ├── lib.rs
│ ├── machine.rs
│ ├── router.rs
│ └── tests/
│ ├── abi/
│ │ └── mod.rs
│ ├── contracts/
│ │ ├── ERC20.sol
│ │ └── Schnorr.sol
│ ├── crypto.rs
│ ├── mod.rs
│ ├── router.rs
│ └── schnorr.rs
├── orchestration/
│ ├── Cargo.toml
│ ├── README.md
│ ├── dev/
│ │ ├── coordinator/
│ │ │ └── .folder
│ │ ├── message-queue/
│ │ │ └── .folder
│ │ ├── networks/
│ │ │ ├── bitcoin/
│ │ │ │ └── run.sh
│ │ │ ├── ethereum/
│ │ │ │ └── run.sh
│ │ │ ├── ethereum-relayer/
│ │ │ │ └── .folder
│ │ │ ├── monero/
│ │ │ │ ├── hashes-v0.18.3.4.txt
│ │ │ │ └── run.sh
│ │ │ └── monero-wallet-rpc/
│ │ │ └── run.sh
│ │ ├── processor/
│ │ │ ├── bitcoin/
│ │ │ │ └── .folder
│ │ │ ├── ethereum/
│ │ │ │ └── .folder
│ │ │ └── monero/
│ │ │ └── .folder
│ │ └── serai/
│ │ └── run.sh
│ ├── runtime/
│ │ └── Dockerfile
│ ├── src/
│ │ ├── coordinator.rs
│ │ ├── docker.rs
│ │ ├── ethereum_relayer.rs
│ │ ├── main.rs
│ │ ├── message_queue.rs
│ │ ├── mimalloc.rs
│ │ ├── networks/
│ │ │ ├── bitcoin.rs
│ │ │ ├── ethereum/
│ │ │ │ ├── consensus/
│ │ │ │ │ ├── lighthouse.rs
│ │ │ │ │ ├── mod.rs
│ │ │ │ │ └── nimbus.rs
│ │ │ │ ├── execution/
│ │ │ │ │ ├── anvil.rs
│ │ │ │ │ ├── mod.rs
│ │ │ │ │ └── reth.rs
│ │ │ │ └── mod.rs
│ │ │ ├── mod.rs
│ │ │ └── monero.rs
│ │ ├── processor.rs
│ │ └── serai.rs
│ └── testnet/
│ ├── coordinator/
│ │ └── .folder
│ ├── message-queue/
│ │ └── .folder
│ ├── networks/
│ │ ├── bitcoin/
│ │ │ └── run.sh
│ │ ├── ethereum/
│ │ │ ├── consensus/
│ │ │ │ ├── lighthouse/
│ │ │ │ │ └── run.sh
│ │ │ │ └── nimbus/
│ │ │ │ └── run.sh
│ │ │ ├── execution/
│ │ │ │ ├── geth/
│ │ │ │ │ └── run.sh
│ │ │ │ └── reth/
│ │ │ │ └── run.sh
│ │ │ └── run.sh
│ │ ├── ethereum-relayer/
│ │ │ └── .folder
│ │ └── monero/
│ │ ├── hashes-v0.18.3.4.txt
│ │ └── run.sh
│ ├── processor/
│ │ ├── bitcoin/
│ │ │ └── .folder
│ │ ├── ethereum/
│ │ │ └── .folder
│ │ └── monero/
│ │ └── .folder
│ └── serai/
│ └── run.sh
├── patches/
│ ├── directories-next/
│ │ ├── Cargo.toml
│ │ └── src/
│ │ └── lib.rs
│ ├── home/
│ │ ├── Cargo.toml
│ │ └── src/
│ │ └── lib.rs
│ ├── matches/
│ │ ├── Cargo.toml
│ │ └── src/
│ │ └── lib.rs
│ └── option-ext/
│ ├── Cargo.toml
│ └── src/
│ └── lib.rs
├── processor/
│ ├── Cargo.toml
│ ├── LICENSE
│ ├── README.md
│ ├── messages/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ └── src/
│ │ └── lib.rs
│ └── src/
│ ├── additional_key.rs
│ ├── batch_signer.rs
│ ├── coordinator.rs
│ ├── cosigner.rs
│ ├── db.rs
│ ├── key_gen.rs
│ ├── lib.rs
│ ├── main.rs
│ ├── multisigs/
│ │ ├── db.rs
│ │ ├── mod.rs
│ │ ├── scanner.rs
│ │ └── scheduler/
│ │ ├── mod.rs
│ │ ├── smart_contract.rs
│ │ └── utxo.rs
│ ├── networks/
│ │ ├── bitcoin.rs
│ │ ├── ethereum.rs
│ │ ├── mod.rs
│ │ └── monero.rs
│ ├── plan.rs
│ ├── signer.rs
│ ├── slash_report_signer.rs
│ └── tests/
│ ├── addresses.rs
│ ├── batch_signer.rs
│ ├── cosigner.rs
│ ├── key_gen.rs
│ ├── literal/
│ │ └── mod.rs
│ ├── mod.rs
│ ├── scanner.rs
│ ├── signer.rs
│ └── wallet.rs
├── rust-toolchain.toml
├── spec/
│ ├── DKG Exclusions.md
│ ├── Getting Started.md
│ ├── Serai.md
│ ├── coordinator/
│ │ ├── Coordinator.md
│ │ └── Tributary.md
│ ├── cryptography/
│ │ ├── Distributed Key Generation.md
│ │ └── FROST.md
│ ├── integrations/
│ │ ├── Bitcoin.md
│ │ ├── Ethereum.md
│ │ ├── Instructions.md
│ │ └── Monero.md
│ ├── policy/
│ │ └── Canonical Chain.md
│ ├── processor/
│ │ ├── Multisig Rotation.md
│ │ ├── Processor.md
│ │ ├── Scanning.md
│ │ └── UTXO Management.md
│ └── protocol/
│ ├── Constants.md
│ ├── In Instructions.md
│ └── Validator Sets.md
├── substrate/
│ ├── abi/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ └── src/
│ │ ├── babe.rs
│ │ ├── coins.rs
│ │ ├── dex.rs
│ │ ├── economic_security.rs
│ │ ├── emissions.rs
│ │ ├── genesis_liquidity.rs
│ │ ├── grandpa.rs
│ │ ├── in_instructions.rs
│ │ ├── lib.rs
│ │ ├── liquidity_tokens.rs
│ │ ├── signals.rs
│ │ ├── system.rs
│ │ ├── timestamp.rs
│ │ ├── tx.rs
│ │ └── validator_sets.rs
│ ├── client/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ ├── src/
│ │ │ ├── lib.rs
│ │ │ ├── networks/
│ │ │ │ ├── bitcoin.rs
│ │ │ │ ├── mod.rs
│ │ │ │ └── monero.rs
│ │ │ ├── serai/
│ │ │ │ ├── coins.rs
│ │ │ │ ├── dex.rs
│ │ │ │ ├── genesis_liquidity.rs
│ │ │ │ ├── in_instructions.rs
│ │ │ │ ├── liquidity_tokens.rs
│ │ │ │ ├── mod.rs
│ │ │ │ └── validator_sets.rs
│ │ │ └── tests/
│ │ │ ├── mod.rs
│ │ │ └── networks/
│ │ │ ├── bitcoin.rs
│ │ │ ├── mod.rs
│ │ │ └── monero.rs
│ │ └── tests/
│ │ ├── batch.rs
│ │ ├── burn.rs
│ │ ├── common/
│ │ │ ├── dex.rs
│ │ │ ├── genesis_liquidity.rs
│ │ │ ├── in_instructions.rs
│ │ │ ├── mod.rs
│ │ │ ├── tx.rs
│ │ │ └── validator_sets.rs
│ │ ├── dex.rs
│ │ ├── dht.rs
│ │ ├── emissions.rs
│ │ ├── genesis_liquidity.rs
│ │ ├── time.rs
│ │ └── validator_sets.rs
│ ├── coins/
│ │ ├── pallet/
│ │ │ ├── Cargo.toml
│ │ │ ├── LICENSE
│ │ │ └── src/
│ │ │ ├── lib.rs
│ │ │ ├── mock.rs
│ │ │ └── tests.rs
│ │ └── primitives/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ └── src/
│ │ └── lib.rs
│ ├── dex/
│ │ └── pallet/
│ │ ├── Cargo.toml
│ │ ├── LICENSE-AGPL3
│ │ ├── LICENSE-APACHE2
│ │ └── src/
│ │ ├── benchmarking.rs
│ │ ├── lib.rs
│ │ ├── mock.rs
│ │ ├── tests.rs
│ │ ├── types.rs
│ │ └── weights.rs
│ ├── economic-security/
│ │ └── pallet/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ └── src/
│ │ └── lib.rs
│ ├── emissions/
│ │ ├── pallet/
│ │ │ ├── Cargo.toml
│ │ │ ├── LICENSE
│ │ │ └── src/
│ │ │ └── lib.rs
│ │ └── primitives/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ └── src/
│ │ └── lib.rs
│ ├── genesis-liquidity/
│ │ ├── pallet/
│ │ │ ├── Cargo.toml
│ │ │ ├── LICENSE
│ │ │ └── src/
│ │ │ └── lib.rs
│ │ └── primitives/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ └── src/
│ │ └── lib.rs
│ ├── in-instructions/
│ │ ├── pallet/
│ │ │ ├── Cargo.toml
│ │ │ ├── LICENSE
│ │ │ └── src/
│ │ │ └── lib.rs
│ │ └── primitives/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ └── src/
│ │ ├── lib.rs
│ │ └── shorthand.rs
│ ├── node/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ ├── build.rs
│ │ └── src/
│ │ ├── chain_spec.rs
│ │ ├── cli.rs
│ │ ├── command.rs
│ │ ├── keystore.rs
│ │ ├── main.rs
│ │ ├── rpc.rs
│ │ └── service.rs
│ ├── primitives/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ └── src/
│ │ ├── account.rs
│ │ ├── amount.rs
│ │ ├── balance.rs
│ │ ├── block.rs
│ │ ├── constants.rs
│ │ ├── lib.rs
│ │ └── networks.rs
│ ├── runtime/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ ├── build.rs
│ │ └── src/
│ │ ├── abi.rs
│ │ └── lib.rs
│ ├── signals/
│ │ ├── pallet/
│ │ │ ├── Cargo.toml
│ │ │ ├── LICENSE
│ │ │ └── src/
│ │ │ └── lib.rs
│ │ └── primitives/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ └── src/
│ │ └── lib.rs
│ └── validator-sets/
│ ├── pallet/
│ │ ├── Cargo.toml
│ │ ├── LICENSE
│ │ └── src/
│ │ └── lib.rs
│ └── primitives/
│ ├── Cargo.toml
│ ├── LICENSE
│ └── src/
│ └── lib.rs
└── tests/
├── coordinator/
│ ├── Cargo.toml
│ ├── LICENSE
│ └── src/
│ ├── lib.rs
│ └── tests/
│ ├── batch.rs
│ ├── key_gen.rs
│ ├── mod.rs
│ ├── rotation.rs
│ └── sign.rs
├── docker/
│ ├── Cargo.toml
│ ├── LICENSE
│ ├── README.md
│ └── src/
│ └── lib.rs
├── full-stack/
│ ├── Cargo.toml
│ ├── LICENSE
│ └── src/
│ ├── lib.rs
│ └── tests/
│ ├── mint_and_burn.rs
│ └── mod.rs
├── message-queue/
│ ├── Cargo.toml
│ ├── LICENSE
│ └── src/
│ └── lib.rs
├── no-std/
│ ├── Cargo.toml
│ ├── LICENSE
│ ├── README.md
│ └── src/
│ └── lib.rs
├── processor/
│ ├── Cargo.toml
│ ├── LICENSE
│ └── src/
│ ├── lib.rs
│ ├── networks.rs
│ └── tests/
│ ├── batch.rs
│ ├── key_gen.rs
│ ├── mod.rs
│ └── send.rs
└── reproducible-runtime/
├── Cargo.toml
├── LICENSE
└── src/
└── lib.rs
Showing preview only (273K chars total). Download the full file or copy to clipboard to get everything.
SYMBOL INDEX (3271 symbols across 285 files)
FILE: common/db/src/create_db.rs
function serai_db_key (line 2) | pub fn serai_db_key(
FILE: common/db/src/lib.rs
type Get (line 18) | pub trait Get {
method get (line 19) | fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>>;
type DbTxn (line 24) | pub trait DbTxn: Send + Get {
method put (line 25) | fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>);
method del (line 26) | fn del(&mut self, key: impl AsRef<[u8]>);
method commit (line 27) | fn commit(self);
type Db (line 31) | pub trait Db: 'static + Send + Sync + Clone + Get {
method key (line 33) | fn key(db_dst: &'static [u8], item_dst: &'static [u8], key: impl AsRef...
method txn (line 38) | fn txn(&mut self) -> Self::Transaction<'_>;
FILE: common/db/src/mem.rs
type MemDbTxn (line 12) | pub struct MemDbTxn<'a>(&'a MemDb, HashMap<Vec<u8>, Vec<u8>>, HashSet<Ve...
method get (line 15) | fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
method put (line 27) | fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) {
method del (line 31) | fn del(&mut self, key: impl AsRef<[u8]>) {
method commit (line 35) | fn commit(mut self) {
type MemDb (line 48) | pub struct MemDb(Arc<RwLock<HashMap<Vec<u8>, Vec<u8>>>>);
method new (line 65) | pub fn new() -> MemDb {
method eq (line 51) | fn eq(&self, other: &MemDb) -> bool {
method default (line 58) | fn default() -> MemDb {
method get (line 71) | fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
type Transaction (line 76) | type Transaction<'a> = MemDbTxn<'a>;
method txn (line 77) | fn txn(&mut self) -> MemDbTxn<'_> {
FILE: common/db/src/parity_db.rs
type Transaction (line 8) | pub struct Transaction<'a>(&'a Arc<ParityDb>, Vec<(u8, Vec<u8>, Option<V...
method get (line 11) | fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
method put (line 22) | fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) {
method del (line 25) | fn del(&mut self, key: impl AsRef<[u8]>) {
method commit (line 28) | fn commit(self) {
method get (line 34) | fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
type Transaction (line 39) | type Transaction<'a> = Transaction<'a>;
method txn (line 40) | fn txn(&mut self) -> Self::Transaction<'_> {
function new_parity_db (line 45) | pub fn new_parity_db(path: &str) -> Arc<ParityDb> {
FILE: common/db/src/rocks.rs
type Transaction (line 11) | pub struct Transaction<'a, T: ThreadMode>(
method get (line 17) | fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
method put (line 22) | fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) {
method del (line 25) | fn del(&mut self, key: impl AsRef<[u8]>) {
method commit (line 28) | fn commit(self) {
method get (line 36) | fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
type Transaction (line 41) | type Transaction<'a> = Transaction<'a, T>;
method txn (line 42) | fn txn(&mut self) -> Self::Transaction<'_> {
type RocksDB (line 49) | pub type RocksDB = Arc<OptimisticTransactionDB<SingleThreaded>>;
function new_rocksdb (line 50) | pub fn new_rocksdb(path: &str) -> RocksDB {
FILE: common/env/src/lib.rs
function var (line 4) | pub fn var(variable: &str) -> Option<String> {
FILE: common/patchable-async-sleep/src/lib.rs
function sleep (line 8) | pub fn sleep(duration: Duration) -> impl core::future::Future<Output = (...
FILE: common/request/src/lib.rs
type Error (line 25) | pub enum Error {
type Connector (line 35) | type Connector = HttpConnector;
type Connector (line 37) | type Connector = HttpsConnector<HttpConnector>;
type Connection (line 40) | enum Connection {
type Client (line 50) | pub struct Client {
method connector (line 55) | fn connector() -> Connector {
method with_connection_pool (line 72) | pub fn with_connection_pool() -> Client {
method without_connection_pool (line 82) | pub fn without_connection_pool(host: &str) -> Result<Client, Error> {
method request (line 98) | pub async fn request<R: Into<Request>>(&self, request: R) -> Result<Re...
FILE: common/request/src/request.rs
type Request (line 10) | pub struct Request(pub(crate) hyper::Request<Full<Bytes>>);
method username_password_from_uri (line 13) | fn username_password_from_uri(&self) -> Result<(String, String), Error> {
method basic_auth (line 32) | pub fn basic_auth(&mut self, username: &str, password: &str) {
method basic_auth_from_uri (line 47) | pub fn basic_auth_from_uri(&mut self) -> Result<(), Error> {
method with_basic_auth (line 59) | pub fn with_basic_auth(&mut self) {
method from (line 64) | fn from(request: hyper::Request<Full<Bytes>>) -> Request {
FILE: common/request/src/response.rs
type Response (line 13) | pub struct Response<'a>(pub(crate) hyper::Response<Incoming>, pub(crate)...
function status (line 15) | pub fn status(&self) -> StatusCode {
function headers (line 18) | pub fn headers(&self) -> &HeaderMap<HeaderValue> {
function body (line 21) | pub async fn body(self) -> Result<impl std::io::Read, Error> {
FILE: common/std-shims/src/io.rs
type ErrorKind (line 10) | pub enum ErrorKind {
type Error (line 15) | pub struct Error {
method new (line 27) | pub fn new<E: 'static + Send + Sync>(kind: ErrorKind, error: E) -> Err...
method other (line 31) | pub fn other<E: 'static + Send + Sync>(error: E) -> Error {
method kind (line 35) | pub fn kind(&self) -> ErrorKind {
method into_inner (line 39) | pub fn into_inner(self) -> Option<Box<dyn Send + Sync>> {
method fmt (line 21) | fn fmt(&self, fmt: &mut Formatter<'_>) -> core::result::Result<(), core:...
type Result (line 44) | pub type Result<T> = core::result::Result<T, Error>;
type Read (line 46) | pub trait Read {
method read (line 47) | fn read(&mut self, buf: &mut [u8]) -> Result<usize>;
method read_exact (line 49) | fn read_exact(&mut self, buf: &mut [u8]) -> Result<()> {
method read (line 59) | fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
type BufRead (line 67) | pub trait BufRead: Read {
method fill_buf (line 68) | fn fill_buf(&mut self) -> Result<&[u8]>;
method consume (line 69) | fn consume(&mut self, amt: usize);
method fill_buf (line 73) | fn fill_buf(&mut self) -> Result<&[u8]> {
method consume (line 76) | fn consume(&mut self, amt: usize) {
type Write (line 81) | pub trait Write {
method write (line 82) | fn write(&mut self, buf: &[u8]) -> Result<usize>;
method write_all (line 83) | fn write_all(&mut self, buf: &[u8]) -> Result<()> {
method write (line 92) | fn write(&mut self, buf: &[u8]) -> Result<usize> {
FILE: common/std-shims/src/lib.rs
type StdShimsDivCeil (line 18) | pub trait StdShimsDivCeil {
method div_ceil (line 19) | fn div_ceil(self, rhs: Self) -> Self;
method div_ceil (line 25) | fn div_ceil(self, rhs: Self) -> Self {
method div_ceil (line 30) | fn div_ceil(self, rhs: Self) -> Self {
method div_ceil (line 35) | fn div_ceil(self, rhs: Self) -> Self {
method div_ceil (line 40) | fn div_ceil(self, rhs: Self) -> Self {
method div_ceil (line 45) | fn div_ceil(self, rhs: Self) -> Self {
method div_ceil (line 50) | fn div_ceil(self, rhs: Self) -> Self {
type StdShimsIoErrorOther (line 59) | pub trait StdShimsIoErrorOther {
method other (line 60) | fn other<E>(error: E) -> Self
method other (line 67) | fn other<E>(error: E) -> Self
FILE: common/std-shims/src/sync.rs
type ShimMutex (line 11) | pub struct ShimMutex<T>(Mutex<T>);
function new (line 13) | pub const fn new(value: T) -> Self {
function lock (line 17) | pub fn lock(&self) -> MutexGuard<'_, T> {
FILE: common/zalloc/build.rs
function main (line 2) | fn main() {
function main (line 8) | fn main() {
FILE: common/zalloc/src/lib.rs
type ZeroizingAlloc (line 18) | pub struct ZeroizingAlloc<T>(pub T);
method allocate (line 27) | fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
method deallocate (line 31) | unsafe fn deallocate(&self, mut ptr: NonNull<u8>, layout: Layout) {
method alloc (line 38) | unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
method dealloc (line 42) | unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
FILE: coordinator/src/cosign_evaluator.rs
type CosignEvaluator (line 37) | pub struct CosignEvaluator<D: Db> {
function update_latest_cosign (line 45) | async fn update_latest_cosign(&self) {
function update_stakes (line 78) | async fn update_stakes(&self) -> Result<(), SeraiError> {
function handle_new_cosign (line 107) | async fn handle_new_cosign(&self, cosign: CosignedBlock) -> Result<(), S...
function new (line 269) | pub fn new<P: P2p>(db: D, p2p: P, serai: Arc<Serai>) -> mpsc::UnboundedS...
FILE: coordinator/src/db.rs
method active_tributaries (line 40) | pub fn active_tributaries<G: Get>(getter: &G) -> (Vec<u8>, Vec<Tributary...
method add_participating_in_tributary (line 52) | pub fn add_participating_in_tributary(txn: &mut impl DbTxn, spec: &Tribu...
method retire_tributary (line 64) | pub fn retire_tributary(txn: &mut impl DbTxn, set: ExternalValidatorSet) {
method save_first_preprocess (line 83) | pub fn save_first_preprocess(
method save_expected_batch (line 99) | pub fn save_expected_batch(txn: &mut impl DbTxn, batch: &Batch) {
method set_handover_batch (line 111) | pub fn set_handover_batch(txn: &mut impl DbTxn, set: ExternalValidatorSe...
method queue (line 117) | pub fn queue(txn: &mut impl DbTxn, set: ExternalValidatorSet, batch: &Tr...
method take (line 123) | pub fn take(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Vec<Tran...
FILE: coordinator/src/main.rs
type ActiveTributary (line 77) | pub struct ActiveTributary<D: Db, P: P2p> {
type TributaryEvent (line 83) | pub enum TributaryEvent<D: Db, P: P2p> {
function add_tributary (line 89) | async fn add_tributary<D: Db, Pro: Processors, P: P2p>(
function handle_processor_message (line 144) | async fn handle_processor_message<D: Db, P: P2p>(
function handle_processor_messages (line 785) | async fn handle_processor_messages<D: Db, Pro: Processors, P: P2p>(
function handle_cosigns_and_batch_publication (line 839) | async fn handle_cosigns_and_batch_publication<D: Db, P: P2p>(
function handle_processors (line 971) | pub async fn handle_processors<D: Db, Pro: Processors, P: P2p>(
function run (line 1015) | pub async fn run<D: Db, Pro: Processors, P: P2p>(
function main (line 1316) | async fn main() {
FILE: coordinator/src/p2p.rs
constant MAX_LIBP2P_GOSSIP_MESSAGE_SIZE (line 50) | const MAX_LIBP2P_GOSSIP_MESSAGE_SIZE: usize = tributary::BLOCK_SIZE_LIMI...
constant MAX_LIBP2P_REQRES_MESSAGE_SIZE (line 52) | const MAX_LIBP2P_REQRES_MESSAGE_SIZE: usize =
constant MAX_LIBP2P_MESSAGE_SIZE (line 55) | const MAX_LIBP2P_MESSAGE_SIZE: usize = {
constant LIBP2P_TOPIC (line 64) | const LIBP2P_TOPIC: &str = "serai-coordinator";
constant BLOCKS_PER_MINUTE (line 67) | const BLOCKS_PER_MINUTE: usize = (60 / (tributary::tendermint::TARGET_BL...
constant BLOCKS_PER_BATCH (line 70) | const BLOCKS_PER_BATCH: usize = BLOCKS_PER_MINUTE + 1;
type CosignedBlock (line 73) | pub struct CosignedBlock {
type ReqResMessageKind (line 81) | pub enum ReqResMessageKind {
method read (line 88) | pub fn read<R: Read>(reader: &mut R) -> Option<ReqResMessageKind> {
method serialize (line 107) | pub fn serialize(&self) -> Vec<u8> {
type GossipMessageKind (line 125) | pub enum GossipMessageKind {
method read (line 131) | pub fn read<R: Read>(reader: &mut R) -> Option<GossipMessageKind> {
method serialize (line 145) | pub fn serialize(&self) -> Vec<u8> {
type P2pMessageKind (line 160) | pub enum P2pMessageKind {
method genesis (line 166) | fn genesis(&self) -> Option<[u8; 32]> {
method from (line 179) | fn from(kind: ReqResMessageKind) -> P2pMessageKind {
method from (line 185) | fn from(kind: GossipMessageKind) -> P2pMessageKind {
type Message (line 191) | pub struct Message<P: P2p> {
type BlockCommit (line 198) | pub struct BlockCommit {
type HeartbeatBatch (line 204) | pub struct HeartbeatBatch {
type P2p (line 210) | pub trait P2p: Send + Sync + Clone + fmt::Debug + TributaryP2p {
method subscribe (line 213) | async fn subscribe(&self, set: ExternalValidatorSet, genesis: [u8; 32]);
method unsubscribe (line 214) | async fn unsubscribe(&self, set: ExternalValidatorSet, genesis: [u8; 3...
method send_raw (line 216) | async fn send_raw(&self, to: Self::Id, msg: Vec<u8>);
method broadcast_raw (line 217) | async fn broadcast_raw(&self, kind: P2pMessageKind, msg: Vec<u8>);
method receive (line 218) | async fn receive(&self) -> Message<Self>;
method send (line 220) | async fn send(&self, to: Self::Id, kind: ReqResMessageKind, msg: Vec<u...
method broadcast (line 225) | async fn broadcast(&self, kind: impl Send + Into<P2pMessageKind>, msg:...
type Id (line 780) | type Id = PeerId;
method subscribe (line 782) | async fn subscribe(&self, set: ExternalValidatorSet, genesis: [u8; 32]) {
method unsubscribe (line 791) | async fn unsubscribe(&self, set: ExternalValidatorSet, genesis: [u8; 3...
method send_raw (line 800) | async fn send_raw(&self, peer: Self::Id, msg: Vec<u8>) {
method broadcast_raw (line 804) | async fn broadcast_raw(&self, kind: P2pMessageKind, msg: Vec<u8>) {
method receive (line 815) | async fn receive(&self) -> Message<Self> {
type RrCodec (line 249) | struct RrCodec;
type Protocol (line 252) | type Protocol = &'static str;
type Request (line 253) | type Request = Vec<u8>;
type Response (line 254) | type Response = Vec<u8>;
method read_request (line 256) | async fn read_request<R: Send + Unpin + AsyncRead>(
method read_response (line 274) | async fn read_response<R: Send + Unpin + AsyncRead>(
method write_request (line 281) | async fn write_request<W: Send + Unpin + AsyncWrite>(
method write_response (line 295) | async fn write_response<W: Send + Unpin + AsyncWrite>(
type Behavior (line 306) | struct Behavior {
type LibP2p (line 313) | pub struct LibP2p {
method fmt (line 320) | fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
method new (line 327) | pub fn new(serai: Arc<Serai>) -> Self {
method broadcast (line 822) | async fn broadcast(&self, genesis: [u8; 32], msg: Vec<u8>) {
function heartbeat_tributaries_task (line 827) | pub async fn heartbeat_tributaries_task<D: Db, P: P2p>(
function handle_p2p_task (line 875) | pub async fn handle_p2p_task<D: Db, P: P2p>(
FILE: coordinator/src/processors.rs
type Message (line 9) | pub struct Message {
type Processors (line 16) | pub trait Processors: 'static + Send + Sync + Clone {
method send (line 17) | async fn send(&self, network: ExternalNetworkId, msg: impl Send + Into...
method recv (line 18) | async fn recv(&self, network: ExternalNetworkId) -> Message;
method ack (line 19) | async fn ack(&self, msg: Message);
method send (line 24) | async fn send(&self, network: ExternalNetworkId, msg: impl Send + Into...
method recv (line 31) | async fn recv(&self, network: ExternalNetworkId) -> Message {
method ack (line 43) | async fn ack(&self, msg: Message) {
FILE: coordinator/src/substrate/cosign.rs
constant COSIGN_DISTANCE (line 34) | const COSIGN_DISTANCE: u64 = 5 * 60 / 6;
type HasEvents (line 37) | enum HasEvents {
method set_intended_cosign (line 54) | pub fn set_intended_cosign(txn: &mut impl DbTxn, intended: u64) {
method set_skipped_cosign (line 59) | pub fn set_skipped_cosign(txn: &mut impl DbTxn, skipped: u64) {
method latest_cosigned_block (line 67) | pub fn latest_cosigned_block(getter: &impl Get) -> u64 {
method append_cosign (line 80) | pub fn append_cosign(
function block_has_events (line 90) | async fn block_has_events(
function potentially_cosign_block (line 124) | async fn potentially_cosign_block(
function advance_cosign_protocol_inner (line 189) | async fn advance_cosign_protocol_inner(
function advance_cosign_protocol (line 321) | pub async fn advance_cosign_protocol(
FILE: coordinator/src/substrate/db.rs
type HandledEvent (line 18) | pub struct HandledEvent;
method next_to_handle_event (line 20) | fn next_to_handle_event(getter: &impl Get, block: [u8; 32]) -> u32 {
method is_unhandled (line 23) | pub fn is_unhandled(getter: &impl Get, block: [u8; 32], event_id: u32)...
method handle_event (line 28) | pub fn handle_event(txn: &mut impl DbTxn, block: [u8; 32], index: u32) {
FILE: coordinator/src/substrate/mod.rs
function in_set (line 41) | async fn in_set(
function handle_new_set (line 53) | async fn handle_new_set<D: Db>(
function handle_batch_and_burns (line 117) | async fn handle_batch_and_burns<Pro: Processors>(
function handle_block (line 208) | async fn handle_block<D: Db, Pro: Processors>(
function handle_new_blocks (line 333) | async fn handle_new_blocks<D: Db, Pro: Processors>(
function scan_task (line 386) | pub async fn scan_task<D: Db, Pro: Processors>(
function expected_next_batch (line 489) | pub(crate) async fn expected_next_batch(
function verify_published_batches (line 517) | pub(crate) async fn verify_published_batches<D: Db>(
FILE: coordinator/src/tests/mod.rs
type MemProcessors (line 23) | pub struct MemProcessors(pub Arc<RwLock<HashMap<ExternalNetworkId, VecDe...
method new (line 26) | pub fn new() -> MemProcessors {
method send (line 33) | async fn send(&self, network: ExternalNetworkId, msg: impl Send + Into<C...
method recv (line 38) | async fn recv(&self, _: ExternalNetworkId) -> Message {
method ack (line 41) | async fn ack(&self, _: Message) {
type LocalP2p (line 48) | pub struct LocalP2p(
method new (line 54) | pub fn new(validators: usize) -> Vec<LocalP2p> {
type Id (line 66) | type Id = usize;
method subscribe (line 68) | async fn subscribe(&self, _set: ExternalValidatorSet, _genesis: [u8; 32]...
method unsubscribe (line 69) | async fn unsubscribe(&self, _set: ExternalValidatorSet, _genesis: [u8; 3...
method send_raw (line 71) | async fn send_raw(&self, to: Self::Id, msg: Vec<u8>) {
method broadcast_raw (line 77) | async fn broadcast_raw(&self, kind: P2pMessageKind, msg: Vec<u8>) {
method receive (line 104) | async fn receive(&self) -> P2pMessage<Self> {
method broadcast (line 117) | async fn broadcast(&self, genesis: [u8; 32], msg: Vec<u8>) {
FILE: coordinator/src/tests/tributary/chain.rs
function new_keys (line 35) | pub fn new_keys<R: RngCore + CryptoRng>(
function new_spec (line 45) | pub fn new_spec<R: RngCore + CryptoRng>(
function new_tributaries (line 71) | pub async fn new_tributaries(
function run_tributaries (line 97) | pub async fn run_tributaries(
function wait_for_tx_inclusion (line 119) | pub async fn wait_for_tx_inclusion(
function tributary_test (line 159) | async fn tributary_test() {
FILE: coordinator/src/tests/tributary/dkg.rs
function dkg_test (line 40) | async fn dkg_test() {
FILE: coordinator/src/tests/tributary/handle_p2p.rs
function handle_p2p_test (line 26) | async fn handle_p2p_test() {
FILE: coordinator/src/tests/tributary/mod.rs
method publish_set_keys (line 32) | async fn publish_set_keys(
function random_u32 (line 44) | fn random_u32<R: RngCore>(rng: &mut R) -> u32 {
function random_vec (line 48) | fn random_vec<R: RngCore>(rng: &mut R, limit: usize) -> Vec<u8> {
function random_sign_data (line 55) | fn random_sign_data<R: RngCore, Id: Clone + PartialEq + Eq + Debug + Enc...
function test_read_write (line 77) | fn test_read_write<RW: Eq + Debug + ReadWrite>(value: &RW) {
function tx_size_limit (line 82) | fn tx_size_limit() {
function serialize_sign_data (line 108) | fn serialize_sign_data() {
function serialize_transaction (line 146) | fn serialize_transaction() {
FILE: coordinator/src/tests/tributary/sync.rs
function sync_test (line 29) | async fn sync_test() {
FILE: coordinator/src/tests/tributary/tx.rs
function tx_test (line 22) | async fn tx_test() {
FILE: coordinator/src/tributary/db.rs
type Topic (line 21) | pub enum Topic {
type DataSpecification (line 30) | pub struct DataSpecification {
type DataSet (line 36) | pub enum DataSet {
type Accumulation (line 41) | pub enum Accumulation {
method get_as_keys (line 89) | pub fn get_as_keys(getter: &impl Get, genesis: [u8; 32]) -> Vec<<Ristret...
method set_fatally_slashed (line 99) | pub fn set_fatally_slashed(txn: &mut impl DbTxn, genesis: [u8; 32], acco...
method recognize_topic (line 114) | pub fn recognize_topic(txn: &mut impl DbTxn, genesis: [u8; 32], topic: T...
method start_next_attempt (line 118) | pub fn start_next_attempt(txn: &mut impl DbTxn, genesis: [u8; 32], topic...
method attempt (line 125) | pub fn attempt(getter: &impl Get, genesis: [u8; 32], topic: Topic) -> Op...
method schedule_reattempt (line 142) | pub fn schedule_reattempt(
method take (line 176) | pub fn take(txn: &mut impl DbTxn, genesis: [u8; 32], block_number: u32) ...
method take_signed_transaction (line 186) | pub fn take_signed_transaction(
FILE: coordinator/src/tributary/handle.rs
function dkg_confirmation_nonces (line 36) | pub fn dkg_confirmation_nonces(
function generated_key_pair (line 47) | pub fn generated_key_pair<D: Db>(
function unflatten (line 62) | fn unflatten(
function accumulate (line 90) | fn accumulate(
function handle_data (line 169) | fn handle_data(
function check_sign_data_len (line 220) | fn check_sign_data_len(
function handle_application_tx (line 244) | pub(crate) async fn handle_application_tx(&mut self, tx: Transaction) {
FILE: coordinator/src/tributary/mod.rs
function removed_as_of_dkg_attempt (line 28) | pub fn removed_as_of_dkg_attempt(
function removed_as_of_set_keys (line 42) | pub fn removed_as_of_set_keys(
function publish_signed_transaction (line 61) | pub async fn publish_signed_transaction<D: Db, P: crate::P2p>(
FILE: coordinator/src/tributary/scanner.rs
type RecognizedIdType (line 33) | pub enum RecognizedIdType {
type RIDTrait (line 39) | pub trait RIDTrait {
method recognized_id (line 40) | async fn recognized_id(
method recognized_id (line 54) | async fn recognized_id(
type PublishSeraiTransaction (line 66) | pub trait PublishSeraiTransaction {
method publish_set_keys (line 67) | async fn publish_set_keys(
method publish_set_keys (line 129) | async fn publish_set_keys(
type PTTTrait (line 160) | pub trait PTTTrait {
method publish_tributary_tx (line 161) | async fn publish_tributary_tx(&self, tx: Transaction);
method publish_tributary_tx (line 165) | async fn publish_tributary_tx(&self, tx: Transaction) {
type TributaryBlockHandler (line 170) | pub struct TributaryBlockHandler<
function fatal_slash (line 203) | pub fn fatal_slash(&mut self, slashing: [u8; 32], reason: &str) {
function handle (line 216) | async fn handle(mut self) {
function handle_new_blocks (line 661) | pub(crate) async fn handle_new_blocks<
function scan_tributaries_task (line 722) | pub(crate) async fn scan_tributaries_task<
FILE: coordinator/src/tributary/signing_protocol.rs
type SigningProtocol (line 92) | struct SigningProtocol<'a, T: DbTxn, C: Encode> {
function preprocess_internal (line 100) | fn preprocess_internal(
function share_internal (line 150) | fn share_internal(
function complete_internal (line 183) | fn complete_internal(
function threshold_i_map_to_keys_and_musig_i_map (line 208) | fn threshold_i_map_to_keys_and_musig_i_map(
type DkgConfirmerSigningProtocol (line 252) | type DkgConfirmerSigningProtocol<'a, T> = SigningProtocol<'a, T, (&'stat...
type DkgConfirmer (line 254) | pub(crate) struct DkgConfirmer<'a, T: DbTxn> {
function new (line 263) | pub(crate) fn new<'a>(
function signing_protocol (line 274) | fn signing_protocol(&mut self) -> DkgConfirmerSigningProtocol<'_, T> {
function preprocess_internal (line 279) | fn preprocess_internal(&mut self) -> (AlgorithmSignMachine<Ristretto, Sc...
function preprocess (line 284) | pub(crate) fn preprocess(&mut self) -> [u8; 64] {
function share_internal (line 288) | fn share_internal(
function share (line 304) | pub(crate) fn share(
function complete (line 312) | pub(crate) fn complete(
FILE: coordinator/src/tributary/spec.rs
function borsh_serialize_validators (line 15) | fn borsh_serialize_validators<W: io::Write>(
function borsh_deserialize_validators (line 28) | fn borsh_deserialize_validators<R: io::Read>(
type TributarySpec (line 44) | pub struct TributarySpec {
method new (line 56) | pub fn new(
method set (line 72) | pub fn set(&self) -> ExternalValidatorSet {
method genesis (line 76) | pub fn genesis(&self) -> [u8; 32] {
method start_time (line 88) | pub fn start_time(&self) -> u64 {
method n (line 92) | pub fn n(&self, removed_validators: &[<Ristretto as Ciphersuite>::G]) ...
method t (line 100) | pub fn t(&self) -> u16 {
method i (line 105) | pub fn i(
method reverse_lookup_i (line 141) | pub fn reverse_lookup_i(
method validators (line 154) | pub fn validators(&self) -> Vec<(<Ristretto as Ciphersuite>::G, u64)> {
FILE: coordinator/src/tributary/transaction.rs
type Label (line 27) | pub enum Label {
method nonce (line 34) | pub fn nonce(&self) -> u32 {
type SignData (line 43) | pub struct SignData<Id: Clone + PartialEq + Eq + Debug + Encode + Decode> {
method fmt (line 54) | fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fm...
function read (line 66) | pub(crate) fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
function write (line 104) | pub(crate) fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<(...
type Transaction (line 133) | pub enum Transaction {
method empty_signed (line 628) | pub fn empty_signed() -> Signed {
method sign (line 640) | pub fn sign<R: RngCore + CryptoRng>(
method sign_completed_challenge (line 703) | pub fn sign_completed_challenge(&self) -> <Ristretto as Ciphersuite>::F {
method fmt (line 199) | fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fm...
method read (line 260) | fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
method write (line 447) | fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
method kind (line 572) | fn kind(&self) -> TransactionKind<'_> {
method hash (line 604) | fn hash(&self) -> [u8; 32] {
method verify (line 613) | fn verify(&self) -> Result<(), TransactionError> {
FILE: coordinator/tributary/src/block.rs
type BlockError (line 22) | pub enum BlockError {
type BlockHeader (line 53) | pub struct BlockHeader {
method hash (line 73) | pub fn hash(&self) -> [u8; 32] {
method read (line 59) | fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
method write (line 66) | fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
type Block (line 79) | pub struct Block<T: TransactionTrait> {
method read (line 85) | fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
method write (line 100) | fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
function new (line 114) | pub(crate) fn new(parent: [u8; 32], provided: Vec<T>, mempool: Vec<Trans...
function parent (line 162) | pub fn parent(&self) -> [u8; 32] {
function hash (line 166) | pub fn hash(&self) -> [u8; 32] {
function verify (line 171) | pub(crate) fn verify<N: Network, G: GAIN>(
FILE: coordinator/tributary/src/blockchain.rs
type Blockchain (line 18) | pub(crate) struct Blockchain<D: Db, T: TransactionTrait> {
function tip_key (line 33) | fn tip_key(genesis: [u8; 32]) -> Vec<u8> {
function block_number_key (line 36) | fn block_number_key(&self) -> Vec<u8> {
function block_key (line 39) | fn block_key(genesis: &[u8], hash: &[u8; 32]) -> Vec<u8> {
function block_hash_key (line 42) | fn block_hash_key(genesis: &[u8], block_number: u64) -> Vec<u8> {
function commit_key (line 45) | fn commit_key(genesis: &[u8], hash: &[u8; 32]) -> Vec<u8> {
function block_after_key (line 48) | fn block_after_key(genesis: &[u8], hash: &[u8; 32]) -> Vec<u8> {
function unsigned_included_key (line 51) | fn unsigned_included_key(genesis: &[u8], hash: &[u8; 32]) -> Vec<u8> {
function provided_included_key (line 54) | fn provided_included_key(genesis: &[u8], hash: &[u8; 32]) -> Vec<u8> {
function next_nonce_key (line 57) | fn next_nonce_key(
function new (line 69) | pub(crate) fn new(
function tip (line 99) | pub(crate) fn tip(&self) -> [u8; 32] {
function block_number (line 103) | pub(crate) fn block_number(&self) -> u64 {
function block_from_db (line 107) | pub(crate) fn block_from_db(db: &D, genesis: [u8; 32], block: &[u8; 32])...
function commit_from_db (line 112) | pub(crate) fn commit_from_db(db: &D, genesis: [u8; 32], block: &[u8; 32]...
function block_hash_from_db (line 116) | pub(crate) fn block_hash_from_db(db: &D, genesis: [u8; 32], block: u64) ...
function commit (line 120) | pub(crate) fn commit(&self, block: &[u8; 32]) -> Option<Vec<u8>> {
function block_hash (line 124) | pub(crate) fn block_hash(&self, block: u64) -> Option<[u8; 32]> {
function commit_by_block_number (line 128) | pub(crate) fn commit_by_block_number(&self, block: u64) -> Option<Vec<u8...
function block_after (line 132) | pub(crate) fn block_after(db: &D, genesis: [u8; 32], block: &[u8; 32]) -...
function locally_provided_txs_in_block (line 136) | pub(crate) fn locally_provided_txs_in_block(
function tip_from_db (line 151) | pub(crate) fn tip_from_db(db: &D, genesis: [u8; 32]) -> [u8; 32] {
function add_transaction (line 155) | pub(crate) fn add_transaction<N: Network>(
function provide_transaction (line 193) | pub(crate) fn provide_transaction(&mut self, tx: T) -> Result<(), Provid...
function next_nonce (line 197) | pub(crate) fn next_nonce(
function build_block (line 219) | pub(crate) fn build_block<N: Network>(&mut self, schema: &N::SignatureSc...
function verify_block (line 230) | pub(crate) fn verify_block<N: Network>(
function add_block (line 276) | pub(crate) fn add_block<N: Network>(
FILE: coordinator/tributary/src/lib.rs
constant TRANSACTION_SIZE_LIMIT (line 54) | pub const TRANSACTION_SIZE_LIMIT: usize = 3_000_000;
constant ACCOUNT_MEMPOOL_LIMIT (line 56) | pub const ACCOUNT_MEMPOOL_LIMIT: u32 = 50;
constant BLOCK_SIZE_LIMIT (line 60) | pub const BLOCK_SIZE_LIMIT: usize = 3_001_000;
constant TENDERMINT_MESSAGE (line 62) | pub(crate) const TENDERMINT_MESSAGE: u8 = 0;
constant TRANSACTION_MESSAGE (line 63) | pub(crate) const TRANSACTION_MESSAGE: u8 = 1;
type Transaction (line 67) | pub enum Transaction<T: TransactionTrait> {
function hash (line 103) | pub fn hash(&self) -> [u8; 32] {
function kind (line 110) | pub fn kind(&self) -> TransactionKind<'_> {
type ReadWrite (line 119) | pub trait ReadWrite: Sized {
method read (line 73) | fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
method write (line 88) | fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
method read (line 120) | fn read<R: io::Read>(reader: &mut R) -> io::Result<Self>;
method write (line 121) | fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()>;
method serialize (line 123) | fn serialize(&self) -> Vec<u8> {
type P2p (line 132) | pub trait P2p: 'static + Send + Sync + Clone + Debug {
method broadcast (line 138) | async fn broadcast(&self, genesis: [u8; 32], msg: Vec<u8>);
method broadcast (line 143) | async fn broadcast(&self, genesis: [u8; 32], msg: Vec<u8>) {
type Tributary (line 149) | pub struct Tributary<D: Db, T: TransactionTrait, P: P2p> {
function new (line 161) | pub async fn new(
function block_time (line 213) | pub fn block_time() -> u32 {
function genesis (line 217) | pub fn genesis(&self) -> [u8; 32] {
function block_number (line 221) | pub async fn block_number(&self) -> u64 {
function tip (line 224) | pub async fn tip(&self) -> [u8; 32] {
function reader (line 228) | pub fn reader(&self) -> TributaryReader<D, T> {
function provide_transaction (line 232) | pub async fn provide_transaction(&self, tx: T) -> Result<(), ProvidedErr...
function next_nonce (line 236) | pub async fn next_nonce(
function add_transaction (line 247) | pub async fn add_transaction(&self, tx: T) -> Result<bool, TransactionEr...
function sync_block_internal (line 262) | async fn sync_block_internal(
function sync_block (line 302) | pub async fn sync_block(&self, block: Block<T>, commit: Vec<u8>) -> bool {
function handle_message (line 308) | pub async fn handle_message(&self, msg: &[u8]) -> bool {
function next_block_notification (line 345) | pub async fn next_block_notification(
type TributaryReader (line 355) | pub struct TributaryReader<D: Db, T: TransactionTrait>(D, [u8; 32], Phan...
function genesis (line 357) | pub fn genesis(&self) -> [u8; 32] {
function block (line 363) | pub fn block(&self, hash: &[u8; 32]) -> Option<Block<T>> {
function commit (line 366) | pub fn commit(&self, hash: &[u8; 32]) -> Option<Vec<u8>> {
function parsed_commit (line 369) | pub fn parsed_commit(&self, hash: &[u8; 32]) -> Option<Commit<Validators...
function block_after (line 372) | pub fn block_after(&self, hash: &[u8; 32]) -> Option<[u8; 32]> {
function time_of_block (line 375) | pub fn time_of_block(&self, hash: &[u8; 32]) -> Option<u64> {
function locally_provided_txs_in_block (line 381) | pub fn locally_provided_txs_in_block(&self, hash: &[u8; 32], order: &str...
function tip (line 386) | pub fn tip(&self) -> [u8; 32] {
FILE: coordinator/tributary/src/mempool.rs
type Mempool (line 20) | pub(crate) struct Mempool<D: Db, T: TransactionTrait> {
function transaction_key (line 30) | fn transaction_key(&self, hash: &[u8]) -> Vec<u8> {
function current_mempool_key (line 33) | fn current_mempool_key(&self) -> Vec<u8> {
function save_tx (line 38) | fn save_tx(&mut self, tx: Transaction<T>) {
function unsigned_already_exist (line 53) | fn unsigned_already_exist(
function new (line 61) | pub(crate) fn new(db: D, genesis: [u8; 32]) -> Self {
function add (line 108) | pub(crate) fn add<
function next_nonce_in_mempool (line 180) | pub(crate) fn next_nonce_in_mempool(
function block (line 189) | pub(crate) fn block(&mut self) -> Vec<Transaction<T>> {
function remove (line 222) | pub(crate) fn remove(&mut self, tx: &[u8; 32]) {
function txs (line 257) | pub(crate) fn txs(&self) -> &HashMap<[u8; 32], Transaction<T>> {
FILE: coordinator/tributary/src/merkle.rs
function merkle (line 3) | pub(crate) fn merkle(hash_args: &[[u8; 32]]) -> [u8; 32] {
FILE: coordinator/tributary/src/provided.rs
type ProvidedError (line 10) | pub enum ProvidedError {
type ProvidedTransactions (line 26) | pub struct ProvidedTransactions<D: Db, T: Transaction> {
function transaction_key (line 34) | fn transaction_key(&self, hash: &[u8]) -> Vec<u8> {
function current_provided_key (line 37) | fn current_provided_key(&self) -> Vec<u8> {
function locally_provided_quantity_key (line 40) | pub(crate) fn locally_provided_quantity_key(genesis: &[u8; 32], order: &...
function on_chain_provided_quantity_key (line 43) | pub(crate) fn on_chain_provided_quantity_key(genesis: &[u8; 32], order: ...
function block_provided_quantity_key (line 46) | pub(crate) fn block_provided_quantity_key(
function on_chain_provided_key (line 54) | pub(crate) fn on_chain_provided_key(genesis: &[u8; 32], order: &str, id:...
function new (line 62) | pub(crate) fn new(db: D, genesis: [u8; 32]) -> Self {
function provide (line 89) | pub(crate) fn provide(&mut self, tx: T) -> Result<(), ProvidedError> {
function complete (line 148) | pub(crate) fn complete(
FILE: coordinator/tributary/src/tendermint/mod.rs
constant DST (line 49) | const DST: &[u8] = b"Tributary Tendermint Commit Aggregator";
function challenge (line 51) | fn challenge(
type Signer (line 67) | pub struct Signer {
method new (line 73) | pub(crate) fn new(genesis: [u8; 32], key: Zeroizing<<Ristretto as Ciph...
type ValidatorId (line 80) | type ValidatorId = [u8; 32];
type Signature (line 81) | type Signature = [u8; 64];
method validator_id (line 84) | async fn validator_id(&self) -> Option<Self::ValidatorId> {
method sign (line 89) | async fn sign(&self, msg: &[u8]) -> Self::Signature {
type Validators (line 126) | pub struct Validators {
method new (line 134) | pub(crate) fn new(
type ValidatorId (line 162) | type ValidatorId = [u8; 32];
type Signature (line 163) | type Signature = [u8; 64];
type AggregateSignature (line 164) | type AggregateSignature = Vec<u8>;
type Signer (line 165) | type Signer = Arc<Signer>;
method verify (line 168) | fn verify(&self, validator: Self::ValidatorId, msg: &[u8], sig: &Self::S...
method aggregate (line 181) | fn aggregate(
method verify_aggregate (line 201) | fn verify_aggregate(
type ValidatorId (line 233) | type ValidatorId = [u8; 32];
method total_weight (line 235) | fn total_weight(&self) -> u64 {
method weight (line 238) | fn weight(&self, validator: Self::ValidatorId) -> u64 {
method proposer (line 241) | fn proposer(&self, block: BlockNumber, round: RoundNumber) -> Self::Vali...
type TendermintBlock (line 255) | pub struct TendermintBlock(pub Vec<u8>);
type Id (line 257) | type Id = [u8; 32];
method id (line 258) | fn id(&self) -> Self::Id {
type TendermintNetwork (line 264) | pub struct TendermintNetwork<D: Db, T: TransactionTrait, P: P2p> {
constant BLOCK_PROCESSING_TIME (line 274) | pub const BLOCK_PROCESSING_TIME: u32 = 999;
constant LATENCY_TIME (line 275) | pub const LATENCY_TIME: u32 = 1667;
constant TARGET_BLOCK_TIME (line 276) | pub const TARGET_BLOCK_TIME: u32 = BLOCK_PROCESSING_TIME + (3 * LATENCY_...
type Db (line 280) | type Db = D;
type ValidatorId (line 282) | type ValidatorId = [u8; 32];
type SignatureScheme (line 283) | type SignatureScheme = Arc<Validators>;
type Weights (line 284) | type Weights = Arc<Validators>;
type Block (line 285) | type Block = TendermintBlock;
constant BLOCK_PROCESSING_TIME (line 291) | const BLOCK_PROCESSING_TIME: u32 = BLOCK_PROCESSING_TIME;
constant LATENCY_TIME (line 292) | const LATENCY_TIME: u32 = LATENCY_TIME;
method signer (line 294) | fn signer(&self) -> Arc<Signer> {
method signature_scheme (line 297) | fn signature_scheme(&self) -> Arc<Validators> {
method weights (line 300) | fn weights(&self) -> Arc<Validators> {
method broadcast (line 304) | async fn broadcast(&mut self, msg: SignedMessageFor<Self>) {
method slash (line 310) | async fn slash(&mut self, validator: Self::ValidatorId, slash_event: Sla...
method validate (line 345) | async fn validate(&self, block: &Self::Block) -> Result<(), TendermintBl...
method add_block (line 362) | async fn add_block(
FILE: coordinator/tributary/src/tendermint/tx.rs
type TendermintTx (line 24) | pub enum TendermintTx {
method read (line 29) | fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
method write (line 35) | fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
method kind (line 43) | fn kind(&self) -> TransactionKind<'_> {
method hash (line 49) | fn hash(&self) -> [u8; 32] {
method sig_hash (line 53) | fn sig_hash(&self, _genesis: [u8; 32]) -> <Ristretto as Ciphersuite>::F {
method verify (line 59) | fn verify(&self) -> Result<(), TransactionError> {
function verify_tendermint_tx (line 64) | pub(crate) fn verify_tendermint_tx<N: Network>(
FILE: coordinator/tributary/src/tests/block.rs
type N (line 22) | type N = TendermintNetwork<MemDb, NonceTransaction, DummyP2p>;
type NonceTransaction (line 27) | struct NonceTransaction(u32, u8, Signed);
method new (line 30) | fn new(nonce: u32, distinguisher: u8) -> Self {
method read (line 47) | fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
method write (line 58) | fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
method kind (line 65) | fn kind(&self) -> TransactionKind<'_> {
method hash (line 69) | fn hash(&self) -> [u8; 32] {
method verify (line 73) | fn verify(&self) -> Result<(), TransactionError> {
function empty_block (line 79) | fn empty_block() {
function duplicate_nonces (line 102) | fn duplicate_nonces() {
FILE: coordinator/tributary/src/tests/blockchain.rs
type N (line 30) | type N = TendermintNetwork<MemDb, SignedTransaction, DummyP2p>;
function new_blockchain (line 32) | fn new_blockchain<T: TransactionTrait>(
function block_addition (line 44) | fn block_addition() {
function invalid_block (line 63) | fn invalid_block() {
function signed_transaction (line 150) | fn signed_transaction() {
function provided_transaction (line 206) | fn provided_transaction() {
function tendermint_evidence_tx (line 340) | async fn tendermint_evidence_tx() {
function block_tx_ordering (line 397) | async fn block_tx_ordering() {
FILE: coordinator/tributary/src/tests/mempool.rs
type N (line 20) | type N = TendermintNetwork<MemDb, SignedTransaction, DummyP2p>;
function new_mempool (line 22) | fn new_mempool<T: TransactionTrait>() -> ([u8; 32], MemDb, Mempool<MemDb...
function mempool_addition (line 30) | async fn mempool_addition() {
function too_many_mempool (line 161) | fn too_many_mempool() {
FILE: coordinator/tributary/src/tests/merkle.rs
function merkle (line 6) | fn merkle() {
FILE: coordinator/tributary/src/tests/p2p.rs
type DummyP2p (line 4) | pub struct DummyP2p;
method broadcast (line 8) | async fn broadcast(&self, _: [u8; 32], _: Vec<u8>) {
FILE: coordinator/tributary/src/tests/tendermint.rs
function assert_target_block_time (line 8) | fn assert_target_block_time() {
FILE: coordinator/tributary/src/tests/transaction/mod.rs
function random_signed (line 35) | pub fn random_signed<R: RngCore + CryptoRng>(rng: &mut R) -> Signed {
function random_signed_with_nonce (line 46) | pub fn random_signed_with_nonce<R: RngCore + CryptoRng>(rng: &mut R, non...
type ProvidedTransaction (line 53) | pub struct ProvidedTransaction(pub Vec<u8>);
method read (line 56) | fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
method write (line 64) | fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
method kind (line 71) | fn kind(&self) -> TransactionKind<'_> {
method hash (line 79) | fn hash(&self) -> [u8; 32] {
method verify (line 83) | fn verify(&self) -> Result<(), TransactionError> {
function random_provided_transaction (line 88) | pub fn random_provided_transaction<R: RngCore + CryptoRng>(
type SignedTransaction (line 103) | pub struct SignedTransaction(pub Vec<u8>, pub Signed);
method read (line 106) | fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
method write (line 115) | fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
method kind (line 123) | fn kind(&self) -> TransactionKind<'_> {
method hash (line 127) | fn hash(&self) -> [u8; 32] {
method verify (line 132) | fn verify(&self) -> Result<(), TransactionError> {
function signed_transaction (line 137) | pub fn signed_transaction<R: RngCore + CryptoRng>(
function random_signed_transaction (line 160) | pub fn random_signed_transaction<R: RngCore + CryptoRng>(
function new_genesis (line 173) | pub fn new_genesis() -> [u8; 32] {
function tendermint_meta (line 179) | pub async fn tendermint_meta() -> ([u8; 32], Signer, [u8; 32], Arc<Valid...
function signed_from_data (line 194) | pub async fn signed_from_data<N: Network>(
function random_evidence_tx (line 211) | pub async fn random_evidence_tx<N: Network>(
FILE: coordinator/tributary/src/tests/transaction/signed.rs
function serialize_signed (line 15) | fn serialize_signed() {
function sig_hash (line 21) | fn sig_hash() {
function signed_transaction (line 31) | fn signed_transaction() {
function invalid_nonce (line 81) | fn invalid_nonce() {
FILE: coordinator/tributary/src/tests/transaction/tendermint.rs
type N (line 31) | type N = TendermintNetwork<MemDb, SignedTransaction, DummyP2p>;
function serialize_tendermint (line 34) | async fn serialize_tendermint() {
function invalid_valid_round (line 43) | async fn invalid_valid_round() {
function invalid_precommit_signature (line 80) | async fn invalid_precommit_signature() {
function evidence_with_prevote (line 129) | async fn evidence_with_prevote() {
function conflicting_msgs_evidence_tx (line 174) | async fn conflicting_msgs_evidence_tx() {
FILE: coordinator/tributary/src/transaction.rs
type TransactionError (line 19) | pub enum TransactionError {
type Signed (line 45) | pub struct Signed {
method read_without_nonce (line 86) | pub fn read_without_nonce<R: io::Read>(reader: &mut R, nonce: u32) -> ...
method write_without_nonce (line 101) | pub fn write_without_nonce<W: io::Write>(&self, writer: &mut W) -> io:...
method read (line 52) | fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
method write (line 74) | fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
type TransactionKind (line 113) | pub enum TransactionKind<'a> {
type Transaction (line 146) | pub trait Transaction: 'static + Send + Sync + Clone + Eq + Debug + Read...
method kind (line 148) | fn kind(&self) -> TransactionKind<'_>;
method hash (line 153) | fn hash(&self) -> [u8; 32];
method verify (line 156) | fn verify(&self) -> Result<(), TransactionError>;
method sig_hash (line 163) | fn sig_hash(&self, genesis: [u8; 32]) -> <Ristretto as Ciphersuite>::F {
type GAIN (line 185) | pub trait GAIN: FnMut(&<Ristretto as Ciphersuite>::G, &[u8]) -> Option<u...
function verify_transaction (line 188) | pub(crate) fn verify_transaction<F: GAIN, T: Transaction>(
FILE: coordinator/tributary/tendermint/src/block.rs
type BlockData (line 16) | pub(crate) struct BlockData<N: Network> {
function new (line 39) | pub(crate) fn new(
function round (line 67) | pub(crate) fn round(&self) -> &RoundData<N> {
function round_mut (line 71) | pub(crate) fn round_mut(&mut self) -> &mut RoundData<N> {
function populate_end_time (line 78) | pub(crate) fn populate_end_time(&mut self, round: RoundNumber) {
function new_round (line 92) | pub(crate) fn new_round(
function message (line 128) | pub(crate) fn message(&mut self, data: DataFor<N>) -> Option<MessageFor<...
FILE: coordinator/tributary/tendermint/src/ext.rs
type ValidatorId (line 13) | pub trait ValidatorId:
type Signature (line 24) | pub trait Signature: Send + Sync + Clone + PartialEq + Eq + Debug + Enco...
type BlockNumber (line 31) | pub struct BlockNumber(pub u64);
type RoundNumber (line 34) | pub struct RoundNumber(pub u32);
type Signer (line 38) | pub trait Signer: Send + Sync {
method validator_id (line 45) | async fn validator_id(&self) -> Option<Self::ValidatorId>;
method sign (line 47) | async fn sign(&self, msg: &[u8]) -> Self::Signature;
type ValidatorId (line 52) | type ValidatorId = S::ValidatorId;
type Signature (line 53) | type Signature = S::Signature;
method validator_id (line 55) | async fn validator_id(&self) -> Option<Self::ValidatorId> {
method sign (line 59) | async fn sign(&self, msg: &[u8]) -> Self::Signature {
type SignatureScheme (line 65) | pub trait SignatureScheme: Send + Sync + Clone {
method verify (line 81) | fn verify(&self, validator: Self::ValidatorId, msg: &[u8], sig: &Self:...
method aggregate (line 85) | fn aggregate(
method verify_aggregate (line 93) | fn verify_aggregate(
type ValidatorId (line 102) | type ValidatorId = S::ValidatorId;
type Signature (line 103) | type Signature = S::Signature;
type AggregateSignature (line 104) | type AggregateSignature = S::AggregateSignature;
type Signer (line 105) | type Signer = S::Signer;
method verify (line 107) | fn verify(&self, validator: Self::ValidatorId, msg: &[u8], sig: &Self:...
method aggregate (line 111) | fn aggregate(
method verify_aggregate (line 121) | fn verify_aggregate(
type Commit (line 135) | pub struct Commit<S: SignatureScheme> {
method clone (line 145) | fn clone(&self) -> Self {
type Weights (line 155) | pub trait Weights: Send + Sync {
method total_weight (line 159) | fn total_weight(&self) -> u64;
method weight (line 161) | fn weight(&self, validator: Self::ValidatorId) -> u64;
method threshold (line 163) | fn threshold(&self) -> u64 {
method fault_threshold (line 167) | fn fault_threshold(&self) -> u64 {
method proposer (line 172) | fn proposer(&self, block: BlockNumber, round: RoundNumber) -> Self::Va...
type ValidatorId (line 176) | type ValidatorId = W::ValidatorId;
method total_weight (line 178) | fn total_weight(&self) -> u64 {
method weight (line 182) | fn weight(&self, validator: Self::ValidatorId) -> u64 {
method proposer (line 186) | fn proposer(&self, block: BlockNumber, round: RoundNumber) -> Self::Va...
type BlockError (line 193) | pub enum BlockError {
type Block (line 204) | pub trait Block: Send + Sync + Clone + PartialEq + Eq + Debug + Encode +...
method id (line 209) | fn id(&self) -> Self::Id;
type Network (line 214) | pub trait Network: Sized + Send + Sync {
constant BLOCK_PROCESSING_TIME (line 232) | const BLOCK_PROCESSING_TIME: u32;
constant LATENCY_TIME (line 236) | const LATENCY_TIME: u32;
method block_time (line 239) | fn block_time() -> u32 {
method signer (line 247) | fn signer(&self) -> <Self::SignatureScheme as SignatureScheme>::Signer;
method signature_scheme (line 249) | fn signature_scheme(&self) -> Self::SignatureScheme;
method weights (line 251) | fn weights(&self) -> Self::Weights;
method verify_commit (line 256) | fn verify_commit(
method broadcast (line 283) | async fn broadcast(&mut self, msg: SignedMessageFor<Self>);
method slash (line 288) | async fn slash(&mut self, validator: Self::ValidatorId, slash_event: S...
method validate (line 291) | async fn validate(&self, block: &Self::Block) -> Result<(), BlockError>;
method add_block (line 301) | async fn add_block(
FILE: coordinator/tributary/tendermint/src/lib.rs
constant MESSAGE_TAPE_KEY (line 37) | const MESSAGE_TAPE_KEY: &[u8] = b"tendermint-machine-message_tape";
function message_tape_key (line 38) | fn message_tape_key(genesis: [u8; 32]) -> Vec<u8> {
function commit_msg (line 42) | pub fn commit_msg(end_time: u64, id: &[u8]) -> Vec<u8> {
type Step (line 47) | pub enum Step {
type Data (line 54) | pub enum Data<B: Block, S: Signature> {
method eq (line 61) | fn eq(&self, other: &Data<B, S>) -> bool {
function hash (line 75) | fn hash<H: core::hash::Hasher>(&self, state: &mut H) {
function step (line 86) | pub fn step(&self) -> Step {
type Message (line 96) | pub struct Message<V: ValidatorId, B: Block, S: Signature> {
type SignedMessage (line 106) | pub struct SignedMessage<V: ValidatorId, B: Block, S: Signature> {
function block (line 113) | pub fn block(&self) -> BlockNumber {
function verify_signature (line 118) | pub fn verify_signature<Scheme: SignatureScheme<ValidatorId = V, Signatu...
type SlashReason (line 127) | pub enum SlashReason {
type Evidence (line 134) | pub enum Evidence {
type TendermintError (line 141) | pub enum TendermintError {
type DataFor (line 149) | pub type DataFor<N> =
type MessageFor (line 151) | pub(crate) type MessageFor<N> = Message<
type SignedMessageFor (line 157) | pub type SignedMessageFor<N> = SignedMessage<
function decode_signed_message (line 163) | pub fn decode_signed_message<N: Network>(mut data: &[u8]) -> Option<Sign...
function decode_and_verify_signed_message (line 167) | fn decode_and_verify_signed_message<N: Network>(
function verify_tendermint_evidence (line 181) | pub fn verify_tendermint_evidence<N: Network>(
type SlashEvent (line 248) | pub enum SlashEvent {
type Upons (line 256) | struct Upons {
type TendermintMachine (line 264) | pub struct TendermintMachine<N: Network> {
type SyncedBlock (line 285) | pub struct SyncedBlock<N: Network> {
type SyncedBlockSender (line 291) | pub type SyncedBlockSender<N> = mpsc::UnboundedSender<SyncedBlock<N>>;
type SyncedBlockResultReceiver (line 292) | pub type SyncedBlockResultReceiver = mpsc::UnboundedReceiver<bool>;
type MessageSender (line 294) | pub type MessageSender<N> = mpsc::UnboundedSender<SignedMessageFor<N>>;
type TendermintHandle (line 297) | pub struct TendermintHandle<N: Network> {
function broadcast (line 314) | fn broadcast(&mut self, data: DataFor<N>) {
function round (line 324) | fn round(&mut self, round: RoundNumber, time: Option<CanonicalInstant>) ...
function reset (line 350) | async fn reset(&mut self, end_round: RoundNumber, proposal: Option<N::Bl...
function reset_by_commit (line 398) | async fn reset_by_commit(
function slash (line 421) | async fn slash(&mut self, validator: N::ValidatorId, slash_event: SlashE...
function proposal_for_round (line 431) | fn proposal_for_round(&self, round: RoundNumber) -> Option<(Option<Round...
function upon_proposal_without_valid_round (line 436) | fn upon_proposal_without_valid_round(&mut self) {
function upon_proposal_with_valid_round (line 462) | fn upon_proposal_with_valid_round(&mut self) {
function upon_prevotes (line 506) | fn upon_prevotes(&mut self) {
function upon_successful_current_round_prevotes (line 518) | async fn upon_successful_current_round_prevotes(&mut self) {
function upon_negative_current_round_prevotes (line 554) | fn upon_negative_current_round_prevotes(&mut self) {
function upon_precommits (line 568) | fn upon_precommits(&mut self) {
function all_current_round_upons (line 580) | async fn all_current_round_upons(&mut self) {
function upon_successful_precommits (line 590) | async fn upon_successful_precommits(&mut self, round: RoundNumber) -> bo...
function all_any_round_upons (line 649) | async fn all_any_round_upons(&mut self, round: RoundNumber) -> bool {
function verify_precommit_signature (line 657) | async fn verify_precommit_signature(
function message (line 686) | async fn message(&mut self, signed: &SignedMessageFor<N>) -> Result<(), ...
function new (line 850) | pub async fn new(
function run (line 937) | pub async fn run(mut self) {
FILE: coordinator/tributary/tendermint/src/message_log.rs
type RoundLog (line 7) | type RoundLog<N> = HashMap<<N as Network>::ValidatorId, HashMap<Step, Si...
type MessageLog (line 8) | pub(crate) struct MessageLog<N: Network> {
function new (line 17) | pub(crate) fn new(weights: Arc<N::Weights>) -> MessageLog<N> {
function log (line 28) | pub(crate) fn log(&mut self, signed: SignedMessageFor<N>) -> Result<bool...
function round_participation (line 61) | pub(crate) fn round_participation(&self, round: RoundNumber) -> u64 {
function has_participation (line 66) | pub(crate) fn has_participation(&self, round: RoundNumber, step: Step) -...
function has_consensus (line 71) | pub(crate) fn has_consensus(&self, round: RoundNumber, data: &DataFor<N>...
FILE: coordinator/tributary/tendermint/src/round.rs
type RoundData (line 16) | pub struct RoundData<N: Network> {
function new (line 25) | pub fn new(number: RoundNumber, start_time: CanonicalInstant) -> Self {
function timeout (line 35) | fn timeout(&self, step: Step) -> CanonicalInstant {
function end_time (line 49) | pub fn end_time(&self) -> CanonicalInstant {
function set_timeout (line 53) | pub(crate) fn set_timeout(&mut self, step: Step) {
function timeout_future (line 59) | pub(crate) async fn timeout_future(&self) -> Step {
FILE: coordinator/tributary/tendermint/src/time.rs
type CanonicalInstant (line 5) | pub struct CanonicalInstant {
method new (line 17) | pub fn new(time: u64) -> CanonicalInstant {
method canonical (line 30) | pub fn canonical(&self) -> u64 {
method instant (line 34) | pub fn instant(&self) -> Instant {
type Output (line 40) | type Output = CanonicalInstant;
method add (line 41) | fn add(self, duration: Duration) -> CanonicalInstant {
function sys_time (line 12) | pub(crate) fn sys_time(time: u64) -> SystemTime {
FILE: coordinator/tributary/tendermint/tests/ext.rs
type TestValidatorId (line 20) | type TestValidatorId = u16;
type TestBlockId (line 21) | type TestBlockId = [u8; 4];
type TestSigner (line 23) | struct TestSigner(u16);
type ValidatorId (line 26) | type ValidatorId = TestValidatorId;
type Signature (line 27) | type Signature = [u8; 32];
method validator_id (line 29) | async fn validator_id(&self) -> Option<TestValidatorId> {
method sign (line 33) | async fn sign(&self, msg: &[u8]) -> [u8; 32] {
type TestSignatureScheme (line 42) | struct TestSignatureScheme;
type ValidatorId (line 44) | type ValidatorId = TestValidatorId;
type Signature (line 45) | type Signature = [u8; 32];
type AggregateSignature (line 46) | type AggregateSignature = Vec<[u8; 32]>;
type Signer (line 47) | type Signer = TestSigner;
method verify (line 50) | fn verify(&self, validator: u16, msg: &[u8], sig: &[u8; 32]) -> bool {
method aggregate (line 54) | fn aggregate(
method verify_aggregate (line 64) | fn verify_aggregate(
type TestWeights (line 78) | struct TestWeights;
type ValidatorId (line 80) | type ValidatorId = TestValidatorId;
method total_weight (line 82) | fn total_weight(&self) -> u64 {
method weight (line 85) | fn weight(&self, id: TestValidatorId) -> u64 {
method proposer (line 89) | fn proposer(&self, number: BlockNumber, round: RoundNumber) -> TestValid...
type TestBlock (line 95) | struct TestBlock {
type Id (line 101) | type Id = TestBlockId;
method id (line 103) | fn id(&self) -> TestBlockId {
type TestNetwork (line 109) | struct TestNetwork(
method new (line 165) | async fn new(
type Db (line 116) | type Db = MemDb;
type ValidatorId (line 118) | type ValidatorId = TestValidatorId;
type SignatureScheme (line 119) | type SignatureScheme = TestSignatureScheme;
type Weights (line 120) | type Weights = TestWeights;
type Block (line 121) | type Block = TestBlock;
constant BLOCK_PROCESSING_TIME (line 123) | const BLOCK_PROCESSING_TIME: u32 = 2000;
constant LATENCY_TIME (line 124) | const LATENCY_TIME: u32 = 1000;
method signer (line 126) | fn signer(&self) -> TestSigner {
method signature_scheme (line 130) | fn signature_scheme(&self) -> TestSignatureScheme {
method weights (line 134) | fn weights(&self) -> TestWeights {
method broadcast (line 138) | async fn broadcast(&mut self, msg: SignedMessageFor<Self>) {
method slash (line 144) | async fn slash(&mut self, id: TestValidatorId, event: SlashEvent) {
method validate (line 148) | async fn validate(&self, block: &TestBlock) -> Result<(), BlockError> {
method add_block (line 152) | async fn add_block(
function test_machine (line 194) | async fn test_machine() {
function test_machine_with_historic_start_time (line 200) | async fn test_machine_with_historic_start_time() {
FILE: crypto/ciphersuite/kp256/src/lib.rs
function test_oversize_dst (line 97) | fn test_oversize_dst<C: Ciphersuite>() {
type Secp256k1 (line 112) | pub struct Secp256k1;
function test_secp256k1 (line 115) | fn test_secp256k1() {
type P256 (line 147) | pub struct P256;
function test_p256 (line 150) | fn test_p256() {
FILE: crypto/ciphersuite/src/lib.rs
type Ciphersuite (line 30) | pub trait Ciphersuite:
constant ID (line 43) | const ID: &'static [u8];
method generator (line 47) | fn generator() -> Self::G;
method hash_to_F (line 58) | fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F;
method random_nonzero_F (line 62) | fn random_nonzero_F<R: RngCore + CryptoRng>(rng: &mut R) -> Self::F {
method read_F (line 74) | fn read_F<R: Read>(reader: &mut R) -> io::Result<Self::F> {
method read_G (line 91) | fn read_G<R: Read>(reader: &mut R) -> io::Result<Self::G> {
FILE: crypto/dalek-ff-group/src/ciphersuite.rs
type Ristretto (line 44) | pub struct Ristretto;
function test_ristretto (line 47) | fn test_ristretto() {
type Ed25519 (line 72) | pub struct Ed25519;
function test_ed25519 (line 75) | fn test_ed25519() {
FILE: crypto/dalek-ff-group/src/field.rs
constant MODULUS (line 26) | const MODULUS: U256 = U256::from_u8(1).shl_vartime(255).saturating_sub(&...
constant WIDE_MODULUS (line 27) | const WIDE_MODULUS: U512 = U256::ZERO.concat(&MODULUS);
type ResidueType (line 35) | type ResidueType = Residue<FieldModulus, { FieldModulus::LIMBS }>;
type FieldElement (line 40) | pub struct FieldElement(ResidueType);
method from_u256 (line 223) | pub const fn from_u256(u256: &U256) -> Self {
method wide_reduce (line 230) | pub fn wide_reduce(value: [u8; 64]) -> Self {
method pow (line 235) | pub fn pow(&self, other: FieldElement) -> FieldElement {
method sqrt_ratio_i (line 278) | pub fn sqrt_ratio_i(u: FieldElement, v: FieldElement) -> (Choice, Fiel...
method from_uniform_bytes (line 310) | fn from_uniform_bytes(bytes: &[u8; 64]) -> Self {
method sum (line 316) | fn sum<I: Iterator<Item = FieldElement>>(iter: I) -> FieldElement {
method sum (line 326) | fn sum<I: Iterator<Item = &'a FieldElement>>(iter: I) -> FieldElement {
method product (line 332) | fn product<I: Iterator<Item = FieldElement>>(iter: I) -> FieldElement {
method product (line 342) | fn product<I: Iterator<Item = &'a FieldElement>>(iter: I) -> FieldElem...
constant SQRT_M1 (line 45) | const SQRT_M1: FieldElement = FieldElement(
constant MOD_3_8 (line 51) | const MOD_3_8: FieldElement = FieldElement(ResidueType::new(
constant MOD_5_8 (line 56) | const MOD_5_8: FieldElement = FieldElement(ResidueType::sub(&MOD_3_8.0, ...
function reduce (line 58) | fn reduce(x: U512) -> ResidueType {
type Output (line 90) | type Output = Self;
method neg (line 91) | fn neg(self) -> Self::Output {
type Output (line 97) | type Output = FieldElement;
method neg (line 98) | fn neg(self) -> Self::Output {
constant ZERO (line 104) | const ZERO: Self = Self(ResidueType::ZERO);
constant ONE (line 105) | const ONE: Self = Self(ResidueType::ONE);
method random (line 107) | fn random(mut rng: impl RngCore) -> Self {
method square (line 113) | fn square(&self) -> Self {
method double (line 116) | fn double(&self) -> Self {
method invert (line 120) | fn invert(&self) -> CtOption<Self> {
method sqrt (line 127) | fn sqrt(&self) -> CtOption<Self> {
method sqrt_ratio (line 134) | fn sqrt_ratio(u: &FieldElement, v: &FieldElement) -> (Choice, FieldEleme...
type Repr (line 159) | type Repr = [u8; 32];
constant MODULUS (line 162) | const MODULUS: &'static str = "7ffffffffffffffffffffffffffffffffffffffff...
constant NUM_BITS (line 164) | const NUM_BITS: u32 = 255;
constant CAPACITY (line 165) | const CAPACITY: u32 = 254;
constant TWO_INV (line 167) | const TWO_INV: Self = FieldElement(ResidueType::new(&U256::from_u8(2)).i...
constant MULTIPLICATIVE_GENERATOR (line 171) | const MULTIPLICATIVE_GENERATOR: Self = Self(ResidueType::new(&U256::from...
constant S (line 174) | const S: u32 = 2;
constant ROOT_OF_UNITY (line 178) | const ROOT_OF_UNITY: Self = FieldElement(ResidueType::new(&U256::from_be...
constant ROOT_OF_UNITY_INV (line 182) | const ROOT_OF_UNITY_INV: Self = FieldElement(Self::ROOT_OF_UNITY.0.inver...
constant DELTA (line 186) | const DELTA: Self = FieldElement(ResidueType::new(&U256::from_be_hex(
method from_repr (line 190) | fn from_repr(bytes: [u8; 32]) -> CtOption<Self> {
method to_repr (line 194) | fn to_repr(&self) -> [u8; 32] {
method is_odd (line 198) | fn is_odd(&self) -> Choice {
method from_u128 (line 202) | fn from_u128(num: u128) -> Self {
type ReprBits (line 208) | type ReprBits = [u8; 32];
method to_le_bits (line 210) | fn to_le_bits(&self) -> FieldBits<Self::ReprBits> {
method char_le_bits (line 214) | fn char_le_bits() -> FieldBits<Self::ReprBits> {
function test_wide_modulus (line 348) | fn test_wide_modulus() {
function test_sqrt_m1 (line 355) | fn test_sqrt_m1() {
function test_field (line 372) | fn test_field() {
FILE: crypto/dalek-ff-group/src/lib.rs
function black_box (line 47) | pub(crate) fn black_box<T>(val: T) -> T {
function black_box (line 54) | pub(crate) fn black_box<T>(val: T) -> T {
function u8_from_bool (line 60) | fn u8_from_bool(bit_ref: &mut bool) -> u8 {
function choice (line 74) | fn choice(mut value: bool) -> Choice {
type Scalar (line 179) | pub struct Scalar(pub DScalar);
method pow (line 201) | pub fn pow(&self, other: Scalar) -> Scalar {
method from_bytes_mod_order_wide (line 239) | pub fn from_bytes_mod_order_wide(bytes: &[u8; 64]) -> Scalar {
method from_hash (line 244) | pub fn from_hash<D: Digest<OutputSize = U64> + HashMarker>(hash: D) ->...
method from_uniform_bytes (line 329) | fn from_uniform_bytes(bytes: &[u8; 64]) -> Self {
method sum (line 335) | fn sum<I: Iterator<Item = Scalar>>(iter: I) -> Scalar {
method sum (line 341) | fn sum<I: Iterator<Item = &'a Scalar>>(iter: I) -> Scalar {
method product (line 347) | fn product<I: Iterator<Item = Scalar>>(iter: I) -> Scalar {
method product (line 353) | fn product<I: Iterator<Item = &'a Scalar>>(iter: I) -> Scalar {
constant ZERO (line 254) | const ZERO: Scalar = Scalar(DScalar::ZERO);
constant ONE (line 255) | const ONE: Scalar = Scalar(DScalar::ONE);
method random (line 257) | fn random(rng: impl RngCore) -> Self {
method square (line 261) | fn square(&self) -> Self {
method double (line 264) | fn double(&self) -> Self {
method invert (line 267) | fn invert(&self) -> CtOption<Self> {
method sqrt (line 271) | fn sqrt(&self) -> CtOption<Self> {
method sqrt_ratio (line 275) | fn sqrt_ratio(num: &Self, div: &Self) -> (Choice, Self) {
type Repr (line 282) | type Repr = [u8; 32];
constant MODULUS (line 284) | const MODULUS: &'static str = <DScalar as PrimeField>::MODULUS;
constant NUM_BITS (line 286) | const NUM_BITS: u32 = <DScalar as PrimeField>::NUM_BITS;
constant CAPACITY (line 287) | const CAPACITY: u32 = <DScalar as PrimeField>::CAPACITY;
constant TWO_INV (line 289) | const TWO_INV: Scalar = Scalar(<DScalar as PrimeField>::TWO_INV);
constant MULTIPLICATIVE_GENERATOR (line 291) | const MULTIPLICATIVE_GENERATOR: Scalar =
constant S (line 293) | const S: u32 = <DScalar as PrimeField>::S;
constant ROOT_OF_UNITY (line 295) | const ROOT_OF_UNITY: Scalar = Scalar(<DScalar as PrimeField>::ROOT_OF_UN...
constant ROOT_OF_UNITY_INV (line 296) | const ROOT_OF_UNITY_INV: Scalar = Scalar(<DScalar as PrimeField>::ROOT_O...
constant DELTA (line 298) | const DELTA: Scalar = Scalar(<DScalar as PrimeField>::DELTA);
method from_repr (line 300) | fn from_repr(bytes: [u8; 32]) -> CtOption<Self> {
method to_repr (line 303) | fn to_repr(&self) -> [u8; 32] {
method is_odd (line 307) | fn is_odd(&self) -> Choice {
method from_u128 (line 311) | fn from_u128(num: u128) -> Self {
type ReprBits (line 317) | type ReprBits = [u8; 32];
method to_le_bits (line 319) | fn to_le_bits(&self) -> FieldBits<Self::ReprBits> {
method char_le_bits (line 323) | fn char_le_bits() -> FieldBits<Self::ReprBits> {
method mul_by_cofactor (line 481) | pub fn mul_by_cofactor(&self) -> EdwardsPoint {
function test_ed25519_group (line 497) | fn test_ed25519_group() {
function test_ristretto_group (line 502) | fn test_ristretto_group() {
FILE: crypto/dkg/dealer/src/lib.rs
function key_gen (line 18) | pub fn key_gen<R: RngCore + CryptoRng, C: Ciphersuite>(
FILE: crypto/dkg/musig/src/lib.rs
type MusigError (line 23) | pub enum MusigError<C: Ciphersuite> {
function check_keys (line 46) | fn check_keys<C: Ciphersuite>(keys: &[C::G]) -> Result<u16, MusigError<C...
function binding_factor_transcript (line 65) | fn binding_factor_transcript<C: Ciphersuite>(
function binding_factor (line 81) | fn binding_factor<C: Ciphersuite>(mut transcript: Vec<u8>, i: u16) -> C:...
function musig_key_multiexp (line 87) | fn musig_key_multiexp<C: Ciphersuite>(
function musig_key_vartime (line 103) | pub fn musig_key_vartime<C: Ciphersuite>(
function musig_key (line 111) | pub fn musig_key<C: Ciphersuite>(context: [u8; 32], keys: &[C::G]) -> Re...
function musig (line 116) | pub fn musig<C: Ciphersuite>(
FILE: crypto/dkg/musig/src/tests.rs
function test_musig (line 14) | pub fn test_musig() {
FILE: crypto/dkg/pedpop/src/encryption.rs
type ReadWrite (line 29) | pub trait ReadWrite: Sized {
method read (line 30) | fn read<R: io::Read>(reader: &mut R, params: ThresholdParams) -> io::R...
method write (line 31) | fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()>;
method serialize (line 33) | fn serialize(&self) -> Vec<u8> {
type Message (line 40) | pub trait Message: Clone + PartialEq + Eq + fmt::Debug + Zeroize + ReadW...
type Encryptable (line 43) | pub trait Encryptable: Clone + AsRef<[u8]> + AsMut<[u8]> + Zeroize + Rea...
type EncryptionKeyMessage (line 50) | pub struct EncryptionKeyMessage<C: Ciphersuite, M: Message> {
function read (line 57) | pub fn read<R: io::Read>(reader: &mut R, params: ThresholdParams) -> io:...
function write (line 61) | pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
function serialize (line 66) | pub fn serialize(&self) -> Vec<u8> {
function enc_key (line 73) | pub(crate) fn enc_key(&self) -> C::G {
type EncryptedMessage (line 81) | pub struct EncryptedMessage<C: Ciphersuite, E: Encryptable> {
function ecdh (line 95) | fn ecdh<C: Ciphersuite>(private: &Zeroizing<C::F>, public: C::G) -> Zero...
function cipher (line 101) | fn cipher<C: Ciphersuite>(context: [u8; 32], ecdh: &Zeroizing<C::G>) -> ...
function encrypt (line 135) | fn encrypt<R: RngCore + CryptoRng, C: Ciphersuite, E: Encryptable>(
function read (line 171) | pub fn read<R: io::Read>(reader: &mut R, params: ThresholdParams) -> io:...
function write (line 179) | pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
function serialize (line 185) | pub fn serialize(&self) -> Vec<u8> {
function invalidate_pop (line 192) | pub(crate) fn invalidate_pop(&mut self) {
function invalidate_msg (line 197) | pub(crate) fn invalidate_msg<R: RngCore + CryptoRng>(
function invalidate_share_serialization (line 219) | pub(crate) fn invalidate_share_serialization<R: RngCore + CryptoRng>(
function invalidate_share_value (line 243) | pub(crate) fn invalidate_share_value<R: RngCore + CryptoRng>(
type EncryptionKeyProof (line 261) | pub struct EncryptionKeyProof<C: Ciphersuite> {
function read (line 267) | pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
function write (line 271) | pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
function serialize (line 276) | pub fn serialize(&self) -> Vec<u8> {
function invalidate_key (line 283) | pub(crate) fn invalidate_key(&mut self) {
function invalidate_dleq (line 288) | pub(crate) fn invalidate_dleq(&mut self) {
function pop_challenge (line 302) | fn pop_challenge<C: Ciphersuite>(
function encryption_key_transcript (line 326) | fn encryption_key_transcript(context: [u8; 32]) -> RecommendedTranscript {
type DecryptionError (line 333) | pub(crate) enum DecryptionError {
type Decryption (line 342) | pub(crate) struct Decryption<C: Ciphersuite> {
function new (line 348) | pub(crate) fn new(context: [u8; 32]) -> Self {
function register (line 351) | pub(crate) fn register<M: Message>(
function decrypt_with_proof (line 366) | pub(crate) fn decrypt_with_proof<E: Encryptable>(
type Encryption (line 402) | pub(crate) struct Encryption<C: Ciphersuite> {
function fmt (line 411) | fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
method zeroize (line 423) | fn zeroize(&mut self) {
function new (line 433) | pub(crate) fn new<R: RngCore + CryptoRng>(
function registration (line 448) | pub(crate) fn registration<M: Message>(&self, msg: M) -> EncryptionKeyMe...
function register (line 452) | pub(crate) fn register<M: Message>(
function encrypt (line 460) | pub(crate) fn encrypt<R: RngCore + CryptoRng, E: Encryptable>(
function decrypt (line 469) | pub(crate) fn decrypt<R: RngCore + CryptoRng, I: Copy + Zeroize, E: Encr...
function into_decryption (line 503) | pub(crate) fn into_decryption(self) -> Decryption<C> {
FILE: crypto/dkg/pedpop/src/lib.rs
type PedPoPError (line 38) | pub enum PedPoPError<C: Ciphersuite> {
function validate_map (line 57) | fn validate_map<T, C: Ciphersuite>(
function challenge (line 86) | fn challenge<C: Ciphersuite>(context: [u8; 32], l: Participant, R: &[u8]...
type Commitments (line 103) | pub struct Commitments<C: Ciphersuite> {
method read (line 110) | fn read<R: Read>(reader: &mut R, params: ThresholdParams) -> io::Result<...
method write (line 130) | fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
type KeyGenMachine (line 138) | pub struct KeyGenMachine<C: Ciphersuite> {
function new (line 148) | pub fn new(params: ThresholdParams, context: [u8; 32]) -> KeyGenMachine<...
function generate_coefficients (line 156) | pub fn generate_coefficients<R: RngCore + CryptoRng>(
function polynomial (line 205) | fn polynomial<F: PrimeField + Zeroize>(
type SecretShare (line 231) | pub struct SecretShare<F: PrimeField>(F::Repr);
function as_ref (line 233) | fn as_ref(&self) -> &[u8] {
function as_mut (line 238) | fn as_mut(&mut self) -> &mut [u8] {
function fmt (line 243) | fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
method zeroize (line 248) | fn zeroize(&mut self) {
method drop (line 257) | fn drop(&mut self) {
method read (line 264) | fn read<R: Read>(reader: &mut R, _: ThresholdParams) -> io::Result<Self> {
method write (line 270) | fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
type SecretShareMachine (line 277) | pub struct SecretShareMachine<C: Ciphersuite> {
function fmt (line 286) | fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
function verify_r1 (line 300) | fn verify_r1<R: RngCore + CryptoRng>(
function generate_secret_shares (line 347) | pub fn generate_secret_shares<R: RngCore + CryptoRng>(
type KeyMachine (line 388) | pub struct KeyMachine<C: Ciphersuite> {
function fmt (line 396) | fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
method zeroize (line 407) | fn zeroize(&mut self) {
function exponential (line 420) | fn exponential<C: Ciphersuite>(i: Participant, values: &[C::G]) -> Vec<(...
function share_verification_statements (line 430) | fn share_verification_statements<C: Ciphersuite>(
type BatchId (line 452) | enum BatchId {
function calculate_share (line 463) | pub fn calculate_share<R: RngCore + CryptoRng>(
type BlameMachine (line 536) | pub struct BlameMachine<C: Ciphersuite> {
function fmt (line 543) | fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
method zeroize (line 553) | fn zeroize(&mut self) {
function complete (line 571) | pub fn complete(self) -> ThresholdKeys<C> {
function blame_internal (line 575) | fn blame_internal(
function blame (line 623) | pub fn blame(
type AdditionalBlameMachine (line 637) | pub struct AdditionalBlameMachine<C: Ciphersuite>(BlameMachine<C>);
function new (line 649) | pub fn new(
function blame (line 674) | pub fn blame(
FILE: crypto/dkg/pedpop/src/tests.rs
constant THRESHOLD (line 10) | const THRESHOLD: u16 = 3;
constant PARTICIPANTS (line 11) | const PARTICIPANTS: u16 = 5;
function clone_without (line 14) | fn clone_without<K: Clone + core::cmp::Eq + core::hash::Hash, V: Clone>(
type PedPoPEncryptedMessage (line 23) | type PedPoPEncryptedMessage<C> = EncryptedMessage<C, SecretShare<<C as C...
type PedPoPSecretShares (line 24) | type PedPoPSecretShares<C> = HashMap<Participant, PedPoPEncryptedMessage...
constant CONTEXT (line 26) | const CONTEXT: [u8; 32] = *b"DKG Test Key Generation ";
function commit_enc_keys_and_shares (line 30) | fn commit_enc_keys_and_shares<R: RngCore + CryptoRng, C: Ciphersuite>(
function generate_secret_shares (line 83) | fn generate_secret_shares<C: Ciphersuite>(
function pedpop_gen (line 98) | fn pedpop_gen<R: RngCore + CryptoRng, C: Ciphersuite>(
constant ONE (line 141) | const ONE: Participant = Participant::new(1).unwrap();
constant TWO (line 142) | const TWO: Participant = Participant::new(2).unwrap();
function test_pedpop (line 145) | fn test_pedpop() {
function test_blame (line 149) | fn test_blame(
function invalid_encryption_pop_blame (line 176) | fn invalid_encryption_pop_blame() {
function invalid_ecdh_blame (line 208) | fn invalid_ecdh_blame() {
function invalid_dleq_blame (line 249) | fn invalid_dleq_blame() {
function invalid_share_serialization_blame (line 283) | fn invalid_share_serialization_blame() {
function invalid_share_value_blame (line 316) | fn invalid_share_value_blame() {
FILE: crypto/dkg/promote/src/lib.rs
type PromotionError (line 26) | pub enum PromotionError {
function transcript (line 52) | fn transcript<G: GroupEncoding>(key: &G, i: Participant) -> RecommendedT...
type GeneratorProof (line 61) | pub struct GeneratorProof<C: Ciphersuite> {
function write (line 67) | pub fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
function read (line 72) | pub fn read<R: Read>(reader: &mut R) -> io::Result<GeneratorProof<C>> {
function serialize (line 79) | pub fn serialize(&self) -> Vec<u8> {
type GeneratorPromotion (line 91) | pub struct GeneratorPromotion<C1: Ciphersuite, C2: Ciphersuite> {
function promote (line 101) | pub fn promote<R: RngCore + CryptoRng>(
function complete (line 120) | pub fn complete(
FILE: crypto/dkg/promote/src/tests.rs
type AltGenerator (line 18) | struct AltGenerator<C: Ciphersuite> {
type F (line 23) | type F = C::F;
type G (line 24) | type G = C::G;
type H (line 25) | type H = C::H;
constant ID (line 27) | const ID: &'static [u8] = b"Alternate Ciphersuite";
method generator (line 29) | fn generator() -> Self::G {
method hash_to_F (line 33) | fn hash_to_F(dst: &[u8], data: &[u8]) -> Self::F {
function clone_without (line 39) | pub fn clone_without<K: Clone + core::cmp::Eq + core::hash::Hash, V: Clo...
function test_generator_promotion (line 50) | fn test_generator_promotion() {
FILE: crypto/dkg/recovery/src/lib.rs
type RecoveryError (line 18) | pub enum RecoveryError {
function recover_key (line 37) | pub fn recover_key<C: Ciphersuite>(
FILE: crypto/dkg/src/lib.rs
type Participant (line 26) | pub struct Participant(u16);
method new (line 29) | pub const fn new(i: u16) -> Option<Participant> {
method to_bytes (line 39) | pub const fn to_bytes(&self) -> [u8; 2] {
method fmt (line 51) | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
method deserialize_reader (line 122) | fn deserialize_reader<R: io::Read>(reader: &mut R) -> io::Result<Self> {
function from (line 45) | fn from(participant: Participant) -> u16 {
type DkgError (line 58) | pub enum DkgError {
type ThresholdParams (line 131) | pub struct ThresholdParams {
method new (line 166) | pub const fn new(t: u16, n: u16, i: Participant) -> Result<ThresholdPa...
method t (line 182) | pub const fn t(&self) -> u16 {
method n (line 186) | pub const fn n(&self) -> u16 {
method i (line 190) | pub const fn i(&self) -> Participant {
method all_participant_indexes (line 195) | pub fn all_participant_indexes(&self) -> impl Iterator<Item = Particip...
method deserialize_reader (line 202) | fn deserialize_reader<R: io::Read>(reader: &mut R) -> io::Result<Self> {
type AllParticipantIndexes (line 141) | struct AllParticipantIndexes {
type Item (line 146) | type Item = Participant;
method next (line 147) | fn next(&mut self) -> Option<Participant> {
type Interpolation (line 212) | pub enum Interpolation<F: Zeroize + PrimeField> {
function interpolation_factor (line 226) | fn interpolation_factor(&self, i: Participant, included: &[Participant])...
type ThresholdCore (line 258) | struct ThresholdCore<C: Ciphersuite> {
function fmt (line 267) | fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
method zeroize (line 279) | fn zeroize(&mut self) {
type ThresholdKeys (line 292) | pub struct ThresholdKeys<C: Ciphersuite> {
type ThresholdView (line 305) | pub struct ThresholdView<C: Ciphersuite> {
function fmt (line 317) | fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
method zeroize (line 332) | fn zeroize(&mut self) {
function new (line 349) | pub fn new(
function scale (line 400) | pub fn scale(mut self, scalar: C::F) -> Option<ThresholdKeys<C>> {
function offset (line 414) | pub fn offset(mut self, offset: C::F) -> ThresholdKeys<C> {
function current_scalar (line 420) | pub fn current_scalar(&self) -> C::F {
function current_offset (line 425) | pub fn current_offset(&self) -> C::F {
function params (line 430) | pub fn params(&self) -> ThresholdParams {
function original_group_key (line 435) | pub fn original_group_key(&self) -> C::G {
function interpolation (line 440) | pub fn interpolation(&self) -> &Interpolation<C::F> {
function group_key (line 445) | pub fn group_key(&self) -> C::G {
function original_secret_share (line 450) | pub fn original_secret_share(&self) -> &Zeroizing<C::F> {
function original_verification_share (line 457) | pub fn original_verification_share(&self, l: Participant) -> C::G {
function view (line 463) | pub fn view(&self, mut included: Vec<Participant>) -> Result<ThresholdVi...
function write (line 538) | pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
function serialize (line 567) | pub fn serialize(&self) -> Zeroizing<Vec<u8>> {
function read (line 574) | pub fn read<R: io::Read>(reader: &mut R) -> io::Result<ThresholdKeys<C>> {
function scalar (line 637) | pub fn scalar(&self) -> C::F {
function offset (line 642) | pub fn offset(&self) -> C::F {
function group_key (line 647) | pub fn group_key(&self) -> C::G {
function included (line 652) | pub fn included(&self) -> &[Participant] {
function interpolation_factor (line 657) | pub fn interpolation_factor(&self, participant: Participant) -> Option<C...
function secret_share (line 665) | pub fn secret_share(&self) -> &Zeroizing<C::F> {
function original_verification_share (line 672) | pub fn original_verification_share(&self, l: Participant) -> C::G {
function verification_share (line 680) | pub fn verification_share(&self, l: Participant) -> C::G {
FILE: crypto/dleq/src/cross_group/aos.rs
type Re (line 28) | pub(crate) enum Re<G0: PrimeGroup, G1: PrimeGroup> {
function R_default (line 40) | pub(crate) fn R_default() -> Re<G0, G1> {
function e_default (line 44) | pub(crate) fn e_default() -> Re<G0, G1> {
type Aos (line 51) | pub(crate) struct Aos<G0: PrimeGroup + Zeroize, G1: PrimeGroup + Zeroize...
function nonces (line 63) | fn nonces<T: Transcript>(mut transcript: T, nonces: (G0, G1)) -> (G0::Sc...
function R (line 72) | fn R(
function R_batch (line 82) | fn R_batch(
function R_nonces (line 92) | fn R_nonces<T: Transcript>(
function prove (line 103) | pub(crate) fn prove<R: RngCore + CryptoRng, T: Clone + Transcript>(
function verify (line 158) | pub(crate) fn verify<R: RngCore + CryptoRng, T: Clone + Transcript>(
function write (line 212) | pub(crate) fn write<W: Write>(&self, w: &mut W) -> std::io::Result<()> {
function read (line 232) | pub(crate) fn read<R: Read>(r: &mut R, mut Re_0: Re<G0, G1>) -> std::io:...
FILE: crypto/dleq/src/cross_group/bits.rs
type BitSignature (line 21) | pub(crate) enum BitSignature {
method to_u8 (line 29) | pub(crate) const fn to_u8(&self) -> u8 {
method from (line 38) | pub(crate) const fn from(algorithm: u8) -> BitSignature {
method bits (line 48) | pub(crate) const fn bits(&self) -> u8 {
method ring_len (line 55) | pub(crate) const fn ring_len(&self) -> usize {
method aos_form (line 59) | fn aos_form<G0: PrimeGroup, G1: PrimeGroup>(&self) -> Re<G0, G1> {
type Bits (line 68) | pub(crate) struct Bits<
function transcript (line 85) | fn transcript<T: Transcript>(transcript: &mut T, i: usize, commitments: ...
function ring (line 92) | fn ring(pow_2: (G0, G1), commitments: (G0, G1)) -> Vec<(G0, G1)> {
function shift (line 100) | fn shift(pow_2: &mut (G0, G1)) {
function prove (line 107) | pub(crate) fn prove<R: RngCore + CryptoRng, T: Clone + Transcript>(
function verify (line 138) | pub(crate) fn verify<R: RngCore + CryptoRng, T: Clone + Transcript>(
function write (line 162) | pub(crate) fn write<W: Write>(&self, w: &mut W) -> std::io::Result<()> {
function read (line 169) | pub(crate) fn read<R: Read>(r: &mut R) -> std::io::Result<Self> {
FILE: crypto/dleq/src/cross_group/mod.rs
function black_box (line 37) | fn black_box<T>(val: T) -> T {
function u8_from_bool (line 41) | fn u8_from_bool(bit_ref: &mut bool) -> u8 {
function read_point (line 55) | pub(crate) fn read_point<R: Read, G: PrimeGroup>(r: &mut R) -> io::Resul...
type Generators (line 69) | pub struct Generators<G: PrimeGroup> {
function new (line 80) | pub fn new(primary: G, alt: G) -> Option<Generators<G>> {
function transcript (line 87) | fn transcript<T: Transcript>(&self, transcript: &mut T) {
type DLEqError (line 96) | pub enum DLEqError {
type __DLEqProof (line 114) | pub struct __DLEqProof<
function transcript (line 207) | pub(crate) fn transcript<T: Transcript>(
function blinding_key (line 220) | pub(crate) fn blinding_key<R: RngCore + CryptoRng, F: PrimeField>(
function reconstruct_keys (line 230) | fn reconstruct_keys(&self) -> (G0, G1) {
function prove_internal (line 245) | fn prove_internal<R: RngCore + CryptoRng, T: Clone + Transcript>(
function prove (line 347) | pub fn prove<R: RngCore + CryptoRng, T: Clone + Transcript, D: Digest + ...
function prove_without_bias (line 371) | pub fn prove_without_bias<R: RngCore + CryptoRng, T: Clone + Transcript>(
function verify (line 382) | pub fn verify<R: RngCore + CryptoRng, T: Clone + Transcript>(
function write (line 430) | pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
function read (line 443) | pub fn read<R: Read>(r: &mut R) -> io::Result<Self> {
FILE: crypto/dleq/src/cross_group/scalar.rs
function scalar_normalize (line 10) | pub fn scalar_normalize<F0: PrimeFieldBits + Zeroize, F1: PrimeFieldBits>(
function scalar_convert (line 52) | pub fn scalar_convert<F0: PrimeFieldBits + Zeroize, F1: PrimeFieldBits>(
function mutual_scalar_from_bytes (line 63) | pub fn mutual_scalar_from_bytes<F0: PrimeFieldBits + Zeroize, F1: PrimeF...
FILE: crypto/dleq/src/cross_group/schnorr.rs
type SchnorrPoK (line 26) | pub(crate) struct SchnorrPoK<G: PrimeGroup + Zeroize> {
function hra (line 34) | fn hra<T: Transcript>(transcript: &mut T, generator: G, R: G, A: G) -> G...
function prove (line 42) | pub(crate) fn prove<R: RngCore + CryptoRng, T: Transcript>(
function verify (line 59) | pub(crate) fn verify<R: RngCore + CryptoRng, T: Transcript>(
function write (line 79) | pub fn write<W: Write>(&self, w: &mut W) -> std::io::Result<()> {
function read (line 85) | pub fn read<R: Read>(r: &mut R) -> std::io::Result<SchnorrPoK<G>> {
FILE: crypto/dleq/src/lib.rs
function challenge (line 28) | pub(crate) fn challenge<T: Transcript, F: PrimeField>(transcript: &mut T...
function read_scalar (line 89) | fn read_scalar<R: Read, F: PrimeField>(r: &mut R) -> io::Result<F> {
type DLEqError (line 101) | pub enum DLEqError {
type DLEqProof (line 108) | pub struct DLEqProof<G: PrimeGroup<Scalar: Zeroize>> {
function transcript (line 115) | fn transcript<T: Transcript>(transcript: &mut T, generator: G, nonce: G,...
function prove (line 123) | pub fn prove<R: RngCore + CryptoRng, T: Transcript>(
function verify_statement (line 146) | fn verify_statement<T: Transcript>(
function verify (line 160) | pub fn verify<T: Transcript>(
function write (line 184) | pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
function read (line 191) | pub fn read<R: Read>(r: &mut R) -> io::Result<DLEqProof<G>> {
function serialize (line 197) | pub fn serialize(&self) -> Vec<u8> {
type MultiDLEqProof (line 210) | pub struct MultiDLEqProof<G: PrimeGroup<Scalar: Zeroize>> {
function prove (line 221) | pub fn prove<R: RngCore + CryptoRng, T: Transcript>(
function verify (line 264) | pub fn verify<T: Transcript>(
function write (line 298) | pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
function read (line 308) | pub fn read<R: Read>(r: &mut R, discrete_logs: usize) -> io::Result<Mult...
function serialize (line 319) | pub fn serialize(&self) -> Vec<u8> {
FILE: crypto/dleq/src/tests/cross_group/aos.rs
function test_aos_serialization (line 14) | fn test_aos_serialization<const RING_LEN: usize>(proof: &Aos<G0, G1, RIN...
function test_aos (line 21) | fn test_aos<const RING_LEN: usize>(default: &Re<G0, G1>) {
function test_aos_e (line 57) | fn test_aos_e() {
function test_aos_R (line 64) | fn test_aos_R() {
FILE: crypto/dleq/src/tests/cross_group/mod.rs
type G0 (line 28) | type G0 = ProjectivePoint;
type G1 (line 29) | type G1 = EdwardsPoint;
function transcript (line 31) | pub(crate) fn transcript() -> RecommendedTranscript {
function generators (line 35) | pub(crate) fn generators() -> (Generators<G0>, Generators<G1>) {
function test_rejection_sampling (line 156) | fn test_rejection_sampling() {
function test_remainder (line 175) | fn test_remainder() {
FILE: crypto/dleq/src/tests/cross_group/scalar.rs
function test_scalar (line 11) | fn test_scalar() {
FILE: crypto/dleq/src/tests/cross_group/schnorr.rs
function test_schnorr (line 17) | fn test_schnorr<G: PrimeGroup<Scalar: PrimeFieldBits + Zeroize> + Zeroiz...
function test_secp256k1 (line 36) | fn test_secp256k1() {
function test_ed25519 (line 41) | fn test_ed25519() {
FILE: crypto/dleq/src/tests/mod.rs
function generators (line 21) | fn generators() -> [k256::ProjectivePoint; 5] {
function test_dleq (line 45) | fn test_dleq() {
function test_multi_dleq (line 105) | fn test_multi_dleq() {
FILE: crypto/ed448/src/backend.rs
function black_box (line 6) | pub(crate) fn black_box<T>(val: T) -> T {
function black_box (line 13) | pub(crate) fn black_box<T>(val: T) -> T {
function u8_from_bool (line 19) | pub(crate) fn u8_from_bool(bit_ref: &mut bool) -> u8 {
FILE: crypto/ed448/src/ciphersuite.rs
type Shake256_114 (line 18) | pub struct Shake256_114(Shake256);
type BlockSize (line 20) | type BlockSize = <Shake256 as BlockSizeUser>::BlockSize;
method block_size (line 21) | fn block_size() -> usize {
type OutputSize (line 26) | type OutputSize = U114;
method output_size (line 27) | fn output_size() -> usize {
method update (line 32) | fn update(&mut self, data: &[u8]) {
method chain (line 35) | fn chain(mut self, data: impl AsRef<[u8]>) -> Self {
method finalize_fixed (line 41) | fn finalize_fixed(self) -> Output<Self> {
method finalize_into (line 46) | fn finalize_into(self, out: &mut Output<Self>) {
type Ed448 (line 59) | pub struct Ed448;
type F (line 61) | type F = Scalar;
type G (line 62) | type G = Point;
type H (line 63) | type H = Shake256_114;
constant ID (line 65) | const ID: &'static [u8] = b"ed448";
method generator (line 67) | fn generator() -> Self::G {
method hash_to_F (line 71) | fn hash_to_F(dst: &[u8], data: &[u8]) -> Self::F {
function test_ed448 (line 77) | fn test_ed448() {
FILE: crypto/ed448/src/field.rs
constant MODULUS_STR (line 8) | const MODULUS_STR: &str = concat!(
type ResidueType (line 14) | pub(crate) type ResidueType = Residue<FieldModulus, { FieldModulus::LIMB...
type FieldElement (line 18) | pub struct FieldElement(pub(crate) ResidueType);
constant MODULUS (line 23) | pub(crate) const MODULUS: U448 = U448::from_be_hex(MODULUS_STR);
constant WIDE_MODULUS (line 25) | const WIDE_MODULUS: U896 = U896::from_be_hex(concat!(
constant Q_4 (line 32) | pub(crate) const Q_4: FieldElement = FieldElement(ResidueType::new(
function test_field (line 51) | fn test_field() {
FILE: crypto/ed448/src/point.rs
constant D (line 25) | const D: FieldElement =
constant G_Y (line 28) | const G_Y: FieldElement = FieldElement(Residue::new(&U448::from_be_hex(c...
constant G_X (line 33) | const G_X: FieldElement = FieldElement(Residue::new(&U448::from_be_hex(c...
function recover_x (line 38) | fn recover_x(y: FieldElement) -> CtOption<FieldElement> {
type Point (line 54) | pub struct Point {
type Output (line 135) | type Output = Point;
method add (line 136) | fn add(self, other: &Point) -> Point {
method add_assign (line 142) | fn add_assign(&mut self, other: &Point) {
type Output (line 169) | type Output = Point;
method sub (line 170) | fn sub(self, other: &Point) -> Point {
method sub_assign (line 176) | fn sub_assign(&mut self, other: &Point) {
method sum (line 218) | fn sum<I: Iterator<Item = Point>>(iter: I) -> Point {
method sum (line 228) | fn sum<I: Iterator<Item = &'a Point>>(iter: I) -> Point {
type Output (line 234) | type Output = Point;
method mul (line 235) | fn mul(self, mut other: Scalar) -> Point {
method mul_assign (line 276) | fn mul_assign(&mut self, other: Scalar) {
type Output (line 282) | type Output = Point;
method mul (line 283) | fn mul(self, other: &Scalar) -> Point {
method mul_assign (line 289) | fn mul_assign(&mut self, other: &Scalar) {
method is_torsion_free (line 295) | fn is_torsion_free(&self) -> Choice {
method zeroize (line 61) | fn zeroize(&mut self) {
constant G (line 72) | const G: Point = Point { x: G_X, y: G_Y, z: FieldElement::ONE };
method ct_eq (line 75) | fn ct_eq(&self, other: &Self) -> Choice {
method eq (line 87) | fn eq(&self, other: &Point) -> bool {
method conditional_select (line 95) | fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
type Output (line 105) | type Output = Point;
method add (line 106) | fn add(self, other: Self) -> Self {
method add_assign (line 129) | fn add_assign(&mut self, other: Point) {
type Output (line 148) | type Output = Point;
method neg (line 149) | fn neg(self) -> Self {
type Output (line 155) | type Output = Point;
method sub (line 157) | fn sub(self, other: Self) -> Self {
method sub_assign (line 163) | fn sub_assign(&mut self, other: Point) {
type Scalar (line 182) | type Scalar = Scalar;
method random (line 183) | fn random(mut rng: impl RngCore) -> Self {
method identity (line 194) | fn identity() -> Self {
method generator (line 197) | fn generator() -> Self {
method is_identity (line 200) | fn is_identity(&self) -> Choice {
method double (line 203) | fn double(&self) -> Self {
type Repr (line 301) | type Repr = <FieldElement as PrimeField>::Repr;
method from_bytes (line 303) | fn from_bytes(bytes: &Self::Repr) -> CtOption<Self> {
method from_bytes_unchecked (line 321) | fn from_bytes_unchecked(bytes: &Self::Repr) -> CtOption<Self> {
method to_bytes (line 325) | fn to_bytes(&self) -> Self::Repr {
function test_group (line 340) | fn test_group() {
function generator (line 345) | fn generator() {
function torsion (line 352) | fn torsion() {
function vector (line 371) | fn vector() {
function random (line 415) | fn random() {
FILE: crypto/ed448/src/scalar.rs
constant MODULUS_STR (line 8) | const MODULUS_STR: &str = concat!(
type ResidueType (line 14) | type ResidueType = Residue<ScalarModulus, { ScalarModulus::LIMBS }>;
type Scalar (line 18) | pub struct Scalar(pub(crate) ResidueType);
method wide_reduce (line 56) | pub fn wide_reduce(bytes: [u8; 114]) -> Scalar {
constant MODULUS (line 23) | pub(crate) const MODULUS: U448 = U448::from_be_hex(MODULUS_STR);
constant WIDE_MODULUS (line 25) | const WIDE_MODULUS: U896 = U896::from_be_hex(concat!(
constant WIDE_REDUCTION_MODULUS (line 32) | const WIDE_REDUCTION_MODULUS: NonZero<U1024> = NonZero::from_uint(U1024:...
function test_scalar (line 67) | fn test_scalar() {
FILE: crypto/ff-group-tests/src/field.rs
function test_eq (line 6) | pub fn test_eq<F: Field>() {
function test_conditional_select (line 21) | pub fn test_conditional_select<F: Field>() {
function test_add (line 29) | pub fn test_add<F: Field>() {
function test_sum (line 41) | pub fn test_sum<F: Field>() {
function test_sub (line 53) | pub fn test_sub<F: Field>() {
function test_neg (line 64) | pub fn test_neg<F: Field>() {
function test_mul (line 72) | pub fn test_mul<F: Field>() {
function test_product (line 81) | pub fn test_product<F: Field>() {
function test_square (line 95) | pub fn test_square<F: Field>() {
function test_invert (line 105) | pub fn test_invert<F: Field>() {
function test_sqrt (line 115) | pub fn test_sqrt<F: Field>() {
function test_is_zero (line 171) | pub fn test_is_zero<F: Field>() {
function test_cube (line 177) | pub fn test_cube<F: Field>() {
function test_random (line 185) | pub fn test_random<R: RngCore, F: Field>(rng: &mut R) {
function test_field (line 204) | pub fn test_field<R: RngCore, F: Field>(rng: &mut R) {
FILE: crypto/ff-group-tests/src/group.rs
function test_eq (line 11) | pub fn test_eq<G: Group>() {
function test_identity (line 18) | pub fn test_identity<G: Group>() {
function test_generator (line 32) | pub fn test_generator<G: Group>() {
function test_double (line 41) | pub fn test_double<G: Group>() {
function test_add (line 51) | pub fn test_add<G: Group>() {
function test_sum (line 67) | pub fn test_sum<G: Group>() {
function test_neg (line 81) | pub fn test_neg<G: Group>() {
function test_sub (line 91) | pub fn test_sub<G: Group>() {
function test_mul (line 98) | pub fn test_mul<G: Group>() {
function test_order (line 110) | pub fn test_order<G: Group>() {
function test_random (line 117) | pub fn test_random<R: RngCore, G: Group>(rng: &mut R) {
function test_group (line 139) | pub fn test_group<R: RngCore, G: Group>(rng: &mut R) {
function test_encoding (line 156) | pub fn test_encoding<G: PrimeGroup>() {
function test_prime_group (line 176) | pub fn test_prime_group<R: RngCore, G: PrimeGroup>(rng: &mut R) {
function test_prime_group_bits (line 183) | pub fn test_prime_group_bits<R: RngCore, G: PrimeGroup<Scalar: PrimeFiel...
function test_k256 (line 197) | fn test_k256() {
function test_p256 (line 202) | fn test_p256() {
function test_bls12_381 (line 207) | fn test_bls12_381() {
function test_pallas_vesta (line 213) | fn test_pallas_vesta() {
FILE: crypto/ff-group-tests/src/prime_field.rs
function test_zero (line 8) | pub fn test_zero<F: PrimeField>() {
function test_one (line 13) | pub fn test_one<F: PrimeField>() {
function test_from_u64 (line 18) | pub fn test_from_u64<F: PrimeField>() {
function test_from_u128 (line 26) | pub fn test_from_u128<F: PrimeField>() {
function test_is_odd (line 38) | pub fn test_is_odd<F: PrimeField>() {
function test_encoding (line 64) | pub fn test_encoding<F: PrimeField>() {
function test_prime_field (line 106) | pub fn test_prime_field<R: RngCore, F: PrimeField>(rng: &mut R) {
function test_to_le_bits (line 123) | pub fn test_to_le_bits<F: PrimeField + PrimeFieldBits>() {
function test_char_le_bits (line 150) | pub fn test_char_le_bits<F: PrimeField + PrimeFieldBits>() {
function test_num_bits (line 168) | pub fn test_num_bits<F: PrimeField + PrimeFieldBits>() {
function test_capacity (line 184) | pub fn test_capacity<F: PrimeField + PrimeFieldBits>() {
function pow (line 215) | fn pow<F: PrimeFieldBits>(base: F, exp: F) -> F {
function test_pow (line 228) | pub fn test_pow<F: PrimeFieldBits>() {
function test_inv_consts (line 287) | pub fn test_inv_consts<F: PrimeFieldBits>() {
function test_s (line 297) | pub fn test_s<F: PrimeFieldBits>() {
function test_root_of_unity (line 311) | pub fn test_root_of_unity<F: PrimeFieldBits>() {
function test_delta (line 341) | pub fn test_delta<F: PrimeFieldBits>() {
function test_prime_field_bits (line 350) | pub fn test_prime_field_bits<R: RngCore, F: PrimeFieldBits>(rng: &mut R) {
FILE: crypto/frost/src/algorithm.rs
type WriteAddendum (line 13) | pub trait WriteAddendum {
method write (line 14) | fn write<W: Write>(&self, writer: &mut W) -> io::Result<()>;
method write (line 18) | fn write<W: Write>(&self, _: &mut W) -> io::Result<()> {
type Addendum (line 24) | pub trait Addendum: Send + Sync + Clone + PartialEq + Debug + WriteAdden...
type Algorithm (line 28) | pub trait Algorithm<C: Curve>: Send + Sync {
method transcript (line 38) | fn transcript(&mut self) -> &mut Self::Transcript;
method nonces (line 49) | fn nonces(&self) -> Vec<Vec<C::G>>;
method preprocess_addendum (line 52) | fn preprocess_addendum<R: RngCore + CryptoRng>(
method read_addendum (line 59) | fn read_addendum<R: Read>(&self, reader: &mut R) -> io::Result<Self::A...
method process_addendum (line 62) | fn process_addendum(
method sign_share (line 73) | fn sign_share(
method verify (line 83) | fn verify(&self, group_key: C::G, nonces: &[Vec<C::G>], sum: C::F) -> ...
method verify_share (line 89) | fn verify_share(
type IetfTranscript (line 103) | pub struct IetfTranscript(pub(crate) Vec<u8>);
type Hram (line 130) | pub trait Hram<C: Curve>: Send + Sync + Clone {
method hram (line 134) | fn hram(R: &C::G, A: &C::G, m: &[u8]) -> C::F;
type Schnorr (line 141) | pub struct Schnorr<C: Curve, T: Sync + Clone + Debug + Transcript, H: Hr...
type IetfSchnorr (line 155) | pub type IetfSchnorr<C, H> = Schnorr<C, IetfTranscript, H>;
function new (line 159) | pub fn new(transcript: T) -> Schnorr<C, T, H> {
function ietf (line 168) | pub fn ietf() -> IetfSchnorr<C, H> {
type Transcript (line 174) | type Transcript = T;
type Challenge (line 105) | type Challenge = Vec<u8>;
method new (line 107) | fn new(_: &'static [u8]) -> IetfTranscript {
method domain_separate (line 111) | fn domain_separate(&mut self, _: &[u8]) {}
method append_message (line 113) | fn append_message<M: AsRef<[u8]>>(&mut self, _: &'static [u8], message...
method challenge (line 117) | fn challenge(&mut self, _: &'static [u8]) -> Vec<u8> {
method rng_seed (line 122) | fn rng_seed(&mut self, _: &'static [u8]) -> [u8; 32] {
type Addendum (line 175) | type Addendum = ();
type Signature (line 176) | type Signature = SchnorrSignature<C>;
function transcript (line 178) | fn transcript(&mut self) -> &mut Self::Transcript {
function nonces (line 182) | fn nonces(&self) -> Vec<Vec<C::G>> {
function preprocess_addendum (line 186) | fn preprocess_addendum<R: RngCore + CryptoRng>(&mut self, _: &mut R, _: ...
function read_addendum (line 188) | fn read_addendum<R: Read>(&self, _: &mut R) -> io::Result<Self::Addendum> {
function process_addendum (line 192) | fn process_addendum(
function sign_share (line 201) | fn sign_share(
function verify (line 214) | fn verify(&self, group_key: C::G, nonces: &[Vec<C::G>], sum: C::F) -> Op...
function verify_share (line 219) | fn verify_share(
FILE: crypto/frost/src/curve/ed448.rs
constant CONTEXT (line 9) | const CONTEXT: &[u8] = b"FROST-ED448-SHAKE256-v1";
constant CONTEXT (line 12) | const CONTEXT: &'static [u8] = CONTEXT;
type Ietf8032Ed448Hram (line 17) | pub(crate) struct Ietf8032Ed448Hram;
method hram (line 20) | pub(crate) fn hram(context: &[u8], R: &Point, A: &Point, m: &[u8]) -> ...
type IetfEd448Hram (line 39) | pub struct IetfEd448Hram;
method hram (line 42) | fn hram(R: &Point, A: &Point, m: &[u8]) -> Scalar {
FILE: crypto/frost/src/curve/mod.rs
type Curve (line 44) | pub trait Curve: Ciphersuite {
constant CONTEXT (line 46) | const CONTEXT: &'static [u8];
method hash (line 49) | fn hash(dst: &[u8], data: &[u8]) -> Output<Self::H> {
method hash_to_F (line 56) | fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F {
method hash_msg (line 61) | fn hash_msg(msg: &[u8]) -> Output<Self::H> {
method hash_commitments (line 66) | fn hash_commitments(commitments: &[u8]) -> Output<Self::H> {
method hash_binding_factor (line 89) | fn hash_binding_factor(binding: &[u8]) -> Self::F {
method random_nonce (line 94) | fn random_nonce<R: RngCore + CryptoRng>(
method read_G (line 125) | fn read_G<R: Read>(reader: &mut R) -> io::Result<Self::G> {
FILE: crypto/frost/src/lib.rs
type FrostError (line 28) | pub enum FrostError {
function validate_map (line 50) | pub fn validate_map<T>(
FILE: crypto/frost/src/nonce.rs
type Nonce (line 28) | pub(crate) struct Nonce<C: Curve>(pub(crate) [Zeroizing<C::F>; 2]);
type GeneratorCommitments (line 32) | pub(crate) struct GeneratorCommitments<C: Curve>(pub(crate) [C::G; 2]);
function read (line 34) | fn read<R: Read>(reader: &mut R) -> io::Result<GeneratorCommitments<C>> {
function write (line 38) | fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
type NonceCommitments (line 46) | pub(crate) struct NonceCommitments<C: Curve> {
function new (line 53) | pub(crate) fn new<R: RngCore + CryptoRng>(
function read (line 74) | fn read<R: Read>(reader: &mut R, generators: &[C::G]) -> io::Result<Nonc...
function write (line 82) | fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
function transcript (line 89) | fn transcript<T: Transcript>(&self, t: &mut T) {
type Commitments (line 100) | pub(crate) struct Commitments<C: Curve> {
function new (line 107) | pub(crate) fn new<R: RngCore + CryptoRng>(
function transcript (line 126) | pub(crate) fn transcript<T: Transcript>(&self, t: &mut T) {
function read (line 133) | pub(crate) fn read<R: Read>(reader: &mut R, generators: &[Vec<C::G>]) ->...
function write (line 141) | pub(crate) fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
type IndividualBinding (line 149) | pub(crate) struct IndividualBinding<C: Curve> {
type BindingFactor (line 154) | pub(crate) struct BindingFactor<C: Curve>(pub(crate) HashMap<Participant...
function insert (line 157) | pub(crate) fn insert(&mut self, i: Participant, commitments: Commitments...
function calculate_binding_factors (line 161) | pub(crate) fn calculate_binding_factors<T: Clone + Transcript>(&mut self...
function binding_factors (line 175) | pub(crate) fn binding_factors(&self, i: Participant) -> &[C::F] {
function bound (line 180) | pub(crate) fn bound(&self, l: Participant) -> Vec<Vec<C::G>> {
function nonces (line 194) | pub(crate) fn nonces(&self, planned_nonces: &[Vec<C::G>]) -> Vec<Vec<C::...
FILE: crypto/frost/src/sign.rs
type Writable (line 30) | pub trait Writable {
method write (line 31) | fn write<W: Write>(&self, writer: &mut W) -> io::Result<()>;
method serialize (line 33) | fn serialize(&self) -> Vec<u8> {
method write (line 41) | fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
method write (line 77) | fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
method write (line 185) | fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
type Params (line 51) | struct Params<C: Curve, A: Algorithm<C>> {
function new (line 59) | fn new(algorithm: A, keys: ThresholdKeys<C>) -> Params<C, A> {
function multisig_params (line 63) | fn multisig_params(&self) -> ThresholdParams {
type Preprocess (line 70) | pub struct Preprocess<C: Curve, A: Addendum> {
type CachedPreprocess (line 92) | pub struct CachedPreprocess(pub Zeroizing<[u8; 32]>);
type PreprocessMachine (line 95) | pub trait PreprocessMachine: Send {
method preprocess (line 106) | fn preprocess<R: RngCore + CryptoRng>(self, rng: &mut R)
type Preprocess (line 167) | type Preprocess = Preprocess<C, A::Addendum>;
type Signature (line 168) | type Signature = A::Signature;
type SignMachine (line 169) | type SignMachine = AlgorithmSignMachine<C, A>;
method preprocess (line 171) | fn preprocess<R: RngCore + CryptoRng>(
type AlgorithmMachine (line 111) | pub struct AlgorithmMachine<C: Curve, A: Algorithm<C>> {
function new (line 117) | pub fn new(algorithm: A, keys: ThresholdKeys<C>) -> AlgorithmMachine<C, ...
function seeded_preprocess (line 121) | fn seeded_preprocess(
function unsafe_override_preprocess (line 147) | pub(crate) fn unsafe_override_preprocess(
type SignatureShare (line 183) | pub struct SignatureShare<C: Curve>(C::F);
function invalidate (line 191) | pub(crate) fn invalidate(&mut self) {
type SignMachine (line 197) | pub trait SignMachine<S>: Send + Sync + Sized {
method cache (line 214) | fn cache(self) -> CachedPreprocess;
method from_cache (line 220) | fn from_cache(
method read_preprocess (line 230) | fn read_preprocess<R: Read>(&self, reader: &mut R) -> io::Result<Self:...
method sign (line 237) | fn sign(
type AlgorithmSignMachine (line 246) | pub struct AlgorithmSignMachine<C: Curve, A: Algorithm<C>> {
type Params (line 258) | type Params = A;
type Keys (line 259) | type Keys = ThresholdKeys<C>;
type Preprocess (line 260) | type Preprocess = Preprocess<C, A::Addendum>;
type SignatureShare (line 261) | type SignatureShare = SignatureShare<C>;
type SignatureMachine (line 262) | type SignatureMachine = AlgorithmSignatureMachine<C, A>;
method read_share (line 420) | fn read_share<R: Read>(&self, reader: &mut R) -> io::Result<Self::Sign...
method complete (line 424) | fn complete(self, shares: HashMap<Participant, Self::SignatureShare>) ...
function cache (line 264) | fn cache(self) -> CachedPreprocess {
function from_cache (line 268) | fn from_cache(
function read_preprocess (line 276) | fn read_preprocess<R: Read>(&self, reader: &mut R) -> io::Result<Self::P...
function sign (line 283) | fn sign(
type SignatureMachine (line 415) | pub trait SignatureMachine<S>: Send + Sync {
method read_share (line 420) | fn read_share<R: Read>(&self, reader: &mut R) -> io::Result<Self::Sign...
method complete (line 424) | fn complete(self, shares: HashMap<Participant, Self::SignatureShare>) ...
type AlgorithmSignatureMachine (line 431) | pub struct AlgorithmSignatureMachine<C: Curve, A: Algorithm<C>> {
type SignatureShare (line 441) | type SignatureShare = SignatureShare<C>;
function read_share (line 443) | fn read_share<R: Read>(&self, reader: &mut R) -> io::Result<SignatureSha...
function complete (line 447) | fn complete(
FILE: crypto/frost/src/tests/literal/dalek.rs
function ristretto_vectors (line 10) | fn ristretto_vectors() {
function ed25519_vectors (line 24) | fn ed25519_vectors() {
FILE: crypto/frost/src/tests/literal/ed448.rs
function ed448_8032_vector (line 21) | fn ed448_8032_vector() {
function ed448_vectors (line 57) | fn ed448_vectors() {
FILE: crypto/frost/src/tests/literal/kp256.rs
function secp256k1_vectors (line 13) | fn secp256k1_vectors() {
function p256_vectors (line 27) | fn p256_vectors() {
FILE: crypto/frost/src/tests/mod.rs
constant PARTICIPANTS (line 26) | pub const PARTICIPANTS: u16 = 5;
constant THRESHOLD (line 28) | pub const THRESHOLD: u16 = ((PARTICIPANTS * 2) / 3) + 1;
function key_gen (line 31) | pub fn key_gen<R: RngCore + CryptoRng, C: Ciphersuite>(
function clone_without (line 43) | pub fn clone_without<K: Clone + core::cmp::Eq + core::hash::Hash, V: Clo...
function algorithm_machines_without_clone (line 53) | pub fn algorithm_machines_without_clone<R: RngCore, C: Curve, A: Algorit...
function algorithm_machines (line 77) | pub fn algorithm_machines<R: RngCore, C: Curve, A: Clone + Algorithm<C>>(
function preprocess (line 93) | pub(crate) fn preprocess<
function preprocess_and_shares (line 123) | pub(crate) fn preprocess_and_shares<
function sign_internal (line 155) | fn sign_internal<
function sign_without_caching (line 181) | pub fn sign_without_caching<R: RngCore + CryptoRng, M: PreprocessMachine>(
function sign_without_clone (line 191) | pub fn sign_without_clone<R: RngCore + CryptoRng, M: PreprocessMachine>(
function sign (line 221) | pub fn sign<
function test_schnorr_with_keys (line 236) | pub fn test_schnorr_with_keys<R: RngCore + CryptoRng, C: Curve, H: Hram<...
function test_schnorr (line 249) | pub fn test_schnorr<R: RngCore + CryptoRng, C: Curve, H: Hram<C>>(rng: &...
function test_offset_schnorr (line 255) | pub fn test_offset_schnorr<R: RngCore + CryptoRng, C: Curve, H: Hram<C>>...
function test_schnorr_blame (line 276) | pub fn test_schnorr_blame<R: RngCore + CryptoRng, C: Curve, H: Hram<C>>(...
function test_ciphersuite (line 298) | pub fn test_ciphersuite<R: RngCore + CryptoRng, C: Curve, H: Hram<C>>(rn...
FILE: crypto/frost/src/tests/nonces.rs
type MultiNonce (line 19) | struct MultiNonce<C: Curve> {
function new (line 25) | fn new() -> MultiNonce<C> {
function nonces (line 33) | fn nonces<C: Curve>() -> Vec<Vec<C::G>> {
function verify_nonces (line 40) | fn verify_nonces<C: Curve>(nonces: &[Vec<C::G>]) {
type Transcript (line 59) | type Transcript = RecommendedTranscript;
type Addendum (line 60) | type Addendum = ();
type Signature (line 61) | type Signature = ();
function transcript (line 63) | fn transcript(&mut self) -> &mut Self::Transcript {
function nonces (line 67) | fn nonces(&self) -> Vec<Vec<C::G>> {
function preprocess_addendum (line 71) | fn preprocess_addendum<R: RngCore + CryptoRng>(&mut self, _: &mut R, _: ...
function read_addendum (line 73) | fn read_addendum<R: Read>(&self, _: &mut R) -> io::Result<Self::Addendum> {
function process_addendum (line 77) | fn process_addendum(
function sign_share (line 86) | fn sign_share(
function verify (line 125) | fn verify(&self, _: C::G, nonces: &[Vec<C::G>], sum: C::F) -> Option<Sel...
function verify_share (line 140) | fn verify_share(&self, _: C::G, _: &[Vec<C::G>], _: C::F) -> Result<Vec<...
function test_multi_nonce (line 151) | pub fn test_multi_nonce<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
FILE: crypto/frost/src/tests/vectors.rs
type Vectors (line 26) | pub struct Vectors {
method from (line 50) | fn from(value: serde_json::Value) -> Vectors {
function vectors_to_multisig_keys (line 108) | fn vectors_to_multisig_keys<C: Curve>(vectors: &Vectors) -> HashMap<Part...
function test_with_vectors (line 145) | pub fn test_with_vectors<R: RngCore + CryptoRng, C: Curve, H: Hram<C>>(
FILE: crypto/multiexp/src/batch.rs
function flat (line 15) | fn flat<Id: Copy + Zeroize, G: Zeroize + Group<Scalar: Zeroize + PrimeFi...
type BatchVerifier (line 24) | pub struct BatchVerifier<Id: Copy + Zeroize, G: Zeroize + Group<Scalar: ...
function new (line 34) | pub fn new(capacity: usize) -> BatchVerifier<Id, G> {
function queue (line 39) | pub fn queue<R: RngCore + CryptoRng, I: IntoIterator<Item = (G::Scalar, ...
function verify (line 93) | pub fn verify(&self) -> bool {
function verify_vartime (line 99) | pub fn verify_vartime(&self) -> bool {
function blame_vartime (line 108) | pub fn blame_vartime(&self) -> Option<Id> {
function verify_with_vartime_blame (line 127) | pub fn verify_with_vartime_blame(&self) -> Result<(), Id> {
function verify_vartime_with_vartime_blame (line 137) | pub fn verify_vartime_with_vartime_blame(&self) -> Result<(), Id> {
FILE: crypto/multiexp/src/lib.rs
function black_box (line 35) | fn black_box<T>(val: T) -> T {
function u8_from_bool (line 39) | fn u8_from_bool(bit_ref: &mut bool) -> u8 {
function prep_bits (line 54) | pub(crate) fn prep_bits<G: Group<Scalar: PrimeFieldBits>>(
type Algorithm (line 77) | enum Algorithm {
function algorithm (line 129) | fn algorithm(len: usize) -> Algorithm {
function multiexp (line 180) | pub fn multiexp<G: Zeroize + Group<Scalar: Zeroize + PrimeFieldBits>>(
function multiexp_vartime (line 194) | pub fn multiexp_vartime<G: Group<Scalar: PrimeFieldBits>>(pairs: &[(G::S...
FILE: crypto/multiexp/src/pippenger.rs
function pippenger (line 10) | pub(crate) fn pippenger<G: Zeroize + Group<Scalar: PrimeFieldBits>>(
function pippenger_vartime (line 42) | pub(crate) fn pippenger_vartime<G: Group<Scalar: PrimeFieldBits>>(
FILE: crypto/multiexp/src/straus.rs
function prep_tables (line 11) | fn prep_tables<G: Group>(pairs: &[(G::Scalar, G)], window: u8) -> Vec<Ve...
function straus (line 27) | pub(crate) fn straus<G: Zeroize + Group<Scalar: PrimeFieldBits>>(
function straus_vartime (line 52) | pub(crate) fn straus_vartime<G: Group<Scalar: PrimeFieldBits>>(
FILE: crypto/multiexp/src/tests/batch.rs
function test_batch (line 12) | pub(crate) fn test_batch<G: Zeroize + Group<Scalar: Zeroize + PrimeField...
FILE: crypto/multiexp/src/tests/mod.rs
function benchmark_internal (line 21) | fn benchmark_internal<G: Zeroize + Group<Scalar: Zeroize + PrimeFieldBit...
function test_multiexp (line 86) | fn test_multiexp<G: Zeroize + Group<Scalar: Zeroize + PrimeFieldBits>>() {
function test_secp256k1 (line 129) | fn test_secp256k1() {
function test_ed25519 (line 136) | fn test_ed25519() {
function benchmark (line 144) | fn benchmark() {
FILE: crypto/schnorr/src/aggregate.rs
function weight (line 22) | fn weight<D: Send + Clone + SecureDigest, F: PrimeField>(digest: &mut Di...
type SchnorrAggregate (line 70) | pub struct SchnorrAggregate<C: Ciphersuite> {
function read (line 77) | pub fn read<R: Read>(reader: &mut R) -> io::Result<Self> {
function write (line 93) | pub fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
function serialize (line 107) | pub fn serialize(&self) -> Vec<u8> {
function Rs (line 114) | pub fn Rs(&self) -> &[C::G] {
function verify (line 127) | pub fn verify(&self, dst: &'static [u8], keys_and_challenges: &[(C::G, C...
type SchnorrAggregator (line 152) | pub struct SchnorrAggregator<C: Ciphersuite> {
function new (line 162) | pub fn new(dst: &'static [u8]) -> Self {
function aggregate (line 169) | pub fn aggregate(&mut self, challenge: C::F, sig: SchnorrSignature<C>) {
function complete (line 175) | pub fn complete(mut self) -> Option<SchnorrAggregate<C>> {
FILE: crypto/schnorr/src/lib.rs
type SchnorrSignature (line 44) | pub struct SchnorrSignature<C: Ciphersuite> {
function read (line 51) | pub fn read<R: Read>(reader: &mut R) -> io::Result<Self> {
function write (line 56) | pub fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
function serialize (line 62) | pub fn serialize(&self) -> Vec<u8> {
function sign (line 74) | pub fn sign(
function batch_statements (line 88) | pub fn batch_statements(&self, public_key: C::G, challenge: C::F) -> [(C...
function verify (line 108) | pub fn verify(&self, public_key: C::G, challenge: C::F) -> bool {
function batch_verify (line 117) | pub fn batch_verify<R: RngCore + CryptoRng, I: Copy + Zeroize>(
FILE: crypto/schnorr/src/tests/mod.rs
function sign (line 19) | pub(crate) fn sign<C: Ciphersuite>() {
function verify (line 30) | pub(crate) fn verify<C: Ciphersuite>() {
function batch_verify (line 35) | pub(crate) fn batch_verify<C: Ciphersuite>() {
function aggregate (line 81) | pub(crate) fn aggregate<C: Ciphersuite>() {
function test (line 117) | fn test() {
FILE: crypto/schnorr/src/tests/rfc8032.rs
constant VECTORS (line 15) | const VECTORS: [(&str, &str, &str); 5] = [
function test_rfc8032 (line 48) | fn test_rfc8032() {
FILE: crypto/schnorrkel/src/lib.rs
type RistrettoPoint (line 31) | type RistrettoPoint = <Ristretto as Ciphersuite>::G;
type Scalar (line 32) | type Scalar = <Ristretto as Ciphersuite>::F;
type SchnorrkelHram (line 38) | struct SchnorrkelHram;
method hram (line 41) | fn hram(R: &RistrettoPoint, A: &RistrettoPoint, m: &[u8]) -> Scalar {
type Schnorrkel (line 58) | pub struct Schnorrkel {
method new (line 68) | pub fn new(context: &'static [u8]) -> Schnorrkel {
type Transcript (line 78) | type Transcript = MerlinTranscript;
type Addendum (line 79) | type Addendum = ();
type Signature (line 80) | type Signature = Signature;
method transcript (line 82) | fn transcript(&mut self) -> &mut Self::Transcript {
method nonces (line 86) | fn nonces(&self) -> Vec<Vec<<Ristretto as Ciphersuite>::G>> {
method preprocess_addendum (line 90) | fn preprocess_addendum<R: RngCore + CryptoRng>(
method read_addendum (line 97) | fn read_addendum<R: Read>(&self, _: &mut R) -> io::Result<Self::Addend...
method process_addendum (line 101) | fn process_addendum(
method sign_share (line 110) | fn sign_share(
method verify (line 132) | fn verify(
method verify_share (line 148) | fn verify_share(
FILE: crypto/schnorrkel/src/tests.rs
function test (line 14) | fn test() {
FILE: crypto/transcript/src/lib.rs
type Transcript (line 28) | pub trait Transcript: Send + Clone {
method new (line 32) | fn new(name: &'static [u8]) -> Self;
method domain_separate (line 35) | fn domain_separate(&mut self, label: &'static [u8]);
method append_message (line 38) | fn append_message<M: AsRef<[u8]>>(&mut self, label: &'static [u8], mes...
method challenge (line 44) | fn challenge(&mut self, label: &'static [u8]) -> Self::Challenge;
method rng_seed (line 53) | fn rng_seed(&mut self, label: &'static [u8]) -> [u8; 32];
type Challenge (line 108) | type Challenge = Output<D>;
method new (line 110) | fn new(name: &'static [u8]) -> Self {
method domain_separate (line 116) | fn domain_separate(&mut self, label: &'static [u8]) {
method append_message (line 120) | fn append_message<M: AsRef<[u8]>>(&mut self, label: &'static [u8], mes...
method challenge (line 125) | fn challenge(&mut self, label: &'static [u8]) -> Self::Challenge {
method rng_seed (line 136) | fn rng_seed(&mut self, label: &'static [u8]) -> [u8; 32] {
type DigestTranscriptMember (line 57) | enum DigestTranscriptMember {
method as_u8 (line 68) | fn as_u8(&self) -> u8 {
type SecureDigest (line 83) | pub trait SecureDigest: Digest + HashMarker {}
type DigestTranscript (line 96) | pub struct DigestTranscript<D: Send + Clone + SecureDigest>(D);
function append (line 99) | fn append(&mut self, kind: DigestTranscriptMember, value: &[u8]) {
method zeroize (line 150) | fn zeroize(&mut self) {
type RecommendedTranscript (line 177) | pub type RecommendedTranscript = DigestTranscript<blake2::Blake2b512>;
FILE: crypto/transcript/src/merlin.rs
type MerlinTranscript (line 16) | pub struct MerlinTranscript(merlin::Transcript);
method fmt (line 19) | fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), core::fmt::Error> {
type Challenge (line 29) | type Challenge = [u8; 64];
method new (line 31) | fn new(name: &'static [u8]) -> Self {
method domain_separate (line 35) | fn domain_separate(&mut self, label: &'static [u8]) {
method append_message (line 39) | fn append_message<M: AsRef<[u8]>>(&mut self, label: &'static [u8], messa...
method challenge (line 47) | fn challenge(&mut self, label: &'static [u8]) -> Self::Challenge {
method rng_seed (line 53) | fn rng_seed(&mut self, label: &'static [u8]) -> [u8; 32] {
FILE: crypto/transcript/src/tests.rs
function test_transcript (line 6) | pub fn test_transcript<T: Transcript<Challenge: PartialEq>>() {
function test_digest (line 87) | fn test_digest() {
function test_recommended (line 94) | fn test_recommended() {
function test_merlin (line 100) | fn test_merlin() {
FILE: message-queue/src/client.rs
type MessageQueue (line 23) | pub struct MessageQueue {
method new (line 31) | pub fn new(
method from_env (line 46) | pub fn from_env(service: Service) -> MessageQueue {
method send (line 69) | async fn send(socket: &mut TcpStream, msg: MessageQueueRequest) -> bool {
method queue (line 82) | pub async fn queue(&self, metadata: Metadata, msg: Vec<u8>) {
method next (line 120) | pub async fn next(&self, from: Service) -> QueuedMessage {
method ack (line 208) | pub async fn ack(&self, from: Service, id: u64) {
FILE: message-queue/src/main.rs
type Db (line 24) | pub(crate) type Db = Arc<serai_db::ParityDb>;
type Db (line 26) | pub(crate) type Db = serai_db::RocksDB;
function queue_message (line 60) | pub(crate) fn queue_message(
function get_next_message (line 120) | pub(crate) fn get_next_message(from: Service, to: Service) -> Option<Que...
function ack_message (line 132) | pub(crate) fn ack_message(from: Service, to: Service, id: u64, sig: Schn...
function main (line 151) | async fn main() {
FILE: message-queue/src/messages.rs
type Service (line 10) | pub enum Service {
type QueuedMessage (line 16) | pub struct QueuedMessage {
type Metadata (line 24) | pub struct Metadata {
type MessageQueueRequest (line 31) | pub enum MessageQueueRequest {
function message_challenge (line 37) | pub fn message_challenge(
function ack_challenge (line 58) | pub fn ack_challenge(
FILE: message-queue/src/queue.rs
type Queue (line 6) | pub(crate) struct Queue<D: Db>(pub(crate) D, pub(crate) Service, pub(cra...
function key (line 8) | fn key(domain: &'static [u8], key: impl AsRef<[u8]>) -> Vec<u8> {
function message_count_key (line 12) | fn message_count_key(&self) -> Vec<u8> {
function message_count (line 15) | pub(crate) fn message_count(&self) -> u64 {
function last_acknowledged_key (line 22) | fn last_acknowledged_key(&self) -> Vec<u8> {
function last_acknowledged (line 25) | pub(crate) fn last_acknowledged(&self) -> Option<u64> {
function message_key (line 32) | fn message_key(&self, id: u64) -> Vec<u8> {
function queue_message (line 37) | pub(crate) fn queue_message(
function get_message (line 53) | pub(crate) fn get_message(&self, id: u64) -> Option<QueuedMessage> {
function ack_message (line 62) | pub(crate) fn ack_message(&mut self, id: u64) {
FILE: mini/src/lib.rs
type Batch (line 12) | pub struct Batch {
type Event (line 18) | pub enum Event {
constant BATCH_FTL (line 27) | const BATCH_FTL: u64 = 3;
type Serai (line 30) | pub struct Serai {
method new (line 42) | pub fn new(ticks: usize, mut queued_key: bool) -> Serai {
method exhausted (line 97) | pub fn exhausted(&self) -> bool {
method join (line 101) | pub fn join(self) -> Vec<Event> {
type Processor (line 109) | pub struct Processor {
method new (line 114) | pub fn new(serai: Serai, blocks: u64) -> Processor {
method join (line 147) | pub fn join(self) -> Serai {
FILE: mini/src/tests/activation_race/mod.rs
function activation_race (line 9) | fn activation_race() {
function sequential_solves_activation_race (line 73) | fn sequential_solves_activation_race() {
function ftl_solves_activation_race (line 145) | fn ftl_solves_activation_race() {
FILE: networks/bitcoin/src/crypto.rs
function x (line 13) | fn x(key: &ProjectivePoint) -> [u8; 32] {
function x_only (line 21) | pub(crate) fn x_only(key: &ProjectivePoint) -> XOnlyPublicKey {
function needs_negation (line 26) | pub(crate) fn needs_negation(key: &ProjectivePoint) -> Choice {
type Hram (line 56) | pub struct Hram;
method hram (line 59) | fn hram(R: &ProjectivePoint, A: &ProjectivePoint, m: &[u8]) -> Scalar {
type Schnorr (line 85) | pub struct Schnorr(FrostSchnorr<Secp256k1, Hram>);
method new (line 89) | pub fn new() -> Schnorr {
type Transcript (line 95) | type Transcript = <FrostSchnorr<Secp256k1, Hram> as Algorithm<Secp256k...
type Addendum (line 96) | type Addendum = ();
type Signature (line 97) | type Signature = [u8; 64];
method transcript (line 99) | fn transcript(&mut self) -> &mut Self::Transcript {
method nonces (line 103) | fn nonces(&self) -> Vec<Vec<ProjectivePoint>> {
method preprocess_addendum (line 107) | fn preprocess_addendum<R: RngCore + CryptoRng>(
method read_addendum (line 115) | fn read_addendum<R: io::Read>(&self, reader: &mut R) -> io::Result<Sel...
method process_addendum (line 119) | fn process_addendum(
method sign_share (line 128) | fn sign_share(
method verify (line 139) | fn verify(
method verify_share (line 152) | fn verify_share(
FILE: networks/bitcoin/src/rpc.rs
type Error (line 18) | pub struct Error {
type RpcResponse (line 25) | enum RpcResponse<T> {
type Rpc (line 32) | pub struct Rpc {
method new (line 64) | pub async fn new(url: String) -> Result<Rpc, RpcError> {
method rpc_call (line 105) | pub async fn rpc_call<Response: DeserializeOwned + Debug>(
method get_latest_block_number (line 141) | pub async fn get_latest_block_number(&self) -> Result<usize, RpcError> {
method get_block_hash (line 151) | pub async fn get_block_hash(&self, number: usize) -> Result<[u8; 32], ...
method get_block_number (line 163) | pub async fn get_block_number(&self, hash: &[u8; 32]) -> Result<usize,...
method get_block (line 172) | pub async fn get_block(&self, hash: &[u8; 32]) -> Result<Block, RpcErr...
method send_raw_transaction (line 189) | pub async fn send_raw_transaction(&self, tx: &Transaction) -> Result<T...
method get_transaction (line 211) | pub async fn get_transaction(&self, hash: &[u8; 32]) -> Result<Transac...
type RpcError (line 38) | pub enum RpcError {
FILE: networks/bitcoin/src/tests/crypto.rs
function test_algorithm (line 18) | fn test_algorithm() {
FILE: networks/bitcoin/src/wallet/mod.rs
function tweak_keys (line 46) | pub fn tweak_keys(keys: ThresholdKeys<Secp256k1>) -> ThresholdKeys<Secp2...
function p2tr_script_buf (line 80) | pub fn p2tr_script_buf(key: ProjectivePoint) -> Option<ScriptBuf> {
type ReceivedOutput (line 90) | pub struct ReceivedOutput {
method offset (line 101) | pub fn offset(&self) -> Scalar {
method output (line 106) | pub fn output(&self) -> &TxOut {
method outpoint (line 111) | pub fn outpoint(&self) -> &OutPoint {
method value (line 116) | pub fn value(&self) -> u64 {
method read (line 122) | pub fn read<R: Read>(r: &mut R) -> io::Result<ReceivedOutput> {
method write (line 137) | pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
method serialize (line 144) | pub fn serialize(&self) -> Vec<u8> {
type Scanner (line 153) | pub struct Scanner {
method new (line 162) | pub fn new(key: ProjectivePoint) -> Option<Scanner> {
method register_offset (line 180) | pub fn register_offset(&mut self, mut offset: Scalar) -> Option<Scalar> {
method scan_transaction (line 199) | pub fn scan_transaction(&self, tx: &Transaction) -> Vec<ReceivedOutput> {
method scan_block (line 221) | pub fn scan_block(&self, block: &Block) -> Vec<ReceivedOutput> {
FILE: networks/bitcoin/src/wallet/send.rs
constant DUST (line 32) | pub const DUST: u64 = 546;
type TransactionError (line 35) | pub enum TransactionError {
type SignableTransaction (line 54) | pub struct SignableTransaction {
method calculate_weight_vbytes (line 62) | fn calculate_weight_vbytes(
method needed_fee (line 133) | pub fn needed_fee(&self) -> u64 {
method fee (line 138) | pub fn fee(&self) -> u64 {
method new (line 150) | pub fn new(
method txid (line 259) | pub fn txid(&self) -> [u8; 32] {
method transaction (line 266) | pub fn transaction(&self) -> &Transaction {
method multisig (line 273) | pub fn multisig(self, keys: &ThresholdKeys<Secp256k1>) -> Option<Trans...
type TransactionMachine (line 292) | pub struct TransactionMachine {
type Preprocess (line 298) | type Preprocess = Vec<Preprocess<Secp256k1, ()>>;
type Signature (line 299) | type Signature = Transaction;
type SignMachine (line 300) | type SignMachine = TransactionSignMachine;
method preprocess (line 302) | fn preprocess<R: RngCore + CryptoRng>(
type TransactionSignMachine (line 321) | pub struct TransactionSignMachine {
type Params (line 327) | type Params = ();
type Keys (line 328) | type Keys = ThresholdKeys<Secp256k1>;
type Preprocess (line 329) | type Preprocess = Vec<Preprocess<Secp256k1, ()>>;
type SignatureShare (line 330) | type SignatureShare = Vec<SignatureShare<Secp256k1>>;
type SignatureMachine (line 331) | type SignatureMachine = TransactionSignatureMachine;
method cache (line 333) | fn cache(self) -> CachedPreprocess {
method from_cache (line 340) | fn from_cache(
method read_preprocess (line 351) | fn read_preprocess<R: Read>(&self, reader: &mut R) -> io::Result<Self:...
method sign (line 355) | fn sign(
type TransactionSignatureMachine (line 401) | pub struct TransactionSignatureMachine {
type SignatureShare (line 407) | type SignatureShare = Vec<SignatureShare<Secp256k1>>;
method read_share (line 409) | fn read_share<R: Read>(&self, reader: &mut R) -> io::Result<Self::Sign...
method complete (line 413) | fn complete(
FILE: networks/bitcoin/tests/runner.rs
function rpc (line 11) | pub(crate) async fn rpc() -> Rpc {
FILE: networks/bitcoin/tests/wallet.rs
constant FEE (line 34) | const FEE: u64 = 20;
function is_even (line 36) | fn is_even(key: ProjectivePoint) -> bool {
function send_and_get_output (line 40) | async fn send_and_get_output(rpc: &Rpc, scanner: &Scanner, key: Projecti...
function keys (line 80) | fn keys() -> (HashMap<Participant, ThresholdKeys<Secp256k1>>, Projective...
function sign (line 89) | fn sign(
FILE: networks/ethereum/alloy-simple-request-transport/src/lib.rs
type SimpleRequest (line 15) | pub struct SimpleRequest {
method new (line 21) | pub fn new(url: String) -> Self {
type Response (line 27) | type Response = ResponsePacket;
type Error (line 28) | type Error = TransportError;
type Future (line 29) | type Future = TransportFut<'static>;
method poll_ready (line 32) | fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> task::Poll<Re...
method call (line 37) | fn call(&mut self, req: RequestPacket) -> Self::Future {
FILE: networks/ethereum/build.rs
function main (line 3) | fn main() {
FILE: networks/ethereum/relayer/src/main.rs
function main (line 9) | async fn main() {
FILE: networks/ethereum/src/crypto.rs
function keccak256 (line 21) | pub(crate) fn keccak256(data: &[u8]) -> [u8; 32] {
function hash_to_scalar (line 25) | pub(crate) fn hash_to_scalar(data: &[u8]) -> Scalar {
function address (line 29) | pub fn address(point: &ProjectivePoint) -> [u8; 20] {
function deterministically_sign (line 39) | pub fn deterministically_sign(tx: &TxLegacy) -> Signed<TxLegacy> {
type PublicKey (line 68) | pub struct PublicKey {
method new (line 79) | pub fn new(A: ProjectivePoint) -> Option<PublicKey> {
method point (line 100) | pub fn point(&self) -> ProjectivePoint {
method eth_repr (line 104) | pub(crate) fn eth_repr(&self) -> [u8; 32] {
method from_eth_repr (line 109) | pub(crate) fn from_eth_repr(repr: [u8; 32]) -> Option<Self> {
type EthereumHram (line 118) | pub struct EthereumHram {}
method hram (line 121) | fn hram(R: &ProjectivePoint, A: &ProjectivePoint, m: &[u8]) -> Scalar {
type Signature (line 134) | pub struct Signature {
method verify (line 139) | pub fn verify(&self, public_key: &PublicKey, message: &[u8]) -> bool {
method new (line 148) | pub fn new(
method c (line 163) | pub fn c(&self) -> Scalar {
method s (line 166) | pub fn s(&self) -> Scalar {
method to_bytes (line 170) | pub fn to_bytes(&self) -> [u8; 64] {
method from_bytes (line 177) | pub fn from_bytes(bytes: [u8; 64]) -> std::io::Result<Self> {
method from (line 185) | fn from(sig: &Signature) -> AbiSignature {
FILE: networks/ethereum/src/deployer.rs
type Deployer (line 25) | pub struct Deployer;
method deployment_tx (line 32) | pub fn deployment_tx() -> Signed<TxLegacy> {
method address (line 52) | pub fn address() -> [u8; 20] {
method new (line 59) | pub async fn new(provider: Arc<RootProvider<SimpleRequest>>) -> Result...
method deploy_router (line 70) | pub fn deploy_router(&self, key: &PublicKey) -> TxLegacy {
method find_router (line 82) | pub async fn find_router(
FILE: networks/ethereum/src/erc20.rs
type TopLevelErc20Transfer (line 16) | pub struct TopLevelErc20Transfer {
type Erc20 (line 25) | pub struct Erc20(Arc<RootProvider<SimpleRequest>>, Address);
method new (line 28) | pub fn new(provider: Arc<RootProvider<SimpleRequest>>, address: [u8; 2...
method top_level_transfers (line 32) | pub async fn top_level_transfers(
FILE: networks/ethereum/src/lib.rs
type Error (line 30) | pub enum Error {
FILE: networks/ethereum/src/machine.rs
type Call (line 29) | pub struct Call {
method read (line 35) | pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
method write (line 65) | fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
method from (line 76) | fn from(call: Call) -> AbiCall {
type OutInstructionTarget (line 82) | pub enum OutInstructionTarget {
method read (line 87) | fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
method write (line 112) | fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
type OutInstruction (line 133) | pub struct OutInstruction {
method read (line 138) | fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
method write (line 149) | fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
method from (line 155) | fn from(instruction: OutInstruction) -> AbiOutInstruction {
type RouterCommand (line 170) | pub enum RouterCommand {
method msg (line 176) | pub fn msg(&self) -> Vec<u8> {
method read (line 189) | pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
method write (line 233) | pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
method serialize (line 254) | pub fn serialize(&self) -> Vec<u8> {
type SignedRouterCommand (line 262) | pub struct SignedRouterCommand {
method new (line 268) | pub fn new(key: &PublicKey, command: RouterCommand, signature: &[u8; 6...
method command (line 279) | pub fn command(&self) -> &RouterCommand {
method signature (line 283) | pub fn signature(&self) -> &Signature {
method read (line 287) | pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
method write (line 297) | pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
type RouterCommandMachine (line 303) | pub struct RouterCommandMachine {
method new (line 310) | pub fn new(keys: ThresholdKeys<Secp256k1>, command: RouterCommand) -> ...
type Preprocess (line 327) | type Preprocess = Preprocess<Secp256k1, ()>;
type Signature (line 328) | type Signature = SignedRouterCommand;
type SignMachine (line 329) | type SignMachine = RouterCommandSignMachine;
method preprocess (line 331) | fn preprocess<R: RngCore + CryptoRng>(
type RouterCommandSignMachine (line 341) | pub struct RouterCommandSignMachine {
type Params (line 348) | type Params = ();
type Keys (line 349) | type Keys = ThresholdKeys<Secp256k1>;
type Preprocess (line 350) | type Preprocess = Preprocess<Secp256k1, ()>;
type SignatureShare (line 351) | type SignatureShare = SignatureShare<Secp256k1>;
type SignatureMachine (line 352) | type SignatureMachine = RouterCommandSignatureMachine;
method cache (line 354) | fn cache(self) -> CachedPreprocess {
method from_cache (line 361) | fn from_cache(
method read_preprocess (line 372) | fn read_preprocess<R: Read>(&self, reader: &mut R) -> io::Result<Self:...
method sign (line 376) | fn sign(
type RouterCommandSignatureMachine (line 391) | pub struct RouterCommandSignatureMachine {
type SignatureShare (line 399) | type SignatureShare = SignatureShare<Secp256k1>;
method read_share (line 401) | fn read_share<R: Read>(&self, reader: &mut R) -> io::Result<Self::Sign...
method complete (line 405) | fn complete(
FILE: networks/ethereum/src/router.rs
type Coin (line 29) | pub enum Coin {
method read (line 35) | pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
method write (line 49) | pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
type InInstruction (line 61) | pub struct InInstruction {
method read (line 71) | pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
method write (line 104) | pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
type Executed (line 127) | pub struct Executed {
type Router (line 135) | pub struct Router(Arc<RootProvider<SimpleRequest>>, Address);
method code (line 137) | pub(crate) fn code() -> Vec<u8> {
method init_code (line 142) | pub(crate) fn init_code(key: &PublicKey) -> Vec<u8> {
method new (line 150) | pub(crate) fn new(provider: Arc<RootProvider<SimpleRequest>>, address:...
method address (line 154) | pub fn address(&self) -> [u8; 20] {
method serai_key (line 160) | pub async fn serai_key(&self, at: [u8; 32]) -> Result<PublicKey, Error> {
method update_serai_key_message (line 176) | pub(crate) fn update_serai_key_message(chain_id: U256, nonce: U256, ke...
method update_serai_key (line 185) | pub fn update_serai_key(&self, public_key: &PublicKey, sig: &Signature...
method nonce (line 199) | pub async fn nonce(&self, at: [u8; 32]) -> Result<U256, Error> {
method execute_message (line 215) | pub(crate) fn execute_message(
method execute (line 224) | pub fn execute(&self, outs: &[abi::OutInstruction], sig: &Signature) -...
method key_at_end_of_block (line 234) | pub async fn key_at_end_of_block(&self, block: u64) -> Result<Option<P...
method in_instructions (line 259) | pub async fn in_instructions(
method executed_commands (line 376) | pub async fn executed_commands(&self, block: u64) -> Result<Vec<Execut...
method key_updated_filter (line 436) | pub fn key_updated_filter(&self) -> Filter {
method executed_filter (line 440) | pub fn executed_filter(&self) -> Filter {
FILE: networks/ethereum/src/tests/crypto.rs
function ecrecover (line 22) | pub(crate) fn ecrecover(message: Scalar, odd_y: bool, r: Scalar, s: Scal...
function test_ecrecover (line 36) | fn test_ecrecover() {
function test_signing (line 71) | fn test_signing() {
function preprocess_signature_for_ecrecover (line 82) | pub fn preprocess_signature_for_ecrecover(
function test_ecrecover_hack (line 95) | fn test_ecrecover_hack() {
FILE: networks/ethereum/src/tests/mod.rs
function key_gen (line 32) | pub fn key_gen() -> (HashMap<Participant, ThresholdKeys<Secp256k1>>, Pub...
function send (line 50) | pub async fn send(
function fund_account (line 79) | pub async fn fund_account(
function deploy_contract (line 93) | pub async fn deploy_contract(
FILE: networks/ethereum/src/tests/router.rs
function setup_test (line 30) | async fn setup_test() -> (
function latest_block_hash (line 86) | async fn latest_block_hash(client: &RootProvider<SimpleRequest>) -> [u8;...
function test_deploy_contract (line 98) | async fn test_deploy_contract() {
function hash_and_sign (line 107) | pub fn hash_and_sign(
function test_router_update_serai_key (line 120) | async fn test_router_update_serai_key() {
function test_router_execute (line 156) | async fn test_router_execute() {
FILE: networks/ethereum/src/tests/schnorr.rs
function setup_test (line 31) | async fn setup_test() -> (AnvilInstance, Arc<RootProvider<SimpleRequest>...
function test_deploy_contract (line 45) | async fn test_deploy_contract() {
function call_verify (line 49) | pub async fn call_verify(
function test_ecrecover_hack (line 76) | async fn test_ecrecover_hack() {
FILE: orchestration/src/coordinator.rs
function coordinator (line 11) | pub fn coordinator(
FILE: orchestration/src/docker.rs
function build (line 5) | pub fn build(orchestration_path: &Path, network: Network, name: &str) {
FILE: orchestration/src/ethereum_relayer.rs
function ethereum_relayer (line 5) | pub fn ethereum_relayer(orchestration_path: &Path, network: Network) {
FILE: orchestration/src/main.rs
type Network (line 58) | pub enum Network {
method db (line 64) | pub fn db(&self) -> &'static str {
method release (line 71) | pub fn release(&self) -> bool {
method label (line 78) | pub fn label(&self) -> &'static str {
type Os (line 87) | enum Os {
function os (line 92) | fn os(os: Os, additional_root: &str, user: &str) -> String {
function build_serai_service (line 144) | fn build_serai_service(prelude: &str, release: bool, features: &str, pac...
function write_dockerfile (line 199) | pub fn write_dockerfile(path: PathBuf, dockerfile: &str) {
function orchestration_path (line 208) | fn orchestration_path(network: Network) -> PathBuf {
type InfrastructureKeys (line 222) | type InfrastructureKeys =
function infrastructure_keys (line 224) | fn infrastructure_keys(network: Network) -> InfrastructureKeys {
function dockerfiles (line 271) | fn dockerfiles(network: Network) {
function key_gen (line 337) | fn key_gen(network: Network) {
function start (line 356) | fn start(network: Network, services: HashSet<String>) {
function main (line 548) | fn main() {
FILE: orchestration/src/message_queue.rs
function message_queue (line 8) | pub fn message_queue(
FILE: orchestration/src/mimalloc.rs
function mimalloc (line 3) | pub fn mimalloc(os: Os) -> &'static str {
FILE: orchestration/src/networks/bitcoin.rs
function bitcoin (line 5) | pub fn bitcoin(orchestration_path: &Path, network: Network) {
FILE: orchestration/src/networks/ethereum/consensus/lighthouse.rs
function lighthouse (line 3) | pub fn lighthouse(network: Network) -> (String, String, String) {
FILE: orchestration/src/networks/ethereum/consensus/nimbus.rs
function nimbus (line 3) | pub fn nimbus(network: Network) -> (String, String, String) {
FILE: orchestration/src/networks/ethereum/execution/anvil.rs
function anvil (line 3) | pub fn anvil(network: Network) -> (String, String, String) {
FILE: orchestration/src/networks/ethereum/execution/reth.rs
function reth (line 3) | pub fn reth(network: Network) -> (String, String, String) {
FILE: orchestration/src/networks/ethereum/mod.rs
function ethereum (line 11) | pub fn ethereum(orchestration_path: &Path, network: Network) {
FILE: orchestration/src/networks/monero.rs
function monero_internal (line 5) | fn monero_internal(
function monero (line 71) | pub fn monero(orchestration_path: &Path, network: Network) {
function monero_wallet_rpc (line 75) | pub fn monero_wallet_rpc(orchestration_path: &Path) {
FILE: orchestration/src/processor.rs
function processor (line 11) | pub fn processor(
FILE: orchestration/src/serai.rs
function serai (line 9) | pub fn serai(
FILE: patches/option-ext/src/lib.rs
type OptionExt (line 1) | pub trait OptionExt<T: PartialEq> {
method contains (line 2) | fn contains(&self, x: &T) -> bool;
function contains (line 5) | fn contains(&self, x: &T) -> bool {
FILE: processor/messages/src/lib.rs
type SubstrateContext (line 16) | pub struct SubstrateContext {
type KeyGenId (line 27) | pub struct KeyGenId {
type CoordinatorMessage (line 33) | pub enum CoordinatorMessage {
method required_block (line 62) | pub fn required_block(&self) -> Option<BlockHash> {
method required_block (line 129) | pub fn required_block(&self) -> Option<BlockHash> {
method session (line 133) | pub fn session(&self) -> Session {
method required_block (line 199) | pub fn required_block(&self) -> Option<BlockHash> {
method required_block (line 243) | pub fn required_block(&self) -> Option<BlockHash> {
method required_block (line 283) | pub fn required_block(&self) -> Option<BlockHash> {
method intent (line 330) | pub fn intent(&self) -> Vec<u8> {
type ProcessorMessage (line 68) | pub enum ProcessorMessage {
method intent (line 401) | pub fn intent(&self) -> Vec<u8> {
type SignId (line 110) | pub struct SignId {
type CoordinatorMessage (line 117) | pub enum CoordinatorMessage {
method required_block (line 62) | pub fn required_block(&self) -> Option<BlockHash> {
method required_block (line 129) | pub fn required_block(&self) -> Option<BlockHash> {
method session (line 133) | pub fn session(&self) -> Session {
method required_block (line 199) | pub fn required_block(&self) -> Option<BlockHash> {
method required_block (line 243) | pub fn required_block(&self) -> Option<BlockHash> {
method required_block (line 283) | pub fn required_block(&self) -> Option<BlockHash> {
method intent (line 330) | pub fn intent(&self) -> Vec<u8> {
type ProcessorMessage (line 144) | pub enum ProcessorMessage {
method intent (line 401) | pub fn intent(&self) -> Vec<u8> {
function cosign_block_msg (line 159) | pub fn cosign_block_msg(block_number: u64, block: [u8; 32]) -> Vec<u8> {
type SubstrateSignableId (line 171) | pub enum SubstrateSignableId {
type SubstrateSignId (line 178) | pub struct SubstrateSignId {
type CoordinatorMessage (line 185) | pub enum CoordinatorMessage {
method required_block (line 62) | pub fn required_block(&self) -> Option<BlockHash> {
method required_block (line 129) | pub fn required_block(&self) -> Option<BlockHash> {
method session (line 133) | pub fn session(&self) -> Session {
method required_block (line 199) | pub fn required_block(&self) -> Option<BlockHash> {
method required_block (line 243) | pub fn required_block(&self) -> Option<BlockHash> {
method required_block (line 283) | pub fn required_block(&self) -> Option<BlockHash> {
method intent (line 330) | pub fn intent(&self) -> Vec<u8> {
type PlanMeta (line 205) | pub struct PlanMeta {
type ProcessorMessage (line 211) | pub enum ProcessorMessage {
method intent (line 401) | pub fn intent(&self) -> Vec<u8> {
type CoordinatorMessage (line 228) | pub enum CoordinatorMessage {
method required_block (line 62) | pub fn required_block(&self) -> Option<BlockHash> {
method required_block (line 129) | pub fn required_block(&self) -> Option<BlockHash> {
method session (line 133) | pub fn session(&self) -> Session {
method required_block (line 199) | pub fn required_block(&self) -> Option<BlockHash> {
method required_block (line 243) | pub fn required_block(&self) -> Option<BlockHash> {
method required_block (line 283) | pub fn required_block(&self) -> Option<BlockHash> {
method intent (line 330) | pub fn intent(&self) -> Vec<u8> {
type ProcessorMessage (line 253) | pub enum ProcessorMessage {
method intent (line 401) | pub fn intent(&self) -> Vec<u8> {
type CoordinatorMessage (line 270) | pub enum CoordinatorMessage {
method required_block (line 62) | pub fn required_block(&self) -> Option<BlockHash> {
method required_block (line 129) | pub fn required_block(&self) -> Option<BlockHash> {
method session (line 133) | pub fn session(&self) -> Session {
method required_block (line 199) | pub fn required_block(&self) -> Option<BlockHash> {
method required_block (line 243) | pub fn required_block(&self) -> Option<BlockHash> {
method required_block (line 283) | pub fn required_block(&self) -> Option<BlockHash> {
method intent (line 330) | pub fn intent(&self) -> Vec<u8> {
type ProcessorMessage (line 301) | pub enum ProcessorMessage {
method intent (line 401) | pub fn intent(&self) -> Vec<u8> {
constant COORDINATOR_UID (line 315) | const COORDINATOR_UID: u8 = 0;
constant PROCESSOR_UID (line 316) | const PROCESSOR_UID: u8 = 1;
constant TYPE_KEY_GEN_UID (line 318) | const TYPE_KEY_GEN_UID: u8 = 2;
constant TYPE_SIGN_UID (line 319) | const TYPE_SIGN_UID: u8 = 3;
constant TYPE_COORDINATOR_UID (line 320) | const TYPE_COORDINATOR_UID: u8 = 4;
constant TYPE_SUBSTRATE_UID (line 321) | const TYPE_SUBSTRATE_UID: u8 = 5;
FILE: processor/src/additional_key.rs
function additional_key (line 9) | pub fn additional_key<N: Network>(k: u64) -> <N::Curve as Ciphersuite>::F {
FILE: processor/src/batch_signer.rs
type Preprocess (line 36) | type Preprocess = <AlgorithmMachine<Ristretto, Schnorrkel> as Preprocess...
type SignatureShare (line 37) | type SignatureShare = <AlgorithmSignMachine<Ristretto, Schnorrkel> as Si...
type BatchSigner (line 41) | pub struct BatchSigner<D: Db> {
function fmt (line 57) | fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
function new (line 67) | pub fn new(
function verify_id (line 87) | fn verify_id(&self, id: &SubstrateSignId) -> Result<(Session, u32, u32),...
function attempt (line 114) | fn attempt(
function sign (line 192) | pub fn sign(&mut self, txn: &mut D::Transaction<'_>, batch: Batch) -> Op...
function handle (line 206) | pub fn handle(
function batch_signed (line 402) | pub fn batch_signed(&mut self, txn: &mut D::Transaction<'_>, id: u32) {
FILE: processor/src/coordinator.rs
type Message (line 6) | pub struct Message {
type Coordinator (line 12) | pub trait Coordinator {
method send (line 13) | async fn send(&mut self, msg: impl Send + Into<ProcessorMessage>);
method recv (line 14) | async fn recv(&mut self) -> Message;
method ack (line 15) | async fn ack(&mut self, msg: Message);
method send (line 20) | async fn send(&mut self, msg: impl Send + Into<ProcessorMessage>) {
method recv (line 28) | async fn recv(&mut self) -> Message {
method ack (line 40) | async fn ack(&mut self, msg: Message) {
FILE: processor/src/cosigner.rs
type Preprocess (line 31) | type Preprocess = <AlgorithmMachine<Ristretto, Schnorrkel> as Preprocess...
type SignatureShare (line 32) | type SignatureShare = <AlgorithmSignMachine<Ristretto, Schnorrkel> as Si...
type Cosigner (line 36) | pub struct Cosigner {
method fmt (line 50) | fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
method new (line 64) | pub fn new(
method handle (line 117) | pub fn handle(
FILE: processor/src/db.rs
method pending_activation (line 18) | pub fn pending_activation<N: Network>(
method set_pending_activation (line 33) | pub fn set_pending_activation<N: Network>(
FILE: processor/src/key_gen.rs
type KeyConfirmed (line 24) | pub struct KeyConfirmed<C: Ciphersuite> {
method read_keys (line 47) | fn read_keys<N: Network>(
method save_keys (line 65) | fn save_keys<N: Network>(
method confirm_keys (line 88) | fn confirm_keys<N: Network>(
method keys (line 113) | fn keys<N: Network>(
method substrate_keys_by_session (line 123) | pub fn substrate_keys_by_session<N: Network>(
type SecretShareMachines (line 132) | type SecretShareMachines<N> =
type KeyMachines (line 134) | type KeyMachines<N> = Vec<(KeyMachine<Ristretto>, KeyMachine<<N as Netwo...
type KeyGen (line 137) | pub struct KeyGen<N: Network, D: Db> {
function new (line 148) | pub fn new(db: D, entropy: Zeroizing<[u8; 32]>) -> KeyGen<N, D> {
function in_set (line 152) | pub fn in_set(&self, session: &Session) -> bool {
function keys (line 161) | pub fn keys(
function substrate_keys_by_session (line 170) | pub fn substrate_keys_by_session(
function handle (line 177) | pub fn handle(
function confirm (line 570) | pub fn confirm(
FILE: processor/src/main.rs
type TributaryMutable (line 76) | struct TributaryMutable<N: Network, D: Db> {
type SubstrateMutable (line 137) | type SubstrateMutable<N, D> = MultisigManager<D, N>;
function handle_coordinator_msg (line 139) | async fn handle_coordinator_msg<D: Db, N: Network, Co: Coordinator>(
function boot (line 483) | async fn boot<N: Network, D: Db, Co: Coordinator>(
function run (line 582) | async fn run<N: Network, D: Db, Co: Coordinator>(mut raw_db: D, network:...
function main (line 700) | async fn main() {
FILE: processor/src/multisigs/db.rs
type PlanFromScanning (line 19) | pub enum PlanFromScanning<N: Network> {
function read (line 25) | fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
function write (line 49) | fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
method save_active_plan (line 82) | pub fn save_active_plan<N: Network>(
method active_plans (line 114) | pub fn active_plans<N: Network>(getter: &impl Get, key: &[u8]) -> Vec<(u...
method plan_by_key_with_self_change (line 132) | pub fn plan_by_key_with_self_change<N: Network>(
method take_operating_costs (line 148) | pub fn take_operating_costs(txn: &mut impl DbTxn) -> u64 {
method set_operating_costs (line 153) | pub fn set_operating_costs(txn: &mut impl DbTxn, amount: u64) {
method resolve_plan (line 161) | pub fn resolve_plan<N: Network>(
method set_plans_from_scanning (line 190) | pub fn set_plans_from_scanning<N: Network>(
method take_plans_from_scanning (line 202) | pub fn take_plans_from_scanning<N: Network>(
method save_forwarded_output (line 223) | pub fn save_forwarded_output(txn: &mut impl DbTxn, instruction: &InInstr...
method take_forwarded_output (line 229) | pub fn take_forwarded_output(
method save_delayed_output (line 247) | pub fn save_delayed_output(txn: &mut impl DbTxn, instruction: &InInstruc...
method take_delayed_outputs (line 253) | pub fn take_delayed_outputs(txn: &mut impl DbTxn) -> Vec<InInstructionWi...
FILE: processor/src/multisigs/mod.rs
function instruction_from_output (line 40) | fn instruction_from_output<N: Network>(
type RotationStep (line 96) | enum RotationStep {
function prepare_send (line 110) | async fn prepare_send<N: Network>(
type MultisigViewer (line 134) | pub struct MultisigViewer<N: Network> {
type MultisigEvent (line 142) | pub enum MultisigEvent<N: Network> {
type MultisigManager (line 149) | pub struct MultisigManager<D: Db, N: Network> {
function new (line 156) | pub async fn new(
function block_number (line 221) | pub async fn block_number<G: Get>(
function add_key (line 237) | pub async fn add_key(
function current_rotation_step (line 257) | fn current_rotation_step(&self, block_number: usize) -> RotationStep {
function burns_to_payments (line 295) | fn burns_to_payments(
function split_outputs_by_key (line 326) | fn split_outputs_by_key(&self, outputs: Vec<N::Output>) -> (Vec<N::Outpu...
function refund_plan (line 344) | fn refund_plan(
function forward_plan (line 357) | fn forward_plan(&mut self, txn: &mut D::Transaction<'_>, output: &N::Out...
function filter_outputs_due_to_closing (line 374) | fn filter_outputs_due_to_closing(
function plans_from_block (line 558) | async fn plans_from_block(
function substrate_block (line 693) | pub async fn substrate_block(
function release_scanner_lock (line 800) | pub async fn release_scanner_lock(&mut self) {
function scanner_event_to_multisig_event (line 804) | pub async fn scanner_event_to_multisig_event(
function next_scanner_event (line 1065) | pub async fn next_scanner_event(&mut self) -> ScannerEvent<N> {
FILE: processor/src/multisigs/scanner.rs
type ScannerEvent (line 24) | pub enum ScannerEvent<N: Network> {
type ScannerEventChannel (line 42) | pub type ScannerEventChannel<N> = mpsc::UnboundedReceiver<ScannerEvent<N>>;
type ScannerDb (line 45) | struct ScannerDb<N: Network, D: Db>(PhantomData<N>, PhantomData<D>);
function scanner_key (line 47) | fn scanner_key(dst: &'static [u8], key: impl AsRef<[u8]>) -> Vec<u8> {
function block_key (line 51) | fn block_key(number: usize) -> Vec<u8> {
function block_number_key (line 54) | fn block_number_key(id: &<N::Block as Block<N>>::Id) -> Vec<u8> {
function save_block (line 57) | fn save_block(txn: &mut D::Transaction<'_>, number: usize, id: &<N::Bloc...
function block (line 61) | fn block<G: Get>(getter: &G, number: usize) -> Option<<N::Block as Block...
function block_number (line 68) | fn block_number<G: Get>(getter: &G, id: &<N::Block as Block<N>>::Id) -> ...
function keys_key (line 74) | fn keys_key() -> Vec<u8> {
function register_key (line 77) | fn register_key(
function keys (line 102) | fn keys<G: Get>(getter: &G) -> Vec<(usize, <N::Curve as Ciphersuite>::G)> {
function retire_key (line 120) | fn retire_key(txn: &mut D::Transaction<'_>) {
function seen_key (line 127) | fn seen_key(id: &<N::Output as Output<N>>::Id) -> Vec<u8> {
function seen (line 130) | fn seen<G: Get>(getter: &G, id: &<N::Output as Output<N>>::Id) -> bool {
function outputs_key (line 134) | fn outputs_key(block: &<N::Block as Block<N>>::Id) -> Vec<u8> {
function save_outputs (line 137) | fn save_outputs(
function outputs (line 148) | fn outputs(
function scanned_block_key (line 162) | fn scanned_block_key() -> Vec<u8> {
function save_scanned_block (line 166) | fn save_scanned_block(txn: &mut D::Transaction<'_>, block: usize) -> Vec...
function latest_scanned_block (line 181) | fn latest_scanned_block<G: Get>(getter: &G) -> Option<usize> {
function retirement_block_key (line 187) | fn retirement_block_key(key: &<N::Curve as Ciphersuite>::G) -> Vec<u8> {
function save_retirement_block (line 190) | fn save_retirement_block(
function retirement_block (line 197) | fn retirement_block<G: Get>(getter: &G, key: &<N::Curve as Ciphersuite>:...
type Scanner (line 210) | pub struct Scanner<N: Network, D: Db> {
type ScannerHold (line 226) | struct ScannerHold<N: Network, D: Db> {
function read (line 230) | async fn read(&self) -> RwLockReadGuard<'_, Option<Scanner<N, D>>> {
function write (line 241) | async fn write(&self) -> RwLockWriteGuard<'_, Option<Scanner<N, D>>> {
function long_term_acquire (line 254) | async fn long_term_acquire(&self) -> Scanner<N, D> {
function restore (line 257) | async fn restore(&self, scanner: Scanner<N, D>) {
type ScannerHandle (line 263) | pub struct ScannerHandle<N: Network, D: Db> {
function ram_scanned (line 271) | pub async fn ram_scanned(&self) -> usize {
function register_key (line 276) | pub async fn register_key(
function db_scanned (line 305) | pub fn db_scanned<G: Get>(getter: &G) -> Option<usize> {
function block_number (line 312) | pub fn block_number<G: Get>(getter: &G, id: &<N::Block as Block<N>>::Id)...
function ack_block (line 322) | pub async fn ack_block(
function register_eventuality (line 357) | pub async fn register_eventuality(
function release_lock (line 378) | pub async fn release_lock(&mut self) {
function new (line 385) | pub fn new(
function emit (line 429) | fn emit(&mut self, event: ScannerEvent<N>) -> bool {
function run (line 438) | async fn run(
FILE: processor/src/multisigs/scheduler/mod.rs
type SchedulerAddendum (line 13) | pub trait SchedulerAddendum: Send + Clone + PartialEq + Debug {
method read (line 14) | fn read<R: io::Read>(reader: &mut R) -> io::Result<Self>;
method write (line 15) | fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()>;
method read (line 19) | fn read<R: io::Read>(_: &mut R) -> io::Result<Self> {
method write (line 22) | fn write<W: io::Write>(&self, _: &mut W) -> io::Result<()> {
type Scheduler (line 27) | pub trait Scheduler<N: Network>: Sized + Clone + PartialEq + Debug {
method empty (line 31) | fn empty(&self) -> bool;
method new (line 34) | fn new<D: Db>(
method from_db (line 41) | fn from_db<D: Db>(
method can_use_branch (line 48) | fn can_use_branch(&self, balance: ExternalBalance) -> bool;
method schedule (line 51) | fn schedule<D: Db>(
method consume_payments (line 62) | fn consume_payments<D: Db>(&mut self, txn: &mut D::Transaction<'_>) ->...
method created_output (line 66) | fn created_output<D: Db>(
method refund_plan (line 74) | fn refund_plan<D: Db>(
method shim_forward_plan (line 85) | fn shim_forward_plan(output: N::Output, to: <N::Curve as Ciphersuite>:...
method forward_plan (line 90) | fn forward_plan<D: Db>(
FILE: processor/src/multisigs/scheduler/smart_contract.rs
type Scheduler (line 14) | pub struct Scheduler<N: Network> {
type Addendum (line 21) | pub enum Addendum<N: Network> {
method read (line 27) | fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
method write (line 47) | fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
type Addendum (line 70) | type Addendum = Addendum<N>;
function empty (line 73) | fn empty(&self) -> bool {
function new (line 78) | fn new<D: Db>(
function from_db (line 91) | fn from_db<D: Db>(
function can_use_branch (line 103) | fn can_use_branch(&self, _balance: ExternalBalance) -> bool {
function schedule (line 107) | fn schedule<D: Db>(
function consume_payments (line 158) | fn consume_payments<D: Db>(&mut self, _txn: &mut D::Transaction<'_>) -> ...
function created_output (line 162) | fn created_output<D: Db>(
function refund_plan (line 172) | fn refund_plan<D: Db>(
function shim_forward_plan (line 193) | fn shim_forward_plan(_output: N::Output, _to: <N::Curve as Ciphersuite>:...
function forward_plan (line 200) | fn forward_plan<D: Db>(
FILE: processor/src/multisigs/scheduler/utxo.rs
type Scheduler (line 18) | pub struct Scheduler<N: UtxoNetwork> {
function scheduler_key (line 46) | fn scheduler_key<D: Db, G: GroupEncoding>(key: &G) -> Vec<u8> {
function empty (line 51) | pub fn empty(&self) -> bool {
function read (line 58) | fn read<R: Read>(
function serialize (line 112) | fn serialize(&self) -> Vec<u8> {
function new (line 145) | pub fn new<D: Db>(
function from_db (line 173) | pub fn from_db<D: Db>(
function can_use_branch (line 193) | pub fn can_use_branch(&self, balance: ExternalBalance) -> bool {
function execute (line 198) | fn execute(
function add_outputs (line 266) | fn add_outputs(
function schedule (line 303) | pub fn schedule<D: Db>(
function consume_payments (line 453) | pub fn consume_payments<D: Db>(&mut self, txn: &mut D::Transaction<'_>) ...
function created_output (line 463) | pub fn created_output<D: Db>(
type Addendum (line 528) | type Addendum = ();
function empty (line 531) | fn empty(&self) -> bool {
function new (line 536) | fn new<D: Db>(
function from_db (line 545) | fn from_db<D: Db>(
function can_use_branch (line 554) | fn can_use_branch(&self, balance: ExternalBalance) -> bool {
function schedule (line 559) | fn schedule<D: Db>(
function consume_payments (line 571) | fn consume_payments<D: Db>(&mut self, txn: &mut D::Transaction<'_>) -> V...
function created_output (line 578) | fn created_output<D: Db>(
function refund_plan (line 587) | fn refund_plan<D: Db>(
function shim_forward_plan (line 607) | fn shim_forward_plan(output: N::Output, to: <N::Curve as Ciphersuite>::G...
function forward_plan (line 621) | fn forward_plan<D: Db>(
FILE: processor/src/networks/bitcoin.rs
type OutputId (line 60) | pub struct OutputId(pub [u8; 36]);
method as_ref (line 67) | fn as_ref(&self) -> &[u8] {
method as_mut (line 72) | fn as_mut(&mut self) -> &mut [u8] {
method default (line 62) | fn default() -> Self {
type Output (line 78) | pub struct Output {
type Id (line 86) | type Id = OutputId;
method kind (line 88) | fn kind(&self) -> OutputType {
method id (line 92) | fn id(&self) -> Self::Id {
method tx_id (line 106) | fn tx_id(&self) -> [u8; 32] {
method key (line 112) | fn key(&self) -> ProjectivePoint {
method presumed_origin (line 124) | fn presumed_origin(&self) -> Option<Address> {
method balance (line 128) | fn balance(&self) -> ExternalBalance {
method data (line 132) | fn data(&self) -> &[u8] {
method write (line 136) | fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
method read (line 145) | fn read<R: io::Read>(mut reader: &mut R) -> io::Result<Self> {
type Fee (line 170) | pub struct Fee(u64);
type Id (line 174) | type Id = [u8; 32];
method id (line 175) | fn id(&self) -> Self::Id {
method fee (line 182) | async fn fee(&self, network: &Bitcoin) -> u64 {
type Eventuality (line 201) | pub struct Eventuality([u8; 32]);
type EmptyClaim (line 204) | pub struct EmptyClaim;
method as_ref (line 206) | fn as_ref(&self) -> &[u8] {
method as_mut (line 211) | fn as_mut(&mut self) -> &mut [u8] {
type Claim (line 217) | type Claim = EmptyClaim;
type Completion (line 218) | type Completion = Transaction;
method lookup (line 220) | fn lookup(&self) -> Vec<u8> {
method read (line 224) | fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
method serialize (line 231) | fn serialize(&self) -> Vec<u8> {
method claim (line 235) | fn claim(_: &Transaction) -> EmptyClaim {
method serialize_completion (line 238) | fn serialize_completion(completion: &Transaction) -> Vec<u8> {
method read_completion (line 243) | fn read_completion<R: io::Read>(reader: &mut R) -> io::Result<Transactio...
type SignableTransaction (line 250) | pub struct SignableTransaction {
method eq (line 254) | fn eq(&self, other: &SignableTransaction) -> bool {
method fee (line 260) | fn fee(&self) -> u64 {
type Id (line 267) | type Id = [u8; 32];
method id (line 268) | fn id(&self) -> Self::Id {
method parent (line 274) | fn parent(&self) -> Self::Id {
method time (line 280) | async fn time(&self, rpc: &Bitcoin) -> u64 {
constant KEY_DST (line 308) | const KEY_DST: &[u8] = b"Serai Bitcoin Output Offset";
function scanner (line 314) | fn scanner(
type Bitcoin (line 350) | pub struct Bitcoin {
method new (line 363) | pub async fn new(url: String) -> Bitcoin {
method fresh_chain (line 374) | pub async fn fresh_chain(&self) {
method median_fee (line 388) | async fn median_fee(&self, block: &Block) -> Result<Fee, NetworkError> {
method make_signable_transaction (line 417) | async fn make_signable_transaction(
method segwit_data_pattern (line 474) | fn segwit_data_pattern(script: &ScriptBuf) -> Option<bool> {
method extract_serai_data (line 493) | fn extract_serai_data(tx: &Transaction) -> Vec<u8> {
method sign_btc_input_for_p2pkh (line 527) | pub fn sign_btc_input_for_p2pkh(
method eq (line 356) | fn eq(&self, _: &Self) -> bool {
constant MAX_INPUTS (line 575) | const MAX_INPUTS: usize = 520;
constant MAX_OUTPUTS (line 576) | const MAX_OUTPUTS: usize = 520;
function address_from_key (line 578) | fn address_from_key(key: ProjectivePoint) -> Address {
type Curve (line 587) | type Curve = Secp256k1;
type Transaction (line 589) | type Transaction = Transaction;
type Block (line 590) | type Block = Block;
type Output (line 592) | type Output = Output;
type SignableTransaction (line 593) | type SignableTransaction = SignableTransaction;
type Eventuality (line 594) | type Eventuality = Eventuality;
type TransactionMachine (line 595) | type TransactionMachine = TransactionMachine;
type Scheduler (line 597) | type Scheduler = Scheduler<Bitcoin>;
type Address (line 599) | type Address = Address;
constant NETWORK (line 601) | const NETWORK: ExternalNetworkId = ExternalNetworkId::Bitcoin;
constant ID (line 602) | const ID: &'static str = "Bitcoin";
constant ESTIMATED_BLOCK_TIME_IN_SECONDS (line 603) | const ESTIMATED_BLOCK_TIME_IN_SECONDS: usize = 600;
constant CONFIRMATIONS (line 604) | const CONFIRMATIONS: usize = 6;
constant DUST (line 638) | const DUST: u64 = 10_000;
constant COST_TO_AGGREGATE (line 646) | const COST_TO_AGGREGATE: u64 = 800;
constant MAX_OUTPUTS (line 648) | const MAX_OUTPUTS: usize = MAX_OUTPUTS;
method tweak_keys (line 650) | fn tweak_keys(keys: &mut ThresholdKeys<Self::Curve>) {
method external_address (line 657) | async fn external_address(&self, key: ProjectivePoint) -> Address {
method branch_address (line 661) | fn branch_address(key: ProjectivePoint) -> Option<Address> {
method change_address (line 666) | fn change_address(key: ProjectivePoint) -> Option<Address> {
method forward_address (line 671) | fn forward_address(key: ProjectivePoint) -> Option<Address> {
method get_latest_block_number (line 676) | async fn get_latest_block_number(&self) -> Result<usize, NetworkError> {
method get_block (line 680) | async fn get_block(&self, number: usize) -> Result<Self::Block, NetworkE...
method get_outputs (line 686) | async fn get_outputs(&self, block: &Self::Block, key: ProjectivePoint) -...
method get_eventuality_completions (line 742) | async fn get_eventuality_completions(
method needed_fee (line 803) | async fn needed_fee(
method signable_transaction (line 818) | async fn signable_transaction(
method attempt_sign (line 836) | async fn attempt_sign(
method publish_completion (line 844) | async fn publish_completion(&self, tx: &Transaction) -> Result<(), Netwo...
method confirm_completion (line 855) | async fn confirm_completion(
method get_block_number (line 866) | async fn get_block_number(&self, id: &[u8; 32]) -> usize {
method check_eventuality_by_claim (line 871) | async fn check_eventuality_by_claim(
method get_transaction_by_eventuality (line 880) | async fn get_transaction_by_eventuality(&self, _: usize, id: &Eventualit...
method mine_block (line 885) | async fn mine_block(&self) {
method test_send (line 899) | async fn test_send(&self, address: Address) -> Block {
constant MAX_INPUTS (line 941) | const MAX_INPUTS: usize = MAX_INPUTS;
FILE: processor/src/networks/ethereum.rs
constant DAI (line 62) | const DAI: [u8; 20] =
constant DAI (line 68) | const DAI: [u8; 20] =
function coin_to_serai_coin (line 74) | fn coin_to_serai_coin(coin: &EthereumCoin) -> Option<ExternalCoin> {
function amount_to_serai_amount (line 86) | fn amount_to_serai_amount(coin: ExternalCoin, amount: U256) -> Amount {
function balance_to_ethereum_amount (line 95) | fn balance_to_ethereum_amount(balance: ExternalBalance) -> U256 {
type Address (line 104) | pub struct Address(pub [u8; 20]);
type Error (line 106) | type Error = ();
method try_from (line 107) | fn try_from(bytes: Vec<u8>) -> Result<Address, ()> {
type Error (line 117) | type Error = ();
method try_into (line 118) | fn try_into(self) -> Result<Vec<u8>, ()> {
method fmt (line 124) | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
method fee (line 130) | fn fee(&self) -> u64 {
type Id (line 138) | type Id = [u8; 32];
method id (line 139) | fn id(&self) -> Self::Id {
method fee (line 144) | async fn fee(&self, _network: &Ethereum<D>) -> u64 {
type Epoch (line 152) | pub struct Epoch {
method end (line 164) | fn end(&self) -> u64 {
type Id (line 171) | type Id = [u8; 32];
method id (line 172) | fn id(&self) -> [u8; 32] {
method parent (line 175) | fn parent(&self) -> [u8; 32] {
method time (line 178) | async fn time(&self, _: &Ethereum<D>) -> u64 {
type Id (line 184) | type Id = [u8; 32];
method kind (line 186) | fn kind(&self) -> OutputType {
method id (line 190) | fn id(&self) -> Self::Id {
method tx_id (line 196) | fn tx_id(&self) -> [u8; 32] {
method key (line 199) | fn key(&self) -> <Secp256k1 as Ciphersuite>::G {
method presumed_origin (line 203) | fn presumed_origin(&self) -> Option<Address> {
method balance (line 207) | fn balance(&self) -> ExternalBalance {
method data (line 216) | fn data(&self) -> &[u8] {
method write (line 220) | fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
method read (line 223) | fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
type Claim (line 229) | pub struct Claim {
method as_ref (line 233) | fn as_ref(&self) -> &[u8] {
method as_mut (line 238) | fn as_mut(&mut self) -> &mut [u8] {
method from (line 248) | fn from(sig: &Signature) -> Self {
method default (line 243) | fn default() -> Self {
type Eventuality (line 254) | pub struct Eventuality(PublicKey, RouterCommand);
type Claim (line 256) | type Claim = Claim;
type Completion (line 257) | type Completion = SignedRouterCommand;
method lookup (line 259) | fn lookup(&self) -> Vec<u8> {
method read (line 267) | fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
method serialize (line 275) | fn serialize(&self) -> Vec<u8> {
method claim (line 282) | fn claim(completion: &Self::Completion) -> Self::Claim {
method serialize_completion (line 285) | fn serialize_completion(completion: &Self::Completion) -> Vec<u8> {
method read_completion (line 290) | fn read_completion<R: io::Read>(reader: &mut R) -> io::Result<Self::Comp...
type Ethereum (line 296) | pub struct Ethereum<D: Db> {
method eq (line 308) | fn eq(&self, _other: &Ethereum<D>) -> bool {
function fmt (line 313) | fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
function new (line 322) | pub async fn new(db: D, daemon_url: String, relayer_url: String) -> Self {
function router (line 342) | pub async fn router(&self) -> RwLockReadGuard<'_, Option<Router>> {
type Curve (line 386) | type Curve = Secp256k1;
type Transaction (line 388) | type Transaction = Transaction;
type Block (line 389) | type Block = Epoch;
type Output (line 391) | type Output = EthereumInInstruction;
type SignableTransaction (line 392) | type SignableTransaction = RouterCommand;
type Eventuality (line 393) | type Eventuality = Eventuality;
type TransactionMachine (line 394) | type TransactionMachine = RouterCommandMachine;
type Scheduler (line 396) | type Scheduler = Scheduler<Self>;
type Address (line 398) | type Address = Address;
constant NETWORK (line 400) | const NETWORK: ExternalNetworkId = ExternalNetworkId::Ethereum;
constant ID (line 401) | const ID: &'static str = "Ethereum";
constant ESTIMATED_BLOCK_TIME_IN_SECONDS (line 402) | const ESTIMATED_BLOCK_TIME_IN_SECONDS: usize = 32 * 12;
constant CONFIRMATIONS (line 403) | const CONFIRMATIONS: usize = 1;
constant DUST (line 405) | const DUST: u64 = 0;
constant COST_TO_AGGREGATE (line 407) | const COST_TO_AGGREGATE: u64 = 0;
constant MAX_OUTPUTS (line 410) | const MAX_OUTPUTS: usize = 256;
method tweak_keys (line 412) | fn tweak_keys(keys: &mut ThresholdKeys<Self::Curve>) {
method external_address (line 419) | async fn external_address(&self, _key: <Secp256k1 as Ciphersuite>::G) ->...
method branch_address (line 423) | fn branch_address(_key: <Secp256k1 as Ciphersuite>::G) -> Option<Address> {
method change_address (line 427) | fn change_address(_key: <Secp256k1 as Ciphersuite>::G) -> Option<Address> {
method forward_address (line 431) | fn forward_address(_key: <Secp256k1 as Ciphersuite>::G) -> Option<Addres...
method get_latest_block_number (line 435) | async fn get_latest_block_number(&self) -> Result<usize, NetworkError> {
method get_block (line 453) | async fn get_block(&self, number: usize) -> Result<Self::Block, NetworkE...
method get_outputs (line 490) | async fn get_outputs(
method get_eventuality_completions (line 569) | async fn get_eventuality_completions(
method needed_fee (line 629) | async fn needed_fee(
method signable_transaction (line 641) | async fn signable_transaction(
method attempt_sign (line 709) | async fn attempt_sign(
method publish_completion (line 720) | async fn publish_completion(
method confirm_completion (line 799) | async fn confirm_completion(
method get_block_number (line 808) | async fn get_block_number(&self, id: &<Self::Block as Block<Self>>::Id) ...
method check_eventuality_by_claim (line 822) | async fn check_eventuality_by_claim(
method get_transaction_by_eventuality (line 831) | async fn get_transaction_by_eventuality(
method mine_block (line 883) | async fn mine_block(&self) {
method test_send (line 888) | async fn test_send(&self, send_to: Self::Address) -> Self::Block {
FILE: processor/src/networks/mod.rs
type NetworkError (line 37) | pub enum NetworkError {
type Id (line 42) | pub trait Id:
type OutputType (line 49) | pub enum OutputType {
method write (line 85) | fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
method read (line 94) | fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
type Output (line 107) | pub trait Output<N: Network>: Send + Sync + Sized + Clone + PartialEq + ...
method kind (line 110) | fn kind(&self) -> OutputType;
method id (line 112) | fn id(&self) -> Self::Id;
method tx_id (line 113) | fn tx_id(&self) -> <N::Transaction as Transaction<N>>::Id;
method key (line 114) | fn key(&self) -> <N::Curve as Ciphersuite>::G;
method presumed_origin (line 116) | fn presumed_origin(&self) -> Option<N::Address>;
method balance (line 118) | fn balance(&self) -> ExternalBalance;
method data (line 119) | fn data(&self) -> &[u8];
method write (line 121) | fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()>;
method read (line 122) | fn read<R: io::Read>(reader: &mut R) -> io::Result<Self>;
type Transaction (line 126) | pub trait Transaction<N: Network>: Send + Sync + Sized + Clone + Partial...
method id (line 128) | fn id(&self) -> Self::Id;
method fee (line 131) | async fn fee(&self, network: &N) -> u64;
type SignableTransaction (line 134) | pub trait SignableTransaction: Send + Sync + Clone + Debug {
method fee (line 136) | fn fee(&self) -> u64;
type Eventuality (line 139) | pub trait Eventuality: Send + Sync + Clone + PartialEq + Debug {
method lookup (line 143) | fn lookup(&self) -> Vec<u8>;
method read (line 145) | fn read<R: io::Read>(reader: &mut R) -> io::Result<Self>;
method serialize (line 146) | fn serialize(&self) -> Vec<u8>;
method claim (line 148) | fn claim(completion: &Self::Completion) -> Self::Claim;
method serialize_completion (line 151) | fn serialize_completion(completion: &Self::Completion) -> Vec<u8>;
method read_completion (line 152) | fn read_completion<R: io::Read>(reader: &mut R) -> io::Result<Self::Co...
type EventualitiesTracker (line 156) | pub struct EventualitiesTracker<E: Eventuality> {
function new (line 164) | pub fn new() -> Self {
function register (line 168) | pub fn register(&mut self, block_number: usize, id: [u8; 32], eventualit...
function drop (line 180) | pub fn drop(&mut self, id: [u8; 32]) {
method default (line 197) | fn default() -> Self {
type Block (line 203) | pub trait Block<N: Network>: Send + Sync + Sized + Clone + Debug {
method id (line 206) | fn id(&self) -> Self::Id;
method parent (line 207) | fn parent(&self) -> Self::Id;
method time (line 211) | async fn time(&self, rpc: &N) -> u64;
type PostFeeBranch (line 215) | pub struct PostFeeBranch {
function drop_branches (line 221) | fn drop_branches<N: Network>(
type PreparedSend (line 234) | pub struct PreparedSend<N: Network> {
type Network (line 243) | pub trait Network: 'static + Send + Sync + Clone + PartialEq + Debug {
constant NETWORK (line 283) | const NETWORK: ExternalNetworkId;
constant ID (line 285) | const ID: &'static str;
constant ESTIMATED_BLOCK_TIME_IN_SECONDS (line 287) | const ESTIMATED_BLOCK_TIME_IN_SECONDS: usize;
constant CONFIRMATIONS (line 289) | const CONFIRMATIONS: usize;
constant MAX_OUTPUTS (line 293) | const MAX_OUTPUTS: usize;
constant DUST (line 301) | const DUST: u64;
constant COST_TO_AGGREGATE (line 304) | const COST_TO_AGGREGATE: u64;
method tweak_keys (line 307) | fn tweak_keys(key: &mut ThresholdKeys<Self::Curve>);
method external_address (line 311) | async fn external_address(&self, key: <Self::Curve as Ciphersuite>::G)...
method branch_address (line 313) | fn branch_address(key: <Self::Curve as Ciphersuite>::G) -> Option<Self...
method change_address (line 315) | fn change_address(key: <Self::Curve as Ciphersuite>::G) -> Option<Self...
method forward_address (line 319) | fn forward_address(key: <Self::Curve as Ciphersuite>::G) -> Option<Sel...
method get_latest_block_number (line 322) | async fn get_latest_block_number(&self) -> Result<usize, NetworkError>;
method get_block (line 324) | async fn get_block(&self, number: usize) -> Result<Self::Block, Networ...
method get_latest_block_number_with_retries (line 327) | async fn get_latest_block_number_with_retries(&self) -> usize {
method get_block_with_retries (line 344) | async fn get_block_with_retries(&self, block_number: usize) -> Self::B...
method get_outputs (line 359) | async fn get_outputs(
method get_eventuality_completions (line 375) | async fn get_eventuality_completions(
method needed_fee (line 391) | async fn needed_fee(
method signable_transaction (line 410) | async fn signable_transaction(
method prepare_send (line 424) | async fn prepare_send(
method attempt_sign (line 598) | async fn attempt_sign(
method publish_completion (line 605) | async fn publish_completion(
method confirm_completion (line 615) | async fn confirm_completion(
method get_block_number (line 623) | async fn get_block_number(&self, id: &<Self::Block as Block<Self>>::Id...
method check_eventuality_by_claim (line 627) | async fn check_eventuality_by_claim(
method get_transaction_by_eventuality (line 635) | async fn get_transaction_by_eventuality(
method mine_block (line 642) | async fn mine_block(&self);
method test_send (line 647) | async fn test_send(&self, key: Self::Address) -> Self::Block;
type UtxoNetwork (line 650) | pub trait UtxoNetwork: Network {
constant MAX_INPUTS (line 654) | const MAX_INPUTS: usize;
FILE: processor/src/networks/monero.rs
type Output (line 49) | pub struct Output(WalletOutput);
type Id (line 60) | type Id = [u8; 32];
method kind (line 62) | fn kind(&self) -> OutputType {
method id (line 72) | fn id(&self) -> Self::Id {
method tx_id (line 76) | fn tx_id(&self) -> [u8; 32] {
method key (line 80) | fn key(&self) -> EdwardsPoint {
method presumed_origin (line 84) | fn presumed_origin(&self) -> Option<Address> {
method balance (line 88) | fn balance(&self) -> ExternalBalance {
method data (line 92) | fn data(&self) -> &[u8] {
method write (line 102) | fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
method read (line 107) | fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
constant EXTERNAL_SUBADDRESS (line 51) | const EXTERNAL_SUBADDRESS: Option<SubaddressIndex> = SubaddressIndex::ne...
constant BRANCH_SUBADDRESS (line 52) | const BRANCH_SUBADDRESS: Option<SubaddressIndex> = SubaddressIndex::new(...
constant CHANGE_SUBADDRESS (line 53) | const CHANGE_SUBADDRESS: Option<SubaddressIndex> = SubaddressIndex::new(...
constant FORWARD_SUBADDRESS (line 54) | const FORWARD_SUBADDRESS: Option<SubaddressIndex> = SubaddressIndex::new...
type Id (line 115) | type Id = [u8; 32];
method id (line 116) | fn id(&self) -> Self::Id {
method fee (line 121) | async fn fee(&self, _: &Monero) -> u64 {
type Claim (line 130) | type Claim = [u8; 32];
type Completion (line 131) | type Completion = Transaction;
method lookup (line 137) | fn lookup(&self) -> Vec<u8> {
method read (line 141) | fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
method serialize (line 144) | fn serialize(&self) -> Vec<u8> {
method claim (line 148) | fn claim(tx: &Transaction) -> [u8; 32] {
method serialize_completion (line 151) | fn serialize_completion(completion: &Transaction) -> Vec<u8> {
method read_completion (line 154) | fn read_completion<R: io::Read>(reader: &mut R) -> io::Result<Transactio...
type SignableTransaction (line 160) | pub struct SignableTransaction(MSignableTransaction);
method fee (line 162) | fn fee(&self) -> u64 {
type Id (line 169) | type Id = [u8; 32];
method id (line 170) | fn id(&self) -> Self::Id {
method parent (line 174) | fn parent(&self) -> Self::Id {
method time (line 178) | async fn time(&self, rpc: &Monero) -> u64 {
type Monero (line 226) | pub struct Monero {
method new (line 254) | pub async fn new(url: String) -> Monero {
method view_pair (line 264) | fn view_pair(spend: EdwardsPoint) -> GuaranteedViewPair {
method address_internal (line 268) | fn address_internal(spend: EdwardsPoint, subaddress: Option<Subaddress...
method scanner (line 272) | fn scanner(spend: EdwardsPoint) -> GuaranteedScanner {
method median_fee (line 281) | async fn median_fee(&self, block: &Block) -> Result<FeeRate, NetworkEr...
method make_signable_transaction (line 301) | async fn make_signable_transaction(
method test_view_pair (line 445) | fn test_view_pair() -> ViewPair {
method test_scanner (line 450) | fn test_scanner() -> Scanner {
method test_address (line 455) | fn test_address() -> Address {
method eq (line 232) | fn eq(&self, _: &Self) -> bool {
function map_rpc_err (line 239) | fn map_rpc_err(err: RpcError) -> NetworkError {
type MakeSignableTransactionResult (line 248) | enum MakeSignableTransactionResult {
type Curve (line 462) | type Curve = Ed25519;
type Transaction (line 464) | type Transaction = Transaction;
type Block (line 465) | type Block = Block;
type Output (line 467) | type Output = Output;
type SignableTransaction (line 468) | type SignableTransaction = SignableTransaction;
type Eventuality (line 469) | type Eventuality = Eventuality;
type TransactionMachine (line 470) | type TransactionMachine = TransactionMachine;
type Scheduler (line 472) | type Scheduler = Scheduler<Monero>;
type Address (line 474) | type Address = Address;
constant NETWORK (line 476) | const NETWORK: ExternalNetworkId = ExternalNetworkId::Monero;
constant ID (line 477) | const ID: &'static str = "Monero";
constant ESTIMATED_BLOCK_TIME_IN_SECONDS (line 478) | const ESTIMATED_BLOCK_TIME_IN_SECONDS: usize = 120;
constant CONFIRMATIONS (line 479) | const CONFIRMATIONS: usize = 10;
constant MAX_OUTPUTS (line 481) | const MAX_OUTPUTS: usize = 16;
constant DUST (line 484) | const DUST: u64 = 10000000000;
constant COST_TO_AGGREGATE (line 487) | const COST_TO_AGGREGATE: u64 = 0;
method tweak_keys (line 490) | fn tweak_keys(_: &mut ThresholdKeys<Self::Curve>) {}
method external_address (line 493) | async fn external_address(&self, key: EdwardsPoint) -> Address {
method branch_address (line 497) | fn branch_address(key: EdwardsPoint) -> Option<Address> {
method change_address (line 501) | fn change_address(key: EdwardsPoint) -> Option<Address> {
method forward_address (line 505) | fn forward_address(key: EdwardsPoint) -> Option<Address> {
method get_latest_block_number (line 509) | async fn get_latest_block_number(&self) -> Result<usize, NetworkError> {
method get_block (line 514) | async fn get_block(&self, number: usize) -> Result<Self::Block, NetworkE...
method get_outputs (line 524) | async fn get_outputs(&self, block: &Block, key: EdwardsPoint) -> Vec<Out...
method get_eventuality_completions (line 558) | async fn get_eventuality_completions(
method needed_fee (line 624) | async fn needed_fee(
method signable_transaction (line 641) | async fn signable_transaction(
method attempt_sign (line 664) | async fn attempt_sign(
method publish_completion (line 675) | async fn publish_completion(&self, tx: &Transaction) -> Result<(), Netwo...
method confirm_completion (line 688) | async fn confirm_completion(
method get_block_number (line 702) | async fn get_block_number(&self, id: &[u8; 32]) -> usize {
method check_eventuality_by_claim (line 707) | async fn check_eventuality_by_claim(
method get_transaction_by_eventuality (line 716) | async fn get_transaction_by_eventuality(
method mine_block (line 732) | async fn mine_block(&self) {
method test_send (line 739) | async fn test_send(&self, address: Address) -> Block {
constant MAX_INPUTS (line 809) | const MAX_INPUTS: usize = 120;
FILE: processor/src/plan.rs
type Payment (line 17) | pub struct Payment<N: Network> {
function transcript (line 24) | pub fn transcript<T: Transcript>(&self, transcript: &mut T) {
function write (line 34) | pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
function read (line 53) | pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
type Plan (line 80) | pub struct Plan<N: Network> {
function fmt (line 103) | fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fm...
function transcript (line 116) | pub fn transcript(&self) -> RecommendedTranscript {
function id (line 143) | pub fn id(&self) -> [u8; 32] {
function write (line 150) | pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
function read (line 180) | pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
FILE: processor/src/signer.rs
method add_active_sign (line 34) | fn add_active_sign(txn: &mut impl DbTxn, id: &[u8; 32]) {
method complete_on_chain (line 45) | fn complete_on_chain(txn: &mut impl DbTxn, id: &[u8; 32]) {
method completions (line 58) | fn completions<N: Network>(
method complete (line 83) | fn complete<N: Network>(
method save_eventuality (line 119) | fn save_eventuality<N: Network>(
method eventuality (line 127) | fn eventuality<N: Network>(getter: &impl Get, id: [u8; 32]) -> Option<N:...
method save_completion (line 133) | fn save_completion<N: Network>(
method completion (line 142) | fn completion<N: Network>(
type PreprocessFor (line 151) | type PreprocessFor<N> = <<N as Network>::TransactionMachine as Preproces...
type SignMachineFor (line 152) | type SignMachineFor<N> = <<N as Network>::TransactionMachine as Preproce...
type SignatureShareFor (line 153) | type SignatureShareFor<N> = <SignMachineFor<N> as SignMachine<
type SignatureMachineFor (line 156) | type SignatureMachineFor<N> = <SignMachineFor<N> as SignMachine<
type Signer (line 160) | pub struct Signer<N: Network, D: Db> {
function fmt (line 177) | fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
function rebroadcast_task (line 190) | pub async fn rebroadcast_task(db: D, network: N) {
function new (line 206) | pub fn new(network: N, session: Session, keys: Vec<ThresholdKeys<N::Curv...
function verify_id (line 223) | fn verify_id(&self, id: &SignId) -> Result<(), ()> {
function already_completed (line 254) | fn already_completed(txn: &mut D::Transaction<'_>, id: [u8; 32]) -> bool {
function complete (line 268) | fn complete(
function completed (line 289) | pub fn completed(
function claimed_eventuality_completion (line 311) | async fn claimed_eventuality_completion(
function attempt (line 365) | async fn attempt(
function sign_transaction (line 456) | pub async fn sign_transaction(
function handle (line 478) | pub async fn handle(
FILE: processor/src/slash_report_signer.rs
type Preprocess (line 35) | type Preprocess = <AlgorithmMachine<Ristretto, Schnorrkel> as Preprocess...
type SignatureShare (line 36) | type SignatureShare = <AlgorithmSignMachine<Ristretto, Schnorrkel> as Si...
type SlashReportSigner (line 40) | pub struct SlashReportSigner {
method fmt (line 54) | fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
method new (line 67) | pub fn new(
method handle (line 119) | pub fn handle(
FILE: processor/src/tests/addresses.rs
function spend (line 24) | async fn spend<N: UtxoNetwork, D: Db>(
function test_addresses (line 85) | pub async fn test_addresses<N: UtxoNetwork>(
FILE: processor/src/tests/batch_signer.rs
function test_batch_signer (line 27) | fn test_batch_signer() {
FILE: processor/src/tests/cosigner.rs
function test_cosigner (line 22) | fn test_cosigner() {
FILE: processor/src/tests/key_gen.rs
constant ID (line 21) | const ID: KeyGenId = KeyGenId { session: Session(1), attempt: 3 };
function test_key_gen (line 23) | pub fn test_key_gen<N: Network>() {
FILE: processor/src/tests/literal/mod.rs
function test_dust_constant (line 46) | fn test_dust_constant() {
function test_receive_data_from_input (line 57) | fn test_receive_data_from_input() {
function spawn_bitcoin (line 194) | fn spawn_bitcoin() -> DockerTest {
function bitcoin (line 213) | async fn bitcoin(
function spawn_monero (line 241) | fn spawn_monero() -> DockerTest {
function monero (line 260) | async fn monero(
function spawn_ethereum (line 296) | fn spawn_ethereum() -> DockerTest {
function ethereum (line 315) | async fn ethereum(
FILE: processor/src/tests/mod.rs
function init_logger (line 19) | fn init_logger() {
FILE: processor/src/tests/scanner.rs
function new_scanner (line 20) | pub async fn new_scanner<N: Network, D: Db>(
function test_scanner (line 45) | pub async fn test_scanner<N: Network>(
function test_no_deadlock_in_multisig_completed (line 115) | pub async fn test_no_deadlock_in_multisig_completed<N: Network>(
FILE: processor/src/tests/signer.rs
function sign (line 29) | pub async fn sign<N: Network>(
function test_signer (line 159) | pub async fn test_signer<N: Network>(
FILE: processor/src/tests/wallet.rs
function test_wallet (line 30) | pub async fn test_wallet<N: Network>(
FILE: substrate/abi/src/babe.rs
type ReportEquivocation (line 8) | pub struct ReportEquivocation {
type Call (line 18) | pub enum Call {
FILE: substrate/abi/src/coins.rs
type Call (line 12) | pub enum Call {
type Event (line 24) | pub enum Event {
FILE: substrate/abi/src/dex.rs
type PoolId (line 5) | type PoolId = ExternalCoin;
type MaxSwapPathLength (line 6) | type MaxSwapPathLength = sp_core::ConstU32<3>;
type Call (line 13) | pub enum Call {
type Event (line 48) | pub enum Event {
FILE: substrate/abi/src/economic_security.rs
type Event (line 8) | pub enum Event {
FILE: substrate/abi/src/genesis_liquidity.rs
type Call (line 10) | pub enum Call {
type Event (line 20) | pub enum Event {
FILE: substrate/abi/src/grandpa.rs
type ReportEquivocation (line 8) | pub struct ReportEquivocation {
type Call (line 16) | pub enum Call {
type Event (line 26) | pub enum Event {
FILE: substrate/abi/src/in_instructions.rs
type Call (line 12) | pub enum Call {
type Event (line 22) | pub enum Event {
FILE: substrate/abi/src/lib.rs
type Call (line 38) | pub enum Call {
type TransactionPaymentEvent (line 65) | pub enum TransactionPaymentEvent {
type Event (line 72) | pub enum Event {
type Extra (line 102) | pub struct Extra {
type SignedPayloadExtra (line 116) | pub struct SignedPayloadExtra {
type Transaction (line 123) | pub type Transaction = tx::Transaction<Call, Extra>;
FILE: substrate/abi/src/liquidity_tokens.rs
type Call (line 8) | pub enum Call {
type Event (line 18) | pub enum Event {
FILE: substrate/abi/src/signals.rs
type Call (line 14) | pub enum Call {
type Event (line 28) | pub enum Event {
FILE: substrate/abi/src/system.rs
type Event (line 9) | pub enum Event {
FILE: substrate/abi/src/timestamp.rs
type Call (line 6) | pub enum Call {
FILE: substrate/abi/src/tx.rs
type TransactionMember (line 12) | pub trait TransactionMember:
type TransactionEncodeAs (line 21) | type TransactionEncodeAs<'a, Extra> =
type TransactionDecodeAs (line 23) | type TransactionDecodeAs<Extra> = (crate::Call, Option<(SeraiAddress, Si...
type Transaction (line 29) | pub struct Transaction<
function new (line 41) | pub fn new(call: crate::Call, signature: Option<(SeraiAddress, Signature...
function call (line 45) | pub fn call(&self) -> &crate::Call {
function signer (line 49) | pub fn signer(&self) -> Option<SeraiAddress> {
function using_encoded (line 57) | fn using_encoded<R, F: FnOnce(&[u8]) -> R>(&self, f: F) -> R {
function decode (line 65) | fn decode<I: scale::Input>(input: &mut I) -> Result<Self, scale::Error> {
method serialize (line 80) | fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Er...
function deserialize (line 95) | fn deserialize<D: Deserializer<'a>>(de: D) -> Result<Self, D::Error> {
type Call (line 108) | type Call = Call;
type SignaturePayload (line 109) | type SignaturePayload = (SeraiAddress, Signature, Extra);
function is_signed (line 110) | fn is_signed(&self) -> Option<bool> {
function new (line 113) | fn new(call: Call, signature: Option<Self::SignaturePayload>) -> Option<...
type Call (line 123) | type Call = Call;
function call (line 124) | fn call(&self) -> &Call {
method get_dispatch_info (line 134) | fn get_dispatch_info(&self) -> frame_support::dispatch::DispatchInfo {
type Checked (line 145) | type Checked = sp_runtime::generic::CheckedExtrinsic<Public, Call, Extra
Condensed preview — 594 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (2,616K chars).
[
{
"path": ".gitattributes",
"chars": 94,
"preview": "# Auto detect text files and perform LF normalization\n* text=auto\n* text eol=lf\n\n*.pdf binary\n"
},
{
"path": ".github/LICENSE",
"chars": 1073,
"preview": "MIT License\n\nCopyright (c) 2022-2023 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining "
},
{
"path": ".github/actions/bitcoin/action.yml",
"chars": 1162,
"preview": "name: bitcoin-regtest\ndescription: Spawns a regtest Bitcoin daemon\n\ninputs:\n version:\n description: \"Version to down"
},
{
"path": ".github/actions/build-dependencies/action.yml",
"chars": 3340,
"preview": "name: build-dependencies\ndescription: Installs build dependencies for Serai\n\nruns:\n using: \"composite\"\n steps:\n - n"
},
{
"path": ".github/actions/monero/action.yml",
"chars": 1449,
"preview": "name: monero-regtest\ndescription: Spawns a regtest Monero daemon\n\ninputs:\n version:\n description: \"Version to downlo"
},
{
"path": ".github/actions/monero-wallet-rpc/action.yml",
"chars": 1579,
"preview": "name: monero-wallet-rpc\ndescription: Spawns a Monero Wallet-RPC.\n\ninputs:\n version:\n description: \"Version to downlo"
},
{
"path": ".github/actions/test-dependencies/action.yml",
"chars": 1040,
"preview": "name: test-dependencies\ndescription: Installs test dependencies for Serai\n\ninputs:\n monero-version:\n description: \"M"
},
{
"path": ".github/nightly-version",
"chars": 19,
"preview": "nightly-2025-11-01\n"
},
{
"path": ".github/workflows/common-tests.yml",
"chars": 664,
"preview": "name: common/ Tests\n\non:\n push:\n branches:\n - develop\n paths:\n - \"common/**\"\n\n pull_request:\n paths"
},
{
"path": ".github/workflows/coordinator-tests.yml",
"chars": 885,
"preview": "name: Coordinator Tests\n\non:\n push:\n branches:\n - develop\n paths:\n - \"common/**\"\n - \"crypto/**\"\n "
},
{
"path": ".github/workflows/crypto-tests.yml",
"chars": 1037,
"preview": "name: crypto/ Tests\n\non:\n push:\n branches:\n - develop\n paths:\n - \"common/**\"\n - \"crypto/**\"\n\n pul"
},
{
"path": ".github/workflows/daily-deny.yml",
"chars": 599,
"preview": "name: Daily Deny Check\n\non:\n schedule:\n - cron: \"0 0 * * *\"\n\njobs:\n deny:\n name: Run cargo deny\n runs-on: ubu"
},
{
"path": ".github/workflows/full-stack-tests.yml",
"chars": 464,
"preview": "name: Full Stack Tests\n\non:\n push:\n branches:\n - develop\n\n pull_request:\n\n workflow_dispatch:\n\njobs:\n build:"
},
{
"path": ".github/workflows/lint.yml",
"chars": 2635,
"preview": "name: Lint\n\non:\n push:\n branches:\n - develop\n pull_request:\n workflow_dispatch:\n\njobs:\n clippy:\n strategy"
},
{
"path": ".github/workflows/message-queue-tests.yml",
"chars": 801,
"preview": "name: Message Queue Tests\n\non:\n push:\n branches:\n - develop\n paths:\n - \"common/**\"\n - \"crypto/**\"\n"
},
{
"path": ".github/workflows/mini-tests.yml",
"chars": 485,
"preview": "name: mini/ Tests\n\non:\n push:\n branches:\n - develop\n paths:\n - \"mini/**\"\n\n pull_request:\n paths:\n "
},
{
"path": ".github/workflows/monthly-nightly-update.yml",
"chars": 1552,
"preview": "name: Monthly Nightly Update\n\non:\n schedule:\n - cron: \"0 0 1 * *\"\n\njobs:\n update:\n name: Update nightly\n runs"
},
{
"path": ".github/workflows/networks-tests.yml",
"chars": 728,
"preview": "name: networks/ Tests\n\non:\n push:\n branches:\n - develop\n paths:\n - \"common/**\"\n - \"crypto/**\"\n "
},
{
"path": ".github/workflows/no-std.yml",
"chars": 840,
"preview": "name: no-std build\n\non:\n push:\n branches:\n - develop\n paths:\n - \"common/**\"\n - \"crypto/**\"\n -"
},
{
"path": ".github/workflows/pages.yml",
"chars": 3181,
"preview": "# MIT License\n#\n# Copyright (c) 2022 just-the-docs\n# Copyright (c) 2022-2024 Luke Parker\n#\n# Permission is hereby grante"
},
{
"path": ".github/workflows/processor-tests.yml",
"chars": 871,
"preview": "name: Processor Tests\n\non:\n push:\n branches:\n - develop\n paths:\n - \"common/**\"\n - \"crypto/**\"\n "
},
{
"path": ".github/workflows/reproducible-runtime.yml",
"chars": 821,
"preview": "name: Reproducible Runtime\n\non:\n push:\n branches:\n - develop\n paths:\n - \"Cargo.lock\"\n - \"common/**"
},
{
"path": ".github/workflows/tests.yml",
"chars": 2431,
"preview": "name: Tests\n\non:\n push:\n branches:\n - develop\n paths:\n - \"common/**\"\n - \"crypto/**\"\n - \"netwo"
},
{
"path": ".gitignore",
"chars": 275,
"preview": "target\n\n# Don't commit any `Cargo.lock` which aren't the workspace's\nCargo.lock\n!./Cargo.lock\n\n# Don't commit any `Docke"
},
{
"path": ".rustfmt.toml",
"chars": 355,
"preview": "edition = \"2021\"\ntab_spaces = 2\n\nmax_width = 100\n# Let the developer decide based on the 100 char line limit\nuse_small_h"
},
{
"path": "AGPL-3.0",
"chars": 34523,
"preview": " GNU AFFERO GENERAL PUBLIC LICENSE\n Version 3, 19 November 2007\n\n Copyright (C)"
},
{
"path": "CONTRIBUTING.md",
"chars": 1185,
"preview": "# Contributing\n\nContributions come in a variety of forms. Developing Serai, helping document it,\nusing its libraries in "
},
{
"path": "Cargo.toml",
"chars": 5471,
"preview": "[workspace]\nresolver = \"2\"\nmembers = [\n # std patches\n \"patches/matches\",\n\n # Rewrites/redirects\n \"patches/option-ex"
},
{
"path": "LICENSE",
"chars": 522,
"preview": "Serai crates are licensed under one of two licenses, either MIT or AGPL-3.0,\ndepending on the crate in question. Each cr"
},
{
"path": "README.md",
"chars": 2667,
"preview": "# Serai\n\nSerai is a new DEX, built from the ground up, initially planning on listing\nBitcoin, Ethereum, DAI, and Monero,"
},
{
"path": "audits/Cypher Stack crypto March 2023/LICENSE",
"chars": 1069,
"preview": "MIT License\n\nCopyright (c) 2023 Cypher Stack\n\nPermission is hereby granted, free of charge, to any person obtaining a co"
},
{
"path": "audits/Cypher Stack crypto March 2023/README.md",
"chars": 341,
"preview": "# Cypher Stack /crypto Audit, March 2023\n\nThis audit was over the /crypto folder, excluding the ed448 crate, the `Ed448`"
},
{
"path": "audits/Cypher Stack networks bitcoin August 2023/LICENSE",
"chars": 1069,
"preview": "MIT License\n\nCopyright (c) 2023 Cypher Stack\n\nPermission is hereby granted, free of charge, to any person obtaining a co"
},
{
"path": "audits/Cypher Stack networks bitcoin August 2023/README.md",
"chars": 296,
"preview": "# Cypher Stack /networks/bitcoin Audit, August 2023\n\nThis audit was over the `/networks/bitcoin` folder (at the time loc"
},
{
"path": "common/db/Cargo.toml",
"chars": 664,
"preview": "[package]\nname = \"serai-db\"\nversion = \"0.1.0\"\ndescription = \"A simple database trait and backends for it\"\nlicense = \"MIT"
},
{
"path": "common/db/LICENSE",
"chars": 1073,
"preview": "MIT License\n\nCopyright (c) 2022-2023 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining "
},
{
"path": "common/db/src/create_db.rs",
"chars": 4047,
"preview": "#[doc(hidden)]\npub fn serai_db_key(\n db_dst: &'static [u8],\n item_dst: &'static [u8],\n key: impl AsRef<[u8]>,\n) -> Ve"
},
{
"path": "common/db/src/lib.rs",
"chars": 1083,
"preview": "mod create_db;\npub use create_db::*;\n\nmod mem;\npub use mem::*;\n\n#[cfg(feature = \"rocksdb\")]\nmod rocks;\n#[cfg(feature = \""
},
{
"path": "common/db/src/mem.rs",
"chars": 1887,
"preview": "use core::fmt::Debug;\nuse std::{\n sync::{Arc, RwLock},\n collections::{HashSet, HashMap},\n};\n\nuse crate::*;\n\n/// An ato"
},
{
"path": "common/db/src/parity_db.rs",
"chars": 1233,
"preview": "use std::sync::Arc;\n\npub use ::parity_db::{Options, Db as ParityDb};\n\nuse crate::*;\n\n#[must_use]\npub struct Transaction<"
},
{
"path": "common/db/src/rocks.rs",
"chars": 2220,
"preview": "use std::sync::Arc;\n\nuse rocksdb::{\n DBCompressionType, ThreadMode, SingleThreaded, LogLevel, WriteOptions,\n Transacti"
},
{
"path": "common/env/Cargo.toml",
"chars": 442,
"preview": "[package]\nname = \"serai-env\"\nversion = \"0.1.0\"\ndescription = \"A common library for Serai apps to access environment vari"
},
{
"path": "common/env/LICENSE",
"chars": 624,
"preview": "AGPL-3.0-only license\n\nCopyright (c) 2023 Luke Parker\n\nThis program is free software: you can redistribute it and/or mod"
},
{
"path": "common/env/src/lib.rs",
"chars": 259,
"preview": "#![cfg_attr(docsrs, feature(doc_cfg))]\n\n// Obtain a variable from the Serai environment/secret store.\npub fn var(variabl"
},
{
"path": "common/patchable-async-sleep/Cargo.toml",
"chars": 572,
"preview": "[package]\nname = \"patchable-async-sleep\"\nversion = \"0.1.0\"\ndescription = \"An async sleep function, patchable to the pref"
},
{
"path": "common/patchable-async-sleep/LICENSE",
"chars": 1068,
"preview": "MIT License\n\nCopyright (c) 2024 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining a cop"
},
{
"path": "common/patchable-async-sleep/README.md",
"chars": 319,
"preview": "# Patchable Async Sleep\n\nAn async sleep function, patchable to the preferred runtime.\n\nThis crate is `tokio`-backed. App"
},
{
"path": "common/patchable-async-sleep/src/lib.rs",
"chars": 277,
"preview": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![doc = include_str!(\"../README.md\")]\n#![deny(missing_docs)]\n\nuse core::time::Du"
},
{
"path": "common/request/Cargo.toml",
"chars": 1196,
"preview": "[package]\nname = \"simple-request\"\nversion = \"0.1.0\"\ndescription = \"A simple HTTP(S) request library\"\nlicense = \"MIT\"\nrep"
},
{
"path": "common/request/LICENSE",
"chars": 1068,
"preview": "MIT License\n\nCopyright (c) 2023 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining a cop"
},
{
"path": "common/request/README.md",
"chars": 285,
"preview": "# Simple Request\n\nA simple alternative to reqwest, supporting HTTPS, intended to support a\nmajority of use cases with a "
},
{
"path": "common/request/src/lib.rs",
"chars": 5150,
"preview": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![doc = include_str!(\"../README.md\")]\n\nuse std::sync::Arc;\n\nuse tokio::sync::Mut"
},
{
"path": "common/request/src/request.rs",
"chars": 2028,
"preview": "use hyper::body::Bytes;\n#[cfg(feature = \"basic-auth\")]\nuse hyper::header::HeaderValue;\npub use http_body_util::Full;\n\n#["
},
{
"path": "common/request/src/response.rs",
"chars": 675,
"preview": "use hyper::{\n StatusCode,\n header::{HeaderValue, HeaderMap},\n body::{Buf, Incoming},\n};\nuse http_body_util::BodyExt;\n"
},
{
"path": "common/std-shims/Cargo.toml",
"chars": 768,
"preview": "[package]\nname = \"std-shims\"\nversion = \"0.1.4\"\ndescription = \"A series of std shims to make alloc more feasible\"\nlicense"
},
{
"path": "common/std-shims/LICENSE",
"chars": 1068,
"preview": "MIT License\n\nCopyright (c) 2023 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining a cop"
},
{
"path": "common/std-shims/README.md",
"chars": 433,
"preview": "# std shims\n\nA crate which passes through to std when the default `std` feature is enabled,\nyet provides a series of shi"
},
{
"path": "common/std-shims/src/collections.rs",
"chars": 182,
"preview": "#[cfg(feature = \"std\")]\npub use std::collections::*;\n\n#[cfg(not(feature = \"std\"))]\npub use alloc::collections::*;\n#[cfg("
},
{
"path": "common/std-shims/src/io.rs",
"chars": 2354,
"preview": "#[cfg(feature = \"std\")]\npub use std::io::*;\n\n#[cfg(not(feature = \"std\"))]\nmod shims {\n use core::fmt::{Debug, Formatter"
},
{
"path": "common/std-shims/src/lib.rs",
"chars": 1753,
"preview": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![doc = include_str!(\"../README.md\")]\n#![cfg_attr(not(feature = \"std\"), no_std)]"
},
{
"path": "common/std-shims/src/sync.rs",
"chars": 820,
"preview": "pub use core::sync::*;\npub use alloc::sync::*;\n\nmod mutex_shim {\n #[cfg(feature = \"std\")]\n pub use std::sync::*;\n #[c"
},
{
"path": "common/zalloc/Cargo.toml",
"chars": 642,
"preview": "[package]\nname = \"zalloc\"\nversion = \"0.1.0\"\ndescription = \"An allocator wrapper which zeroizes memory on dealloc\"\nlicens"
},
{
"path": "common/zalloc/LICENSE",
"chars": 1073,
"preview": "MIT License\n\nCopyright (c) 2022-2023 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining "
},
{
"path": "common/zalloc/build.rs",
"chars": 263,
"preview": "#[rustversion::nightly]\nfn main() {\n println!(\"cargo::rustc-check-cfg=cfg(zalloc_rustc_nightly)\");\n println!(\"cargo::r"
},
{
"path": "common/zalloc/src/lib.rs",
"chars": 1531,
"preview": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![cfg_attr(docsrs, feature(doc_cfg))]\n#![cfg_attr(all(zalloc_rustc_nightly, feat"
},
{
"path": "coordinator/Cargo.toml",
"chars": 3413,
"preview": "[package]\nname = \"serai-coordinator\"\nversion = \"0.1.0\"\ndescription = \"Serai coordinator to prepare batches and sign tran"
},
{
"path": "coordinator/LICENSE",
"chars": 624,
"preview": "AGPL-3.0-only license\n\nCopyright (c) 2023 Luke Parker\n\nThis program is free software: you can redistribute it and/or mod"
},
{
"path": "coordinator/README.md",
"chars": 232,
"preview": "# Coordinator\n\nThe Serai coordinator communicates with other coordinators to prepare batches\nfor Serai and sign transact"
},
{
"path": "coordinator/src/cosign_evaluator.rs",
"chars": 10810,
"preview": "use core::time::Duration;\nuse std::{\n sync::Arc,\n collections::{HashSet, HashMap},\n};\n\nuse tokio::{\n sync::{mpsc, Mut"
},
{
"path": "coordinator/src/db.rs",
"chars": 3998,
"preview": "use blake2::{\n digest::{consts::U32, Digest},\n Blake2b,\n};\n\nuse scale::Encode;\nuse borsh::{BorshSerialize, BorshDeseri"
},
{
"path": "coordinator/src/main.rs",
"chars": 51820,
"preview": "#![expect(clippy::cast_possible_truncation)]\n\nuse core::ops::Deref;\nuse std::{\n sync::{OnceLock, Arc},\n time::Duration"
},
{
"path": "coordinator/src/p2p.rs",
"chars": 39196,
"preview": "use core::{time::Duration, fmt};\nuse std::{\n sync::Arc,\n io::{self, Read},\n collections::{HashSet, HashMap},\n time::"
},
{
"path": "coordinator/src/processors.rs",
"chars": 1565,
"preview": "use std::sync::Arc;\n\nuse serai_client::primitives::ExternalNetworkId;\nuse processor_messages::{ProcessorMessage, Coordin"
},
{
"path": "coordinator/src/substrate/cosign.rs",
"chars": 11818,
"preview": "/*\n If:\n A) This block has events and it's been at least X blocks since the last cosign or\n B) This block doesn't"
},
{
"path": "coordinator/src/substrate/db.rs",
"chars": 955,
"preview": "use serai_client::primitives::ExternalNetworkId;\n\npub use serai_db::*;\n\nmod inner_db {\n use super::*;\n\n create_db!(\n "
},
{
"path": "coordinator/src/substrate/mod.rs",
"chars": 18112,
"preview": "use core::{ops::Deref, time::Duration};\nuse std::{\n sync::Arc,\n collections::{HashSet, HashMap},\n};\n\nuse zeroize::Zero"
},
{
"path": "coordinator/src/tests/mod.rs",
"chars": 3466,
"preview": "use core::fmt::Debug;\nuse std::{\n sync::Arc,\n collections::{VecDeque, HashSet, HashMap},\n};\n\nuse serai_client::{primit"
},
{
"path": "coordinator/src/tests/tributary/chain.rs",
"chars": 6454,
"preview": "use std::{\n time::{Duration, SystemTime},\n collections::HashSet,\n};\n\nuse zeroize::Zeroizing;\nuse rand_core::{RngCore, "
},
{
"path": "coordinator/src/tests/tributary/dkg.rs",
"chars": 12502,
"preview": "use core::time::Duration;\nuse std::collections::HashMap;\n\nuse zeroize::Zeroizing;\nuse rand_core::{RngCore, OsRng};\n\nuse "
},
{
"path": "coordinator/src/tests/tributary/handle_p2p.rs",
"chars": 2269,
"preview": "use core::time::Duration;\nuse std::sync::Arc;\n\nuse rand_core::OsRng;\n\nuse tokio::{\n sync::{mpsc, broadcast},\n time::sl"
},
{
"path": "coordinator/src/tests/tributary/mod.rs",
"chars": 8459,
"preview": "use core::fmt::Debug;\n\nuse rand_core::{RngCore, OsRng};\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::{group::Group, "
},
{
"path": "coordinator/src/tests/tributary/sync.rs",
"chars": 5723,
"preview": "use core::time::Duration;\nuse std::{sync::Arc, collections::HashSet};\n\nuse rand_core::OsRng;\n\nuse dalek_ff_group::Ristre"
},
{
"path": "coordinator/src/tests/tributary/tx.rs",
"chars": 2010,
"preview": "use core::time::Duration;\n\nuse rand_core::{RngCore, OsRng};\n\nuse tokio::time::sleep;\n\nuse serai_db::MemDb;\n\nuse tributar"
},
{
"path": "coordinator/src/tributary/db.rs",
"chars": 6593,
"preview": "use std::collections::HashMap;\n\nuse scale::Encode;\nuse borsh::{BorshSerialize, BorshDeserialize};\n\nuse dalek_ff_group::R"
},
{
"path": "coordinator/src/tributary/handle.rs",
"chars": 29508,
"preview": "use core::ops::Deref;\nuse std::collections::HashMap;\n\nuse zeroize::Zeroizing;\nuse rand_core::OsRng;\n\nuse dalek_ff_group:"
},
{
"path": "coordinator/src/tributary/mod.rs",
"chars": 3381,
"preview": "use dalek_ff_group::Ristretto;\nuse ciphersuite::{group::GroupEncoding, Ciphersuite};\n\nuse serai_client::validator_sets::"
},
{
"path": "coordinator/src/tributary/scanner.rs",
"chars": 29130,
"preview": "use core::{marker::PhantomData, ops::Deref, future::Future, time::Duration};\nuse std::{sync::Arc, collections::HashSet};"
},
{
"path": "coordinator/src/tributary/signing_protocol.rs",
"chars": 12807,
"preview": "/*\n A MuSig-based signing protocol executed with the validators' keys.\n\n This is used for confirming the results of a "
},
{
"path": "coordinator/src/tributary/spec.rs",
"chars": 5169,
"preview": "use core::{ops::Range, fmt::Debug};\nuse std::{io, collections::HashMap};\n\nuse transcript::{Transcript, RecommendedTransc"
},
{
"path": "coordinator/src/tributary/transaction.rs",
"chars": 25419,
"preview": "use core::{ops::Deref, fmt::Debug};\nuse std::io;\n\nuse zeroize::Zeroizing;\nuse rand_core::{RngCore, CryptoRng};\n\nuse blak"
},
{
"path": "coordinator/tributary/Cargo.toml",
"chars": 2140,
"preview": "[package]\nname = \"tributary-chain\"\nversion = \"0.1.0\"\ndescription = \"A micro-blockchain to provide consensus and ordering"
},
{
"path": "coordinator/tributary/LICENSE",
"chars": 624,
"preview": "AGPL-3.0-only license\n\nCopyright (c) 2023 Luke Parker\n\nThis program is free software: you can redistribute it and/or mod"
},
{
"path": "coordinator/tributary/README.md",
"chars": 90,
"preview": "# Tributary\n\nA verifiable, ordered broadcast layer implemented as a BFT micro-blockchain.\n"
},
{
"path": "coordinator/tributary/src/block.rs",
"chars": 8389,
"preview": "use std::{\n io,\n collections::{VecDeque, HashSet, HashMap},\n};\n\nuse thiserror::Error;\n\nuse blake2::{Digest, Blake2s256"
},
{
"path": "coordinator/tributary/src/blockchain.rs",
"chars": 10955,
"preview": "use std::collections::{VecDeque, HashSet};\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::{group::GroupEncoding, Ciphe"
},
{
"path": "coordinator/tributary/src/lib.rs",
"chars": 12199,
"preview": "use core::{marker::PhantomData, fmt::Debug};\nuse std::{sync::Arc, io};\n\nuse async_trait::async_trait;\n\nuse zeroize::Zero"
},
{
"path": "coordinator/tributary/src/mempool.rs",
"chars": 8406,
"preview": "use std::collections::HashMap;\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::Ciphersuite;\n\nuse serai_db::{DbTxn, Db};"
},
{
"path": "coordinator/tributary/src/merkle.rs",
"chars": 753,
"preview": "use blake2::{Digest, Blake2s256};\n\npub(crate) fn merkle(hash_args: &[[u8; 32]]) -> [u8; 32] {\n let mut hashes = Vec::wi"
},
{
"path": "coordinator/tributary/src/provided.rs",
"chars": 6674,
"preview": "use std::collections::{VecDeque, HashMap};\n\nuse thiserror::Error;\n\nuse serai_db::{Get, DbTxn, Db};\n\nuse crate::transacti"
},
{
"path": "coordinator/tributary/src/tendermint/mod.rs",
"chars": 12606,
"preview": "use core::ops::Deref;\nuse std::{sync::Arc, collections::HashMap};\n\nuse async_trait::async_trait;\n\nuse subtle::ConstantTi"
},
{
"path": "coordinator/tributary/src/tendermint/tx.rs",
"chars": 1982,
"preview": "use std::io;\n\nuse scale::{Encode, Decode, IoReader};\n\nuse blake2::{Digest, Blake2s256};\n\nuse dalek_ff_group::Ristretto;\n"
},
{
"path": "coordinator/tributary/src/tests/block.rs",
"chars": 3871,
"preview": "use std::{sync::Arc, io, collections::HashMap, fmt::Debug};\n\nuse blake2::{Digest, Blake2s256};\n\nuse dalek_ff_group::Rist"
},
{
"path": "coordinator/tributary/src/tests/blockchain.rs",
"chars": 19226,
"preview": "use core::ops::Deref;\nuse std::{\n collections::{VecDeque, HashMap},\n sync::Arc,\n io,\n};\n\nuse zeroize::Zeroizing;\nuse "
},
{
"path": "coordinator/tributary/src/tests/mempool.rs",
"chars": 5536,
"preview": "use std::{sync::Arc, collections::HashMap};\n\nuse zeroize::Zeroizing;\nuse rand::{RngCore, rngs::OsRng};\n\nuse dalek_ff_gro"
},
{
"path": "coordinator/tributary/src/tests/merkle.rs",
"chars": 799,
"preview": "use std::collections::HashSet;\n\nuse rand::{RngCore, rngs::OsRng};\n\n#[test]\nfn merkle() {\n let mut used = HashSet::new()"
},
{
"path": "coordinator/tributary/src/tests/mod.rs",
"chars": 199,
"preview": "#[cfg(test)]\nmod tendermint;\n\nmod transaction;\npub use transaction::*;\n\n#[cfg(test)]\nmod merkle;\n\n#[cfg(test)]\nmod block"
},
{
"path": "coordinator/tributary/src/tests/p2p.rs",
"chars": 201,
"preview": "pub use crate::P2p;\n\n#[derive(Clone, Debug)]\npub struct DummyP2p;\n\n#[async_trait::async_trait]\nimpl P2p for DummyP2p {\n "
},
{
"path": "coordinator/tributary/src/tests/tendermint.rs",
"chars": 737,
"preview": "use tendermint::ext::Network;\nuse crate::{\n P2p, TendermintTx,\n tendermint::{TARGET_BLOCK_TIME, TendermintNetwork},\n};"
},
{
"path": "coordinator/tributary/src/tests/transaction/mod.rs",
"chars": 6312,
"preview": "use core::ops::Deref;\nuse std::{sync::Arc, io};\n\nuse zeroize::Zeroizing;\nuse rand::{RngCore, CryptoRng, rngs::OsRng};\n\nu"
},
{
"path": "coordinator/tributary/src/tests/transaction/signed.rs",
"chars": 2434,
"preview": "use rand::rngs::OsRng;\n\nuse blake2::{Digest, Blake2s256};\n\nuse dalek_ff_group::Ristretto;\nuse ciphersuite::{group::ff::F"
},
{
"path": "coordinator/tributary/src/tests/transaction/tendermint.rs",
"chars": 10836,
"preview": "use std::sync::Arc;\n\nuse zeroize::Zeroizing;\nuse rand::{RngCore, rngs::OsRng};\n\nuse dalek_ff_group::Ristretto;\nuse ciphe"
},
{
"path": "coordinator/tributary/src/transaction.rs",
"chars": 7589,
"preview": "use core::fmt::Debug;\nuse std::io;\n\nuse zeroize::Zeroize;\nuse thiserror::Error;\n\nuse blake2::{Digest, Blake2b512};\n\nuse "
},
{
"path": "coordinator/tributary/tendermint/Cargo.toml",
"chars": 1299,
"preview": "[package]\nname = \"tendermint-machine\"\nversion = \"0.2.0\"\ndescription = \"An implementation of the Tendermint state machine"
},
{
"path": "coordinator/tributary/tendermint/LICENSE",
"chars": 1073,
"preview": "MIT License\n\nCopyright (c) 2022-2023 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining "
},
{
"path": "coordinator/tributary/tendermint/README.md",
"chars": 2641,
"preview": "# Tendermint\n\nAn implementation of the Tendermint state machine in Rust.\n\nThis is solely the state machine, intended to "
},
{
"path": "coordinator/tributary/tendermint/src/block.rs",
"chars": 6869,
"preview": "use std::{\n sync::Arc,\n collections::{HashSet, HashMap},\n};\n\nuse serai_db::{Get, DbTxn, Db};\n\nuse crate::{\n time::Can"
},
{
"path": "coordinator/tributary/tendermint/src/ext.rs",
"chars": 10634,
"preview": "use core::{hash::Hash, fmt::Debug};\nuse std::{sync::Arc, collections::HashSet};\n\nuse async_trait::async_trait;\nuse thise"
},
{
"path": "coordinator/tributary/tendermint/src/lib.rs",
"chars": 37466,
"preview": "#![expect(clippy::cast_possible_truncation)]\n\nuse core::fmt::Debug;\n\nuse std::{\n sync::Arc,\n time::{SystemTime, Instan"
},
{
"path": "coordinator/tributary/tendermint/src/message_log.rs",
"chars": 2831,
"preview": "use std::{sync::Arc, collections::HashMap};\n\nuse parity_scale_codec::Encode;\n\nuse crate::{ext::*, RoundNumber, Step, Dat"
},
{
"path": "coordinator/tributary/tendermint/src/round.rs",
"chars": 2605,
"preview": "use std::{\n marker::PhantomData,\n time::{Duration, Instant},\n collections::HashMap,\n};\n\nuse futures_util::{FutureExt,"
},
{
"path": "coordinator/tributary/tendermint/src/time.rs",
"chars": 1278,
"preview": "use core::ops::Add;\nuse std::time::{UNIX_EPOCH, SystemTime, Instant, Duration};\n\n#[derive(Clone, Copy, PartialEq, Eq, De"
},
{
"path": "coordinator/tributary/tendermint/tests/ext.rs",
"chars": 5031,
"preview": "use std::{\n sync::Arc,\n time::{UNIX_EPOCH, SystemTime, Duration},\n};\n\nuse async_trait::async_trait;\n\nuse parity_scale_"
},
{
"path": "crypto/ciphersuite/Cargo.toml",
"chars": 1530,
"preview": "[package]\nname = \"ciphersuite\"\nversion = \"0.4.2\"\ndescription = \"Ciphersuites built around ff/group\"\nlicense = \"MIT\"\nrepo"
},
{
"path": "crypto/ciphersuite/LICENSE",
"chars": 1073,
"preview": "MIT License\n\nCopyright (c) 2021-2023 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining "
},
{
"path": "crypto/ciphersuite/README.md",
"chars": 2225,
"preview": "# Ciphersuite\n\nCiphersuites for elliptic curves premised on ff/group.\n\nThis library, except for the not recommended Ed44"
},
{
"path": "crypto/ciphersuite/kp256/Cargo.toml",
"chars": 1481,
"preview": "[package]\nname = \"ciphersuite-kp256\"\nversion = \"0.4.0\"\ndescription = \"Ciphersuites built around ff/group\"\nlicense = \"MIT"
},
{
"path": "crypto/ciphersuite/kp256/LICENSE",
"chars": 1073,
"preview": "MIT License\n\nCopyright (c) 2021-2023 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining "
},
{
"path": "crypto/ciphersuite/kp256/README.md",
"chars": 80,
"preview": "# Ciphersuite {k, p}256\n\nSECP256k1 and P-256 Ciphersuites around k256 and p256.\n"
},
{
"path": "crypto/ciphersuite/kp256/src/lib.rs",
"chars": 6085,
"preview": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![cfg_attr(not(feature = \"std\"), no_std)]\n\nuse zeroize::Zeroize;\n\nuse sha2::Sha2"
},
{
"path": "crypto/ciphersuite/src/lib.md",
"chars": 615,
"preview": "# Ciphersuite\n\nCiphersuites for elliptic curves premised on ff/group.\n\nThis library was\n[audited by Cypher Stack in Marc"
},
{
"path": "crypto/ciphersuite/src/lib.rs",
"chars": 3792,
"preview": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![doc = include_str!(\"lib.md\")]\n#![cfg_attr(not(feature = \"std\"), no_std)]\n\nuse "
},
{
"path": "crypto/dalek-ff-group/Cargo.toml",
"chars": 1605,
"preview": "[package]\nname = \"dalek-ff-group\"\nversion = \"0.4.4\"\ndescription = \"ff/group bindings around curve25519-dalek\"\nlicense = "
},
{
"path": "crypto/dalek-ff-group/LICENSE",
"chars": 1073,
"preview": "MIT License\n\nCopyright (c) 2022-2023 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining "
},
{
"path": "crypto/dalek-ff-group/README.md",
"chars": 561,
"preview": "# Dalek FF/Group\n\nff/group bindings around curve25519-dalek with a from_hash/random function based\naround modern depende"
},
{
"path": "crypto/dalek-ff-group/src/ciphersuite.rs",
"chars": 2830,
"preview": "use zeroize::Zeroize;\n\nuse sha2::{Digest, Sha512};\n\nuse group::Group;\nuse crate::Scalar;\n\nuse ciphersuite::Ciphersuite;\n"
},
{
"path": "crypto/dalek-ff-group/src/field.rs",
"chars": 10848,
"preview": "use core::{\n ops::{Add, AddAssign, Sub, SubAssign, Neg, Mul, MulAssign},\n iter::{Sum, Product},\n};\n\nuse zeroize::Zeroi"
},
{
"path": "crypto/dalek-ff-group/src/lib.rs",
"chars": 12912,
"preview": "#![allow(deprecated)]\n#![cfg_attr(docsrs, feature(doc_cfg))]\n#![no_std] // Prevents writing new code, in what should be "
},
{
"path": "crypto/dkg/Cargo.toml",
"chars": 1073,
"preview": "[package]\nname = \"dkg\"\nversion = \"0.6.1\"\ndescription = \"Distributed key generation over ff/group\"\nlicense = \"MIT\"\nreposi"
},
{
"path": "crypto/dkg/LICENSE",
"chars": 1073,
"preview": "MIT License\n\nCopyright (c) 2021-2025 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining "
},
{
"path": "crypto/dkg/README.md",
"chars": 833,
"preview": "# Distributed Key Generation\n\nA crate implementing a type for keys, presumably the result of a distributed\nkey generatio"
},
{
"path": "crypto/dkg/dealer/Cargo.toml",
"chars": 990,
"preview": "[package]\nname = \"dkg-dealer\"\nversion = \"0.6.0\"\ndescription = \"Produce dkg::ThresholdKeys with a dealer key generation\"\n"
},
{
"path": "crypto/dkg/dealer/LICENSE",
"chars": 1073,
"preview": "MIT License\n\nCopyright (c) 2021-2025 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining "
},
{
"path": "crypto/dkg/dealer/README.md",
"chars": 730,
"preview": "# Distributed Key Generation - Dealer\n\nThis crate implements a dealer key generation protocol for the\n[`dkg`](https://do"
},
{
"path": "crypto/dkg/dealer/src/lib.rs",
"chars": 2251,
"preview": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![doc = include_str!(\"../README.md\")]\n#![no_std]\n\nuse core::ops::Deref;\nuse std_"
},
{
"path": "crypto/dkg/musig/Cargo.toml",
"chars": 1402,
"preview": "[package]\nname = \"dkg-musig\"\nversion = \"0.6.0\"\ndescription = \"The MuSig key aggregation protocol\"\nlicense = \"MIT\"\nreposi"
},
{
"path": "crypto/dkg/musig/LICENSE",
"chars": 1073,
"preview": "MIT License\n\nCopyright (c) 2021-2025 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining "
},
{
"path": "crypto/dkg/musig/README.md",
"chars": 604,
"preview": "# Distributed Key Generation - MuSig\n\nThis implements the MuSig key aggregation protocol for the\n[`dkg`](https://docs.rs"
},
{
"path": "crypto/dkg/musig/src/lib.rs",
"chars": 5103,
"preview": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![doc = include_str!(\"../README.md\")]\n#![cfg_attr(not(feature = \"std\"), no_std)]"
},
{
"path": "crypto/dkg/musig/src/tests.rs",
"chars": 2126,
"preview": "use std::collections::HashMap;\n\nuse zeroize::Zeroizing;\nuse rand_core::OsRng;\n\nuse dalek_ff_group::Ristretto;\nuse cipher"
},
{
"path": "crypto/dkg/pedpop/Cargo.toml",
"chars": 1707,
"preview": "[package]\nname = \"dkg-pedpop\"\nversion = \"0.6.0\"\ndescription = \"The PedPoP distributed key generation protocol\"\nlicense ="
},
{
"path": "crypto/dkg/pedpop/LICENSE",
"chars": 1073,
"preview": "MIT License\n\nCopyright (c) 2021-2025 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining "
},
{
"path": "crypto/dkg/pedpop/README.md",
"chars": 602,
"preview": "# Distributed Key Generation - PedPoP\n\nThis implements the PedPoP distributed key generation protocol for the\n[`dkg`](ht"
},
{
"path": "crypto/dkg/pedpop/src/encryption.rs",
"chars": 16197,
"preview": "use core::{ops::Deref, fmt};\nuse std::{io, collections::HashMap};\n\nuse thiserror::Error;\n\nuse zeroize::{Zeroize, Zeroizi"
},
{
"path": "crypto/dkg/pedpop/src/lib.rs",
"chars": 24774,
"preview": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![doc = include_str!(\"../README.md\")]\n// This crate requires `dleq` which doesn'"
},
{
"path": "crypto/dkg/pedpop/src/tests.rs",
"chars": 10842,
"preview": "use std::collections::HashMap;\n\nuse rand_core::{RngCore, CryptoRng, OsRng};\n\nuse dalek_ff_group::Ristretto;\nuse ciphersu"
},
{
"path": "crypto/dkg/promote/Cargo.toml",
"chars": 1437,
"preview": "[package]\nname = \"dkg-promote\"\nversion = \"0.6.1\"\ndescription = \"Promotions for keys from the dkg crate\"\nlicense = \"MIT\"\n"
},
{
"path": "crypto/dkg/promote/LICENSE",
"chars": 1073,
"preview": "MIT License\n\nCopyright (c) 2021-2025 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining "
},
{
"path": "crypto/dkg/promote/README.md",
"chars": 651,
"preview": "# Distributed Key Generation - Promote\n\nThis crate implements 'promotions' for keys from the\n[`dkg`](https://docs.rs/dkg"
},
{
"path": "crypto/dkg/promote/src/lib.rs",
"chars": 5090,
"preview": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![doc = include_str!(\"../README.md\")]\n// This crate requires `dleq` which doesn'"
},
{
"path": "crypto/dkg/promote/src/tests.rs",
"chars": 3423,
"preview": "use core::marker::PhantomData;\nuse std::collections::HashMap;\n\nuse zeroize::{Zeroize, Zeroizing};\nuse rand_core::OsRng;\n"
},
{
"path": "crypto/dkg/recovery/Cargo.toml",
"chars": 891,
"preview": "[package]\nname = \"dkg-recovery\"\nversion = \"0.6.0\"\ndescription = \"Recover a secret-shared key from a collection of dkg::T"
},
{
"path": "crypto/dkg/recovery/LICENSE",
"chars": 1073,
"preview": "MIT License\n\nCopyright (c) 2021-2025 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining "
},
{
"path": "crypto/dkg/recovery/README.md",
"chars": 763,
"preview": "# Distributed Key Generation - Recovery\n\nA utility function to recover a key from its secret shares.\n\nKeys likely SHOULD"
},
{
"path": "crypto/dkg/recovery/src/lib.rs",
"chars": 2504,
"preview": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![doc = include_str!(\"../README.md\")]\n#![no_std]\n\nuse core::ops::{Deref, DerefMu"
},
{
"path": "crypto/dkg/src/lib.rs",
"chars": 21001,
"preview": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![doc = include_str!(\"../README.md\")]\n#![cfg_attr(not(feature = \"std\"), no_std)]"
},
{
"path": "crypto/dleq/Cargo.toml",
"chars": 2038,
"preview": "[package]\nname = \"dleq\"\nversion = \"0.4.1\"\ndescription = \"Implementation of single and cross-curve Discrete Log Equality "
},
{
"path": "crypto/dleq/LICENSE",
"chars": 1088,
"preview": "MIT License\n\nCopyright (c) 2020-2023 Luke Parker, Lee Bousfield\n\nPermission is hereby granted, free of charge, to any pe"
},
{
"path": "crypto/dleq/README.md",
"chars": 3669,
"preview": "# Discrete Log Equality\n\nImplementation of discrete log equality proofs for curves implementing\n`ff`/`group`.\n\nThere is "
},
{
"path": "crypto/dleq/src/cross_group/aos.rs",
"chars": 7687,
"preview": "use rand_core::{RngCore, CryptoRng};\n\nuse zeroize::Zeroize;\n\nuse transcript::Transcript;\n\nuse group::{\n ff::{Field, Pri"
},
{
"path": "crypto/dleq/src/cross_group/bits.rs",
"chars": 4805,
"preview": "use rand_core::{RngCore, CryptoRng};\n\nuse zeroize::Zeroize;\n\nuse transcript::Transcript;\n\nuse group::{ff::PrimeFieldBits"
},
{
"path": "crypto/dleq/src/cross_group/mod.rs",
"chars": 15656,
"preview": "use core::ops::{Deref, DerefMut};\n#[cfg(feature = \"serialize\")]\nuse std::io::{self, Read, Write};\n\nuse thiserror::Error;"
},
{
"path": "crypto/dleq/src/cross_group/scalar.rs",
"chars": 2311,
"preview": "use core::ops::DerefMut;\n\nuse ff::PrimeFieldBits;\n\nuse zeroize::Zeroize;\n\nuse crate::cross_group::u8_from_bool;\n\n/// Con"
},
{
"path": "crypto/dleq/src/cross_group/schnorr.rs",
"chars": 2339,
"preview": "use core::ops::Deref;\n\nuse rand_core::{RngCore, CryptoRng};\n\nuse zeroize::{Zeroize, Zeroizing};\n\nuse transcript::Transcr"
},
{
"path": "crypto/dleq/src/lib.rs",
"chars": 10318,
"preview": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![cfg_attr(not(feature = \"std\"), no_std)]\n#![doc = include_str!(\"../README.md\")]"
},
{
"path": "crypto/dleq/src/tests/cross_group/aos.rs",
"chars": 2015,
"preview": "use rand_core::OsRng;\n\nuse group::{ff::Field, Group};\n\nuse multiexp::BatchVerifier;\n\nuse crate::{\n cross_group::aos::{R"
},
{
"path": "crypto/dleq/src/tests/cross_group/mod.rs",
"chars": 5414,
"preview": "use core::ops::Deref;\n\nuse hex_literal::hex;\n\nuse zeroize::Zeroizing;\nuse rand_core::{RngCore, OsRng};\n\nuse ff::{Field, "
},
{
"path": "crypto/dleq/src/tests/cross_group/scalar.rs",
"chars": 1387,
"preview": "use rand_core::OsRng;\n\nuse ff::{Field, PrimeField};\n\nuse k256::Scalar as K256Scalar;\nuse dalek_ff_group::Scalar as Dalek"
},
{
"path": "crypto/dleq/src/tests/cross_group/schnorr.rs",
"chars": 967,
"preview": "use core::ops::Deref;\n\nuse rand_core::OsRng;\n\nuse zeroize::Zeroize;\n\nuse group::{\n ff::{Field, PrimeFieldBits},\n prime"
},
{
"path": "crypto/dleq/src/tests/mod.rs",
"chars": 4664,
"preview": "use core::ops::Deref;\n\nuse hex_literal::hex;\n\nuse rand_core::OsRng;\n\nuse zeroize::Zeroizing;\n\nuse ff::Field;\nuse group::"
},
{
"path": "crypto/ed448/Cargo.toml",
"chars": 1488,
"preview": "[package]\nname = \"minimal-ed448\"\nversion = \"0.4.2\"\ndescription = \"Unaudited, inefficient implementation of Ed448 in Rust"
},
{
"path": "crypto/ed448/LICENSE",
"chars": 1073,
"preview": "MIT License\n\nCopyright (c) 2022-2023 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining "
},
{
"path": "crypto/ed448/README.md",
"chars": 286,
"preview": "# Minimal Ed448\n\nBarebones implementation of Ed448 bound to the ff/group API, rejecting torsion\nto achieve a PrimeGroup "
},
{
"path": "crypto/ed448/src/backend.rs",
"chars": 8646,
"preview": "use zeroize::Zeroize;\n\n// Use black_box when possible\n#[rustversion::since(1.66)]\nmod black_box {\n pub(crate) fn black_"
},
{
"path": "crypto/ed448/src/ciphersuite.rs",
"chars": 2840,
"preview": "use zeroize::Zeroize;\n\nuse sha3::{\n digest::{\n typenum::U114, core_api::BlockSizeUser, Update, Output, OutputSizeUse"
},
{
"path": "crypto/ed448/src/field.rs",
"chars": 1497,
"preview": "use zeroize::{DefaultIsZeroes, Zeroize};\n\nuse crypto_bigint::{\n U448, U896,\n modular::constant_mod::{ResidueParams, Re"
},
{
"path": "crypto/ed448/src/lib.rs",
"chars": 323,
"preview": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![doc = include_str!(\"../README.md\")]\n#![no_std]\n#![allow(clippy::redundant_clos"
},
{
"path": "crypto/ed448/src/point.rs",
"chars": 9813,
"preview": "use core::{\n ops::{Add, AddAssign, Neg, Sub, SubAssign, Mul, MulAssign},\n iter::Sum,\n};\n\nuse rand_core::RngCore;\n\nuse "
},
{
"path": "crypto/ed448/src/scalar.rs",
"chars": 2162,
"preview": "use zeroize::{DefaultIsZeroes, Zeroize};\n\nuse crypto_bigint::{\n U448, U896, U1024,\n modular::constant_mod::{ResiduePar"
},
{
"path": "crypto/ff-group-tests/Cargo.toml",
"chars": 841,
"preview": "[package]\nname = \"ff-group-tests\"\nversion = \"0.13.2\"\ndescription = \"A collection of sanity tests for implementors of ff/"
},
{
"path": "crypto/ff-group-tests/LICENSE",
"chars": 1073,
"preview": "MIT License\n\nCopyright (c) 2022-2023 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining "
},
{
"path": "crypto/ff-group-tests/README.md",
"chars": 613,
"preview": "# FF/Group Tests\n\nA series of sanity checks for implementors of the ff/group APIs.\n\nImplementors are assumed to be of a "
},
{
"path": "crypto/ff-group-tests/src/field.rs",
"chars": 7791,
"preview": "use rand_core::RngCore;\nuse subtle::Choice;\nuse group::ff::Field;\n\n/// Perform basic tests on equality.\npub fn test_eq<F"
},
{
"path": "crypto/ff-group-tests/src/group.rs",
"chars": 7081,
"preview": "use rand_core::RngCore;\nuse group::{\n ff::{Field, PrimeFieldBits},\n Group,\n prime::PrimeGroup,\n};\n\nuse crate::prime_f"
},
{
"path": "crypto/ff-group-tests/src/lib.rs",
"chars": 268,
"preview": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![doc = include_str!(\"../README.md\")]\n\n/// Tests for the Field trait.\npub mod fi"
},
{
"path": "crypto/ff-group-tests/src/prime_field.rs",
"chars": 12001,
"preview": "use rand_core::RngCore;\nuse group::ff::{PrimeField, PrimeFieldBits};\n\nuse crate::field::test_field;\n\n// Ideally, this an"
},
{
"path": "crypto/frost/Cargo.toml",
"chars": 2856,
"preview": "[package]\nname = \"modular-frost\"\nversion = \"0.10.1\"\ndescription = \"Modular implementation of FROST over ff/group\"\nlicens"
},
{
"path": "crypto/frost/LICENSE",
"chars": 1073,
"preview": "MIT License\n\nCopyright (c) 2021-2023 Luke Parker\n\nPermission is hereby granted, free of charge, to any person obtaining "
},
{
"path": "crypto/frost/README.md",
"chars": 1349,
"preview": "# Modular FROST\n\nA modular implementation of FROST for any curve with a ff/group API.\nAdditionally, custom algorithms ma"
},
{
"path": "crypto/frost/src/algorithm.rs",
"chars": 7623,
"preview": "use core::{marker::PhantomData, fmt::Debug};\nuse std::io::{self, Read, Write};\n\nuse zeroize::Zeroizing;\nuse rand_core::{"
},
{
"path": "crypto/frost/src/curve/dalek.rs",
"chars": 1223,
"preview": "use digest::Digest;\n\nuse dalek_ff_group::Scalar;\n\nuse ciphersuite::Ciphersuite;\n\nuse crate::{curve::Curve, algorithm::Hr"
}
]
// ... and 394 more files (download for full content)
About this extraction
This page contains the full source code of the serai-dex/serai GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 594 files (2.4 MB), approximately 651.7k tokens, and a symbol index with 3271 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.